file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars)
---|---|---|---
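Each row pairs a file name with a prefix, a held-out middle, and a suffix, the usual layout for fill-in-the-middle training data. A minimal sketch of stitching one row back into its source file follows; the function and the literal row are illustrative only and are not part of the dataset.

def reassemble(row):
    """Rebuild the original file text from one row (a dict keyed by the column names above)."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Hypothetical row mirroring the first entry below (OpeningBalanceSheet.ts); the middle is empty here.
row = {
    "file_name": "OpeningBalanceSheet.ts",
    "prefix": 'import { BalanceSheet } from "./BalanceSheet";\nexport class OpeningBalanceSheet extends BalanceSheet {\n',
    "middle": "",
    "suffix": "}\n",
}
print(reassemble(row))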
OpeningBalanceSheet.ts | import { BalanceSheet } from "./BalanceSheet";
export class OpeningBalanceSheet extends BalanceSheet {
}
zone.go | package controller
import (
"github.com/KubeOperator/KubeOperator/pkg/constant"
"github.com/KubeOperator/KubeOperator/pkg/controller/condition"
"github.com/KubeOperator/KubeOperator/pkg/controller/kolog"
"github.com/KubeOperator/KubeOperator/pkg/controller/page"
"github.com/KubeOperator/KubeOperator/pkg/dto"
"github.com/KubeOperator/KubeOperator/pkg/service"
"github.com/go-playground/validator/v10"
"github.com/kataras/iris/v12/context"
)
type ZoneController struct {
Ctx context.Context
ZoneService service.ZoneService
}
func NewZoneController() *ZoneController {
return &ZoneController{
ZoneService: service.NewZoneService(),
}
}
// List Zone
// @Tags zones
// @Summary Show all zones
// @Description Get the list of availability zones
// @Accept json
// @Produce json
// @Success 200 {object} page.Page
// @Security ApiKeyAuth
// @Router /zones [get]
func (z ZoneController) Get() (*page.Page, error) {
p, _ := z.Ctx.Values().GetBool("page")
if p {
num, _ := z.Ctx.Values().GetInt(constant.PageNumQueryKey)
size, _ := z.Ctx.Values().GetInt(constant.PageSizeQueryKey)
return z.ZoneService.Page(num, size, condition.TODO())
} else {
var page page.Page
items, err := z.ZoneService.List(condition.TODO())
if err != nil {
return nil, err
}
page.Items = items
page.Total = len(items)
return &page, nil
}
}
// Search Zone
// @Tags zones
// @Summary Search zones
// @Description Filter availability zones
// @Accept json
// @Produce json
// @Param conditions body condition.Conditions true "conditions"
// @Success 200 {object} page.Page
// @Security ApiKeyAuth
// @Router /zones/search [post]
func (z ZoneController) PostSearch() (*page.Page, error) {
p, _ := z.Ctx.Values().GetBool("page")
var conditions condition.Conditions
if z.Ctx.GetContentLength() > 0 {
if err := z.Ctx.ReadJSON(&conditions); err != nil {
return nil, err
}
}
if p {
num, _ := z.Ctx.Values().GetInt(constant.PageNumQueryKey)
size, _ := z.Ctx.Values().GetInt(constant.PageSizeQueryKey)
return z.ZoneService.Page(num, size, conditions)
} else {
var p page.Page
items, err := z.ZoneService.List(conditions)
if err != nil {
return nil, err
}
p.Items = items
p.Total = len(items)
return &p, nil
}
}
// Get Zone
// @Tags zones
// @Summary Show a zone
// @Description Get a single availability zone
// @Accept json
// @Produce json
// @Param name path string true "availability zone name"
// @Success 200 {object} dto.Zone
// @Security ApiKeyAuth
// @Router /zones/{name} [get]
func (z ZoneController) GetBy(name string) (*dto.Zone, error) {
return z.ZoneService.Get(name)
}
// Get Zones By Region
// @Tags zones
// @Summary Get zones by region
// @Description Get the availability zones associated with a region
// @Accept json
// @Produce json
// @Param region path string true "region name"
// @Success 200 {Array} []dto.Zone
// @Security ApiKeyAuth
// @Router /zones/list/{region} [get]
func (z ZoneController) GetListBy(regionName string) ([]dto.Zone, error) {
return z.ZoneService.ListByRegionName(regionName)
}
// Create Zone
// @Tags zones
// @Summary Create a zone
// @Description Create an availability zone
// @Accept json
// @Produce json
// @Param request body dto.ZoneCreate true "request"
// @Success 200 {object} dto.Zone
// @Security ApiKeyAuth
// @Router /zones [post]
func (z ZoneController) Post() (*dto.Zone, error) {
var req dto.ZoneCreate
err := z.Ctx.ReadJSON(&req)
if err != nil {
return nil, err
}
validate := validator.New()
err = validate.Struct(req)
if err != nil {
return nil, err
}
operator := z.Ctx.Values().GetString("operator")
go kolog.Save(operator, constant.CREATE_ZONE, req.Name)
return z.ZoneService.Create(req)
}
// Delete Zone
// @Tags zones
// @Summary Delete a zone
// @Description Delete an availability zone
// @Accept json
// @Produce json
// @Param name path string true "availability zone name"
// @Security ApiKeyAuth
// @Router /zones/{name} [delete]
func (z ZoneController) DeleteBy(name string) error {
operator := z.Ctx.Values().GetString("operator")
go kolog.Save(operator, constant.DELETE_ZONE, name)
return z.ZoneService.Delete(name)
}
// Update Zone
// @Tags zones
// @Summary Update a zone
// @Description Update an availability zone
// @Accept json
// @Produce json
// @Param request body dto.ZoneUpdate true "request"
// @Param name path string true "availability zone name"
// @Success 200 {object} dto.Zone
// @Security ApiKeyAuth
// @Router /zones/{name} [patch]
func (z ZoneController) PatchBy(name string) (*dto.Zone, error) {
var req dto.ZoneUpdate
err := z.Ctx.ReadJSON(&req)
if err != nil {
return nil, err
}
validate := validator.New()
err = validate.Struct(req)
if err != nil {
return nil, err
}
operator := z.Ctx.Values().GetString("operator")
go kolog.Save(operator, constant.UPDATE_ZONE, name)
return z.ZoneService.Update(name, req)
}
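// PostBatch applies a batch operation to the zones in the request and records the affected zone names in the operation log.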
func (z ZoneController) PostBatch() error {
var req dto.ZoneOp
err := z.Ctx.ReadJSON(&req)
if err != nil {
return err
}
validate := validator.New()
err = validate.Struct(req)
if err != nil {
return err
}
err = z.ZoneService.Batch(req)
if err != nil {
return err
}
operator := z.Ctx.Values().GetString("operator")
delZone := ""
for _, item := range req.Items {
delZone += (item.Name + ",")
}
go kolog.Save(operator, constant.DELETE_ZONE, delZone)
return err
}
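// PostClusters returns the clusters reported by the cloud provider for the zone request.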
func (z ZoneController) PostClusters() (dto.CloudZoneResponse, error) {
var req dto.CloudZoneRequest
err := z.Ctx.ReadJSON(&req)
if err != nil {
return dto.CloudZoneResponse{}, err
}
data, err := z.ZoneService.ListClusters(req)
if err != nil {
return dto.CloudZoneResponse{}, err
}
return dto.CloudZoneResponse{Result: data}, err
}
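// PostTemplates returns the templates reported by the cloud provider for the zone request.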
func (z ZoneController) PostTemplates() (dto.CloudZoneResponse, error) {
var req dto.CloudZoneRequest
err := z.Ctx.ReadJSON(&req)
if err != nil {
return dto.CloudZoneResponse{}, err
}
data, err := z.ZoneService.ListTemplates(req)
if err != nil {
return dto.CloudZoneResponse{}, err
}
return dto.CloudZoneResponse{Result: data}, err
}
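// PostDatastores returns the datastores reported by the cloud provider for the zone request.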
func (z ZoneController) PostDatastores() ([]dto.CloudDatastore, error) {
var req dto.CloudZoneRequest
err := z.Ctx.ReadJSON(&req)
if err != nil {
return nil, err
}
return z.ZoneService.ListDatastores(req)
}
_password-reset-finish.service.ts | <%#
Copyright 2013-2017 the original author or authors from the JHipster project.
This file is part of the JHipster project, see http://www.jhipster.tech/
for more information.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-%>
import { Injectable } from '@angular/core';
import { Http } from '@angular/http';
import { Observable } from 'rxjs/Rx';
<%_ if (authenticationType !== 'uaa') { _%>
import { SERVER_API_URL } from '../../../app.constants';
<%_ } _%>
@Injectable()
export class PasswordResetFinishService {
constructor(private http: Http) {}
save(keyAndPassword: any): Observable<any> {
return this.http.post(<%- apiUrlPrefix %>'api/account/reset-password/finish', keyAndPassword);
}
}
plotwrf.py | """
Sopan Kurkute
University of Saskatchewan
plotwrf.py
Python 2.x
Python script to plot various WRF model output. Plots are saved as PNG.
example usage: plotwrf.py --infile filename.nc --sfc --tunit C --ppn --punit mm --td
Will plot surface chart and dewpoint in Celsius and precipitation in mm.
Use plotwrf.py --help to list all options
Last modified: 05/05/16
Skew-T plotting with the pyMeteo package available at: https://github.com/cwebster2/pyMeteo
Credit to Casey Webster
Skew-t plotting with SHARPpy package available at: https://github.com/sharppy/SHARPpy
Credit to: Patrick Marsh (SPC), Kelton Halbert (OU School of Meteorology), Greg Blumberg (OU/CIMMS), Tim Supinie (OU School of Meteorology)
"""
import matplotlib
#matplotlib.use('Agg') # UNCOMMENT THIS ONLY WHEN INVOKING FROM CRON SCRIPT
from scipy.io import netcdf # USE SCIPY MODULE
#from netCDF4 import Dataset # UNCOMMENT TO USE NETCDF 4 MODULE
from mpl_toolkits.basemap import Basemap
from matplotlib import cm
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import numpy as np
import datetime
from optparse import OptionParser
import os.path
import sys
import conversions as conv
import calc_vars as calc
import plot_funcs as pltfuncs
import funcs
import colormaps as cmap
# option parser
usage="usage: %prog [options] \n example usage: plotwrf.py --infile filename.nc --sfc --tunit C --td --ppn --punit mm"
parser = OptionParser(usage=usage, version="%prog 6.0 by Sopan Kurkute")
parser.add_option("--sfc", dest="sfc",action="store_true",help="Plot surface chart with 2m temp, wind barbs and MSLP")
parser.add_option("--t2", dest="t2", action="store_true", help="Plot 2m temp and wind barbs only")
parser.add_option("--mslp", dest="mslp", action="store_true", help="Plot MSLP only")
parser.add_option("--ppnaccum", dest="ppnaccum", action="store_true", help="Plot total accumulated precipitation")
parser.add_option("--ppn", dest="ppn", action="store_true", help="Plot total precipitation")
parser.add_option("--convppn", dest="convppn", action="store_true", help="Plot convective precipitation")
parser.add_option("--td", dest="td", action="store_true", help="Plot 2m dew point temperature")
parser.add_option("--rh", dest="rh", action="store_true", help="Plot relative humidity")
parser.add_option("--snow", dest="snow", action="store_true", help="Plot snow accumulation")
parser.add_option("--hail", dest="hail", action="store_true", help="Plot hail accumulaton")
parser.add_option("--simdbz", dest="simdbz", action="store_true", help="Plot simulated reflectivity")
parser.add_option("--compdbz", dest="compdbz", action="store_true", help="Plot composite reflectivity")
parser.add_option("--lcl", dest="lcl", action="store_true", help="Plot LCL (lifted condensation level)")
parser.add_option("--thetae", dest="thetae", action="store_true", help="Plot Theta-e (equivalent potential temperature)")
parser.add_option("--ua", dest="ua", action="store_true", help="Plot geopotential height, temperature and wind barbs at given pressure levels (hPa), --lvl")
parser.add_option("--lvl", dest="lvl", help="Pressure levels to interpolate to for upper level charts option --ua, --vv. Comma seperated e.g 250,500", default="500")
parser.add_option("--run", dest="run", type="string", help="Model initialisation time", default="00")
parser.add_option("--indir", dest="indir", type="string", help="Directory of the NetCDF file", default="")
parser.add_option("--outdir", dest="outdir", type="string", help="Directory to save plots too", default="")
parser.add_option("--infile", dest="infile", type="string", help="NetCDF filename", default="")
parser.add_option("--thin", dest="thin", type="int", help="Thinning factor for wind barbs", default=5)
parser.add_option("--tunit", dest="tunit", type="string", help="Unit of temperature (C or F)", default="C")
parser.add_option("--punit", dest="punit", type="string", help="Unit of precipitation (mm or inches)", default="mm")
parser.add_option("--save", dest="save", action="store_true", help="Save plots as png files")
parser.add_option("--v", dest="verbose", action="store_true", help="Enable verbose")
parser.add_option("--auto", dest="auto", action="store_true", help="Enable auto file input for daily WRF runs")
parser.add_option("--barbsize", dest="barbsize", type="int", help="Set the length of the wind barbs", default=7)
parser.add_option("--75lr", dest="lr75", action="store_true", help="Plot the H7-H5 lapse rates")
parser.add_option("--vort500", dest="vort500", action="store_true", help="Plot the 500mb absolute vorticity")
parser.add_option("--shear06", dest="shear06", action="store_true", help="Plot the 0-6km shear")
parser.add_option("--vv", dest="vv", action="store_true", help="Plot vertical velocity at specified levels --lvl")
parser.add_option("--irtemp", dest="irtemp", action="store_true", help="Plot IR Brightness Temperature")
parser.add_option("--skewt", dest="skewt", action="store_true", help="Plot Skew-t for a location. Uses pyMeteo package.")
parser.add_option("--slat", dest="slat", type="int", help="Latitude for Skew-t")
parser.add_option("--slon", dest="slon", type="int", help="Longitude for Skew-t")
parser.add_option("--getij", dest="getij", action="store_true", help="Get i,j and nearest Lat/Lon for entered Lat/Lon")
parser.add_option("--skewt2", dest="skewt2", action="store_true", help="Plot Skew-t for a location using SHARPpy")
parser.add_option("--uh25", dest="uh25", action="store_true", help="Plot 2-5km Updraft Helicity")
(opt, arg) = parser.parse_args()
indir = opt.indir # dir of input file
filein = opt.infile
if opt.auto: # for auto file input for daily runs
run = opt.run # model init time
filein = 'wrfout_d01_'+datetime.datetime.utcnow().strftime('%Y-%m-%d')+'_'+run+':00:00' # auto filename for current days run
while os.path.isfile(indir+filein) is False and not opt.auto: #if file doesn't exist get filename
print "File", filein, "not found! in directory:", indir
indir = raw_input("Please enter a directory (blank for current dir): ")
filein = raw_input("Please enter a filename: ")
try: #check if file exists and read in
print "Reading in file: ", indir+filein
#nc = Dataset(indir+filein) # for netcdf 4
nc = netcdf.netcdf_file(indir+filein,'r') # for scipy
except: # quit if cant read file
print "Something went wrong reading in the file"
print "QUITTING"
sys.exit()
outdir = opt.outdir # output image dir
## BASEMAP STUFF
#thin factor for wind barbs
thin = opt.thin
#get lats and lons for map projection
cen_lat = float(nc.CEN_LAT)
cen_lon = float(nc.CEN_LON)
truelat1 = float(nc.TRUELAT1)
truelat2 = float(nc.TRUELAT2)
standlon = float(nc.STAND_LON)
xlat = nc.variables['XLAT']
xlong = nc.variables['XLONG']
map_proj = int(nc.MAP_PROJ)
# dimensions of domain
x_dim = len(xlat[0,0,:])
y_dim = len(xlong[0,:,0])
# Get dx and dy. Grid size
dx = float(nc.DX)
dy = float(nc.DY)
#calculate plot width and height from grid size * dimension. Domain size
width_meters = dx * (x_dim - 1)
height_meters = dy * (y_dim - 1)
# Define gridlines
parallels = np.arange(-90,90,10)
meridians = np.arange(0,360,10)
# find projection and create map. Only LCC tested.
if map_proj == 1: #lambert conformal.
proj = 'lcc'
projname = 'Lambert Conformal'
elif map_proj == 2: # polar stereographic
proj = 'npstere'
projname = 'Polar Stereographic'
elif map_proj == 3: # mercator
proj = 'merc'
projname = 'Mercator'
else: # not supported and quit
print "Projection ", map_proj, "unknown"
print "QUITTING"
sys.exit()
# make map
m = Basemap(resolution='i',projection=proj,width=width_meters,height=height_meters,lat_0=cen_lat,lon_0=cen_lon,lat_1=truelat1,lat_2=truelat2)
#m = Basemap(resolution='i',projection=proj,llcrnrlon=xlong[0,0,0],llcrnrlat=xlat[0,0,0],urcrnrlon=xlong[0,-1,-1],urcrnrlat=xlat[0,-1,-1],lat_0=cen_lat,lon_0=cen_lon)
#x, y = m(xlong[0,:,:],xlat[0,:,:])
# get lat/lons of an ny by nx evenly spaced grid
# make lons, lats and x, y coordinates
lons, lats = m.makegrid(x_dim, y_dim)
x, y = m(lons, lats) # compute map proj coordinates.
print "Using map projection: ", projname
## GET THIS DATA FOR NOW
times = nc.variables['Times'] #each time output in wrf nc file
t2 = nc.variables['T2'] #temp at 2m / Kelvin
u10 = nc.variables['U10'] #u10 wind / ms/s
v10 = nc.variables['V10'] #v10 wind / ms/s
psfc = nc.variables['PSFC'] #surface pressure / Pascals
rainc = nc.variables['RAINC'] # accumulated total cumulus precip
rainnc = nc.variables['RAINNC'] # accumulated total grid scale precip
thgt = nc.variables['HGT'] #terrain height
# general info
init = str(''.join(times[0])).replace('_',' ') # model init time
alltimes = [] #list to hold all times
### BEGIN PLOT FUNCTIONS ###
# savefile and makeplot and the functions for putting data on maps may stay here for now #
def savefile(filename): #save plot image as png
print "Saving file: ", filename
#print filename
plt.savefig(outdir+filename)
def makeplot(data,title,cblabel,clevs,cbticks,ftitle): # function to make plots
fig = plt.gcf() #get current fig
ax = plt.gca() #get current axis
#ax = fig.add_axes([0.1,0.1,0.8,0.8])
# draw parallels and meridians
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10)
# draw coastlines, state and country boundaries
m.drawcoastlines()
m.drawstates()
m.drawcountries()
# set plot title
#ax.set_title(title+currtime)
ax.text(0,1.01*height_meters,title+'\nValid:'+currtime,fontsize=14)
ax.text(0.65*width_meters,1.01*height_meters,'Init: '+init, fontsize=12)
#fig.suptitle('Init: '+init+'', fontsize=12) #init title
if clevs is False:
# No color bar
pass
else: #create color bar
cbar = m.colorbar(data,location='bottom',pad="5%")
cbar.set_label(cblabel)
if cbticks:
cbar.set_ticks(clevs)
cbar.ax.tick_params(labelsize=8)
if opt.save:
#create filename for image and save file
filename = ftitle+filetime+'.png'
#filename = ftitle+str(time)+'.png' #save file with number instead of date and time
savefile(filename) #save image file
else:
plt.show()
def t2wind(): # plot t2 and wind barbs
# create figure
plt.figure(figsize=(8,8))
temps = t2[time] # temps in K
if opt.tunit == 'F':
t2f = conv.k_to_f(temps) # convert to F
clevs = np.arange(-30,115,5) # levels / degF
cs = m.contourf(x,y,t2f,clevs,cmap=cm.get_cmap('gist_ncar'))
elif opt.tunit == 'C':
t2c = conv.k_to_c(temps) # convert to C
clevs = np.arange(-40,55,5) # levels / degC
cs = m.contourf(x,y,t2c,clevs,cmap=cm.get_cmap('gist_ncar'))
#make x and y grid points for barbs
#yy = np.arange(0, len(y), 8)
#xx = np.arange(0, len(x), 8)
#gp = np.meshgrid(yy, xx)
#print x[::thin,::thin].shape #check x co-ord thinning
#print u[time,::thin,::thin].shape #check u10 thinning
#x_th,y_th = m(xlong[0,::thin,::thin],xlat[0,::thin,::thin]) #another method to thin barbs
#convert wind to kts
u10kts = conv.ms_to_kts(u10[time])
v10kts = conv.ms_to_kts(v10[time])
m.barbs(x[::thin,::thin], y[::thin,::thin], u10kts[::thin,::thin], v10kts[::thin,::thin],length=opt.barbsize) #plot barbs
title = "2m Temperature and Wind Barbs (kts)"
ftitle = "t2-wind-"
if opt.tunit == 'C':
cblabel = r'$\degree$C'
elif opt.tunit == 'F':
cblabel = r'$\degree$F'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def mslponly(): # plot MSLP only
#create figure
plt.figure(figsize=(8,8))
x, y = m(lons, lats)
psfchpa = conv.pa_to_hpa(psfc[time]) #convert Pa to hPa
mslp = calc.calc_mslp(psfchpa, thgt[0], t2[time]) # get mslp
mslp = gaussian_filter(mslp, sigma=3) #smooth wiggles
#find local min and local max
local_min, local_max = funcs.extrema(mslp, mode='wrap', window=50)
clevs = np.arange(900,1055,2.)
cs = m.contour(x,y,mslp,clevs,colors='k',linewidths=2.)
plt.clabel(cs, inline=True, fmt='%1.0f', fontsize=12, colors='k')
xlows = x[local_min]; xhighs = x[local_max]
ylows = y[local_min]; yhighs = y[local_max]
lowvals = mslp[local_min]; highvals = mslp[local_max]
# plot lows as blue L's, with min pressure value underneath.
xyplotted = []
# don't plot if there is already a L or H within dmin meters.
yoffset = 0.022*(m.ymax-m.ymin)
dmin = yoffset
for x,y,p in zip(xlows, ylows, lowvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'L',fontsize=14,fontweight='bold', ha='center',va='center',color='b')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='b', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
# plot highs as red H's, with max pressure value underneath.
xyplotted = []
for x,y,p in zip(xhighs, yhighs, highvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'H',fontsize=14,fontweight='bold', ha='center',va='center',color='r')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='r', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
title = "MSLP (hPa)"
ftitle = 'mslp-'
cblabel = ''
clevs = False # no color bar levels
cbticks = False
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def precipaccum(): # plot total precip accumulation
# create figure
plt.figure(figsize=(8,8))
ppn = rainc[time]+rainnc[time] #ppn / mm
if opt.punit == 'mm':
clevs = [0.1,0.5,1,2,5,10,15,20,30,40,50,80,100,200,300,500] #levels / mm
elif opt.punit == 'in':
clevs = [0.01, 0.1, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, \
6.0, 8.0, 10., 20.0] # levels / in
ppn = conv.mm_to_in(ppn) # convert ppn to inches
norm = matplotlib.colors.BoundaryNorm(clevs, 15) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,ppn,clevs,norm=norm,cmap=cmap.precip_colormap) #plot total
title = "Precipitation Accumulation"
ftitle = 'ppnaccum-'
if opt.punit == 'mm':
cblabel = 'mm'
elif opt.punit == 'in':
cblabel = 'inches'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def precip(): # plot current precip at each time
# create figure
plt.figure(figsize=(8,8))
ppn = rainc[time]+rainnc[time] # total ppn / mm
currppn = np.array(ppn.shape)
if time == 0: # initial amount
currppn = ppn
else: # current amount
prev = rainc[time-1]+rainnc[time-1]
currppn = ppn-prev
if opt.punit == 'mm':
clevs = [0.1,0.5,1,2,5,10,15,20,30,40,50,80,100,200,300,500] #levels / mm
elif opt.punit == 'in':
clevs = [0.01, 0.1, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, \
6.0, 8.0, 10., 20.0] # levels / in
currppn = conv.mm_to_in(currppn) # convert ppn to inches
norm = matplotlib.colors.BoundaryNorm(clevs, 15) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,currppn,clevs,norm=norm,cmap=cmap.precip_colormap) #plot total
title = "Precipitation"
ftitle = 'ppn-'
if opt.punit == 'mm':
cblabel = 'mm'
elif opt.punit == 'in':
cblabel = 'inches'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def convprecip(): # plot current convective precip at each time
# create figure
plt.figure(figsize=(8,8))
convppn = rainc[time] #ppn / mm
currppn = np.array(convppn.shape)
if time == 0:
currppn = convppn
else:
prev = rainc[time-1]
currppn = convppn-prev
if opt.punit == 'mm':
clevs = [0.1,0.5,1,2,5,10,15,20,30,40,50,80,100,200,300,500] #levels / mm
elif opt.punit == 'in':
clevs = [0.01, 0.1, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, \
6.0, 8.0, 10., 20.0] # levels / in
currppn = conv.mm_to_in(currppn) # convert ppn to inches
norm = matplotlib.colors.BoundaryNorm(clevs, 15) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,currppn,clevs,norm=norm,cmap=cmap.precip_colormap) #plot total
title = "Convective Precipitation"
ftitle = 'convppn-'
if opt.punit == 'mm':
cblabel = 'mm'
elif opt.punit == 'in':
cblabel = 'inches'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def tdrh(): # plot td and rh
# create figure
plt.figure(figsize=(8,8))
q2 = nc.variables['Q2'][time] # water vapour mixing ratio at 2m
t2c = conv.k_to_c(t2[time]) #convert temp to Celsius
psfchpa = conv.pa_to_hpa(psfc[time]) # pres to hPa
es = calc.calc_es(t2c) # calc es
ws = calc.calc_ws(es, psfchpa) # calc ws
u10kts = conv.ms_to_kts(u10[time])
v10kts = conv.ms_to_kts(v10[time])
if opt.rh:
rh = calc.calc_rh(q2, ws) #calc rh
clevs = np.arange(0,105,5)
cs = m.contourf(x,y,rh,clevs,cmap=cm.get_cmap('jet')) #plot RH
cblabel='RH \ %'
title = "Relative Humidity \n Valid: "
ftitle = 'rh-'
cbticks = True
elif opt.td:
rh = calc.calc_rh(q2, ws) # calc rh
td = calc.calc_dewpoint(es, rh) # calc td (deg C)
title = "2m Dew Point"
ftitle = 'td-'
if opt.tunit == 'C':
clevs = np.arange(-30,65,5) # levels / degC
cblabel = r'$\degree$C'
elif opt.tunit == 'F':
clevs = np.arange(-20,125,5) # levels / degF
td = conv.c_to_f(td) #convert Celsius to Fahrenheit
cblabel = r'$\degree$F'
cs = m.contourf(x,y,td,clevs,cmap=cm.get_cmap('gist_ncar')) #plot Td
m.barbs(x[::thin,::thin], y[::thin,::thin], u10kts[::thin,::thin], v10kts[::thin,::thin],length=opt.barbsize) #plot barbs
cbticks=True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def upperair(): # plot upper air chart for given level. geopotential height, wind bards and temp
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
U = nc.variables['U'][time] # U wind component
V = nc.variables['V'][time] # V wind component
Unew = funcs.unstagger(U,'U') # unstagger u
Vnew = funcs.unstagger(V,'V') # unstagger v
ph = nc.variables['PH'][time] #perturbation geopotential
phb = nc.variables['PHB'][time] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
theta = nc.variables['T'][time] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
totalTheta = theta + theta0 # total potential temp
totalT = conv.k_to_c(calc.theta_to_temp(totalTheta, totalp)) # calc temps in C
levels = opt.lvl.split(',') # get list of levels
for level in levels:
plt.figure(figsize=(8,8)) #create fig for each plot
level = int(level) # make it int
#interp data for level
gphgt = funcs.linear_interp(totalgp,totalp,level)
totalTfinal = funcs.linear_interp(totalT,totalp,level)
uinterp = funcs.linear_interp(Unew,totalp,level)
vinterp = funcs.linear_interp(Vnew,totalp,level)
Ufinal = conv.ms_to_kts(uinterp) #convert to kts
Vfinal = conv.ms_to_kts(vinterp)
#speed = calc.calc_wspeed(Ufinal, Vfinal)
gphgt = conv.gphgt_to_hgt(gphgt) # convert to height (m)
gphgt = gaussian_filter(gphgt, sigma=3) # smooth wiggles
totalTfinal = gaussian_filter(totalTfinal, sigma=2)
# set gpheight levels for common pressure levels
if level == 250:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),60)
elif level == 500:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),60)
elif level == 700:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
elif level == 850:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
elif level == 925:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
else: # use generic 30m spacing
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
#plot all this up
cs = m.contour(x,y,gphgt,gpclevs,colors='k',linewidths=2.)
plt.clabel(cs, inline=True, fmt='%1.0f', fontsize=12, colors='k')
tclevs = np.arange(np.min(totalTfinal),np.max(totalTfinal),4)
cs2 = m.contour(x,y,totalTfinal,tclevs,colors='r',linestyles='-',linewidths=2.)
plt.clabel(cs2,inline=True,fmt='%1.0f',fontsize=12,colors='r')
m.barbs(x[::thin,::thin], y[::thin,::thin], Ufinal[::thin,::thin], Vfinal[::thin,::thin],length=opt.barbsize) #plot barbs
level = str(level)
title = level+'mb Height (m), Temp (C), Wind Barbs (kts)'
ftitle = level+'mb-'
cblabel = 'kts'
clevs = False
cbticks = False
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def surface(): # plot surface chart. t2, wind barbs and mslp
# create figure
plt.figure(figsize=(8,8))
x, y = m(lons, lats)
t2c = conv.k_to_c(t2[time]) #convert temp to Celsius
if opt.tunit == 'F':
t2f = conv.c_to_f(t2c) #convert Celsius to Fahrenheit
clevs = np.arange(-30,115,5) # levels / degF
cs = m.contourf(x,y,t2f,clevs,cmap=cm.get_cmap('gist_ncar'))
cblabel = r'$\degree$F'
elif opt.tunit == 'C':
clevs = np.arange(-40,55,5) # levels / degC
cs = m.contourf(x,y,t2c,clevs,cmap=cm.get_cmap('gist_ncar'))
cblabel = r'$\degree$C'
cbticks = True
psfchpa = conv.pa_to_hpa(psfc[time]) #convert Pa to hPa
mslp = calc.calc_mslp(psfchpa, thgt[0], t2[time]) # get mslp
mslp = gaussian_filter(mslp, sigma=3) # smooth wiggles
local_min, local_max = funcs.extrema(mslp, mode='wrap', window=50)
#make x and y grid points for barbs
#yy = np.arange(0, len(y), 8)
#xx = np.arange(0, len(x), 8)
#gp = np.meshgrid(yy, xx)
#print x[::thin,::thin].shape #check x co-ord thinning
#print u[time,::thin,::thin].shape #check u10 thinning
#x_th,y_th = m(xlong[0,::thin,::thin],xlat[0,::thin,::thin]) #another method to thin barbs
#convert wind to kts
u10kts = conv.ms_to_kts(u10[time])
v10kts = conv.ms_to_kts(v10[time])
m.barbs(x[::thin,::thin], y[::thin,::thin], u10kts[::thin,::thin], v10kts[::thin,::thin],length=opt.barbsize) #plot barbs
title = "2m Temp, Wind Barbs (kts), MSLP (hPa)"
ftitle = 'sfc-'
pclevs = np.arange(900,1055,2.)
pcs = m.contour(x,y,mslp,pclevs,colors='k',linewidths=2.)
plt.clabel(pcs, inline=True, fmt='%1.0f', fontsize=12, colors='k')
xlows = x[local_min]; xhighs = x[local_max]
ylows = y[local_min]; yhighs = y[local_max]
lowvals = mslp[local_min]; highvals = mslp[local_max]
# plot lows as blue L's, with min pressure value underneath.
xyplotted = []
# don't plot if there is already a L or H within dmin meters.
yoffset = 0.022*(m.ymax-m.ymin)
dmin = yoffset
for x,y,p in zip(xlows, ylows, lowvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'L',fontsize=14,fontweight='bold', ha='center',va='center',color='b')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='b', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
# plot highs as red H's, with max pressure value underneath.
xyplotted = []
for x,y,p in zip(xhighs, yhighs, highvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'H',fontsize=14,fontweight='bold',
ha='center',va='center',color='r')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='r', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def snowaccum(): # plot snow accumulation
# create figure
plt.figure(figsize=(8,8))
snow = nc.variables['SNOWNC'][time] # total accumulated grid scale snow and ice / mm at each time
if opt.punit == 'mm':
clevs = [0,0.5,1,2.5,3,4,5,8,10,15,20,30,40,50,80,100,150,200,250,500]
cblabel = 'mm'
elif opt.punit == 'in':
snow = conv.mm_to_in(snow) # convert to inches
clevs = [0.25,0.5,0.75,1,1.5,2,2.5,3,4,5,6,8,10,12,14,16,18,20,22,24]
cblabel = 'inches'
cbticks = True
norm = matplotlib.colors.BoundaryNorm(clevs, 19) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,snow,clevs,norm=norm,cmap=cmap.snow_colormap)
title = "Snow Accumulation"
ftitle = 'snow-'
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def hailaccum(): # plot hail accumulation
# create figure
plt.figure(figsize=(8,8))
hail = nc.variables['HAILNC'][time] # accumulated total grid scale hail / mm at each time
if opt.punit == 'mm':
clevs = [0.5,1.,1.5,2.,2.5,3.,4.,5.,6.,7.,8.,9.,10.,11.,12.]
cblabel = 'mm'
elif opt.punit == 'in':
hail = conv.mm_to_in(hail) # convert to inches
clevs = [0.01,0.02,0.04,0.06,0.08,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55]
cblabel = 'inches'
cbticks = True
norm = matplotlib.colors.BoundaryNorm(clevs, 14) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,hail,clevs,norm=norm,cmap=cmap.hail_colormap)
title = "Hail Accumulation"
ftitle = 'hail-'
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def simudbz(): # plot simulated reflectivity, mp_physics dependent
# create figure
plt.figure(figsize=(8,8))
qrain = nc.variables['QRAIN'] # rain water mixing ratio
t2c = conv.k_to_c(t2[time]) #convert temp to Celsius
rhoa = calc.calc_rhoa(psfc[time], t2[time])
Qrain = qrain[time,1] # rain mixing ratio
Qrain = np.nan_to_num(Qrain) # change NaN to zeroes, change infs to finite numbers
try: #depends on MP scheme
Qsn = nc.variables['QSNOW'] # try to get snow mixing ratio
except:
Qsn = np.zeros(np.shape(qrain)) # else create zeros array same shape as qrain
Qsnow = Qsn[time,1] # snow mixing ratio
Qsnow = np.nan_to_num(Qsnow) # change NaN to zeros
dBZ = calc.calc_dbz(t2c, rhoa, Qrain, Qsnow)
clevs = np.arange(0,85,5)
norm = matplotlib.colors.BoundaryNorm(clevs, 17) # normalize levels
cs = m.contourf(x,y,dBZ,clevs,norm=norm,cmap=cmap.dbz_colormap)
title = "Simulated Reflectivity"
ftitle = 'simdbz-'
cblabel = 'dBZ'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def compodbz(): # plot composite reflectivity, mp_physics dependent
# create figure
plt.figure(figsize=(8,8))
try: #get refl from do_radar_ref=1
refl = nc.variables['REFL_10CM'][time]
dBZ = np.zeros(refl[0,0].shape)
dBZ = np.max(refl, axis=0)
#for i in range(len(refl[1,:,1])):
# for j in range(len(refl[1,1,:])):
# dBZ[i,j]=np.max(refl[:,i,j])
except: # calculate reflectivity
Qrainall = nc.variables['QRAIN'][time] # rain water mixing ratio at all levels
t2c = conv.k_to_c(t2[time]) #convert temp to Celsius
rhoa = calc.calc_rhoa(psfc[time], t2[time])
try: # depends on MP scheme
Qsn = nc.variables['QSNOW'] # try to get snow mixing ratio
except:
Qsn = np.zeros(np.shape(Qrainall)) # else create zeros array same shape as qrain
Qsnowall = Qsn[time] # get all Qsnow values at all levels for each time
Qrainmax = np.max(Qrainall, axis=0) #max rain QV
Qsnowmax = np.max(Qsnowall, axis=0) #max snow QV
dBZ = calc.calc_dbz(t2c, rhoa, Qrainmax, Qsnowmax)
clevs = np.arange(0,85,5)
norm = matplotlib.colors.BoundaryNorm(clevs, 17) # normalize levels
cs = m.contourf(x,y,dBZ,clevs,norm=norm,cmap=cmap.dbz_colormap)
title = "Composite Reflectivity"
ftitle = 'compdbz-'
cblabel = 'dBZ'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def lclhgt(): # plot lcl height
# create figure
plt.figure(figsize=(8,8))
q2 = nc.variables['Q2'][time] # water vapour mixing ratio at 2m
t2c = conv.k_to_c(t2[time]) #convert temp to Celsius
psfchpa = conv.pa_to_hpa(psfc[time])
es = calc.calc_es(t2c)
ws = calc.calc_ws(es, psfchpa)
rh = calc.calc_rh(q2, ws)
td = calc.calc_dewpoint(es, rh)
lcl = calc.calc_lcl(t2c, td)
clevs = np.arange(0,6000,500)
cs = m.contourf(x,y,lcl,clevs,cmap=cmap.lcl_colormap)
title = "LCL Height"
ftitle = 'lcl-'
cblabel = 'm'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def thetaE(): # plot theta-e
# create figure
plt.figure(figsize=(8,8))
theta = nc.variables['T'][time] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
theta = theta[0] + theta0 # total theta
psfchpa = conv.pa_to_hpa(psfc[time])
t2c = conv.k_to_c(t2[time]) #convert temp to Celsius
es = calc.calc_es(t2c)
ws = calc.calc_ws(es, psfchpa)
thetae = calc.calc_thetae(theta, t2[time], ws)
clevs = np.arange(260,372,4) # set by max and min of data
cs = m.contourf(x,y,thetae,clevs,cmap=cm.get_cmap('gist_ncar'))
title = "Theta-e"
ftitle = 'thetae-'
cblabel = 'K'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def h75lr(): # 700-500mb lapse rates
# create figure
plt.figure(figsize=(8,8))
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
theta = nc.variables['T'][time] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
totalTheta = theta + theta0 # total potential temp
totalT= conv.k_to_c(calc.theta_to_temp(totalTheta, totalp)) # calc temp in deg C
# interp temps to levels
totalT700 = funcs.linear_interp(totalT,totalp,700)
totalT500 = funcs.linear_interp(totalT,totalp,500)
# calc h7-h5 lapse rates
lr = totalT700 - totalT500
clevs = np.arange(5,10.5,.5) # conditionally unstable levels
cs = m.contourf(x,y,lr,clevs,cmap=cm.get_cmap('gist_ncar'))
title = "H7-H5 Lapse Rates"
ftitle = 'h75lr-'
cblabel = r'$\degree$C'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def absvort500(): # plot 500mb absolute vorticity
# create figure
plt.figure(figsize=(8,8))
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
U = funcs.unstagger(nc.variables['U'][time],'U') # U wind component UNSTAGGERED
V = funcs.unstagger(nc.variables['V'][time],'V') # V wind component
fcoriolis = calc.calc_fcoriolis(xlat[0])
uinterp = funcs.linear_interp(U,totalp,500) #interp to 500mb
vinterp = funcs.linear_interp(V,totalp,500)
vertvort = calc.calc_vertvort(uinterp, vinterp, dx)
avort = vertvort + fcoriolis # absolute vorticity
avort = np.multiply(avort, 1e5) # scale up for levels
clevs = np.arange(-6, 52, 2)
cs = m.contourf(x,y,avort,clevs,cmap=cm.get_cmap('gist_ncar'))
title = '500mb Absolute Vorticity'
ftitle = '500absvort-'
cblabel = r'$10^{-5} s^{-1}$'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def shr06(): # plot the 0-6km shear vector
# create figure
plt.figure(figsize=(8,8))
ph = nc.variables['PH'][time] #perturbation geopotential
phb = nc.variables['PHB'][time] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
U = funcs.unstagger(nc.variables['U'][time],'U') # U wind component # UNSTAGGERED
V = funcs.unstagger(nc.variables['V'][time],'V') # V wind component
u10kts = conv.ms_to_kts(u10[time]) # sfc wind in kts
v10kts = conv.ms_to_kts(v10[time])
u6 = funcs.interp_generic(6000, (totalgp/9.81), U) # interp to 6km
v6 = funcs.interp_generic(6000, (totalgp/9.81), V)
u6kts = conv.ms_to_kts(u6) # convert 6km wind to kts
v6kts = conv.ms_to_kts(v6)
#using 10m wind as sfc wind
ushr = u6kts - u10kts # calc 0-6 shr in kts
vshr = v6kts - v10kts
speed = calc.calc_wspeed(ushr, vshr)
# plot data
clevs = np.arange(20,145,5)
cs = m.contourf(x, y, speed, clevs, cmap=cm.get_cmap('gist_ncar'))
m.barbs(x[::thin,::thin], y[::thin,::thin], ushr[::thin,::thin], vshr[::thin,::thin],length=opt.barbsize) #plot barbs
title = '0-6km Shear'
ftitle = 'shr06-'
cblabel = 'kts'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def vertvol(): # plot the vertical velocity at levels. NEEDS CORRECTING TO VERTICAL MOTION OMEGA EQUATION
W = funcs.unstagger(nc.variables['W'][time],'W') # unstaggered vertical velocity
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
levels = opt.lvl.split(',') # get list of levels
for level in levels:
plt.figure(figsize=(8,8)) #create fig for each plot
level = int(level) # make it int
Wfinal = funcs.linear_interp(W,totalp,level) # interpolate W to levels
clevs = np.arange(-2.0,2.2,0.2)
cs = m.contourf(x,y,Wfinal,clevs,cmap=cm.get_cmap('gist_ncar'))
level = str(level)
title = level+'mb Vertical Velocity'
ftitle = level+'mbvv-'
cblabel = r'$ms^{-1}$'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def olr_to_temp(): # convert OLR to IR temp
plt.figure(figsize=(8,8))
olr = nc.variables['OLR'][time]
olrtemp = np.power(olr / 5.67e-8, 0.25) - 273.15 # calc temp using Stefan-Boltzmann law and convert to deg C
clevs = np.arange(-80, 36 ,4)
cs = m.contourf(x,y,olrtemp,clevs,cmap=cmap.irsat_colormap)
title = 'IR Brightness Temp'
ftitle = 'irtemp-'
cblabel = r'$\degree$C'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def pymeteo_skewt(): # uses pyMeteo package (https://github.com/cwebster2/pyMeteo) to plot skew-t for lat/lon. Credit Casey Webster
import pymeteo.skewt as skewt
try:
skewt.plot_wrf(filein,opt.slat,opt.slon,time,'skewt'+str(time)+'.png')
except:
print "LAT/LON NOT IN DOMAIN. QUITTING"
sys.exit()
def plot_skewt(): # plot skew-t by writing data to file and use SHARPpy available at: https://github.com/sharppy/SHARPpy
i, j = funcs.latlon_ij(opt.slat, opt.slon, xlat, xlong)
inlat = xlat[0,i,j]
inlon = xlong[0,i,j]
pb = nc.variables['PB'][time,:,i,j] #base state pressure, Pa
p = nc.variables['P'][time,:,i,j] # perturbation pressure, Pa
totalp = p + pb # total pressure
ph = nc.variables['PH'][time,:,i,j] #perturbation geopotential
phb = nc.variables['PHB'][time,:,i,j] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
U = nc.variables['U'][time,:,i,j] # U wind component
V = nc.variables['V'][time,:,i,j] # V wind component
theta = nc.variables['T'][time,:,i,j] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
totaltheta = theta+theta0 # total potential temp
qvapor = nc.variables['QVAPOR'][time,:,i,j] #water vapor mixing ratio kg/kg
#need to calc these variables for skewt
level = conv.pa_to_hpa(totalp) # levels in hPa
height = conv.gphgt_to_hgt(totalgp) # heights in m
temps = calc.theta_to_temp(totaltheta, totalp) # temps in degK
tempc = conv.k_to_c(temps) # temps in degC
es = calc.calc_es(tempc) # calc es
ws = calc.calc_ws(es, level) # calc ws
rh = calc.calc_rh(qvapor, ws) # calc rh
dwpt = calc.calc_dewpoint(es, rh) # calc dewpoint in degC
winddir = calc.calc_wdir(U, V) # calc wind dir
wspd = conv.ms_to_kts(calc.calc_wspeed(U, V)) # calc wind spd
skewt_data = funcs.skewt_data(timestamp, level, height, tempc, dwpt, winddir, wspd, inlat, inlon) # write the data to SPC file format
pltfuncs.do_sharppy(skewt_data) # use SHARPpy to plot skew-t
def updraft_hel(): # plot the 2-5km updraft helicity
plt.figure(figsize=(8,8))
U = funcs.unstagger(nc.variables['U'][time],'U') # U wind component # UNSTAGGERED
V = funcs.unstagger(nc.variables['V'][time],'V') # V wind component
W = funcs.unstagger(nc.variables['W'][time],'W') # unstaggered vertical velocity
ph = nc.variables['PH'][time] #perturbation geopotential
phb = nc.variables['PHB'][time] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
heights = totalgp / 9.81
levels = 6 # no of levels in between bottom and top of a layer (add extra one to get to very top of layer)
depth = 1000 # depth of layer
dz = depth / (levels-1) # increment / m
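# dz = 200 m, so each 1 km layer is sampled at six heights from its base to its top inclusive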
#create arrays to hold all the values at each level
u2km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
v2km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
u3km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
v3km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
u4km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
v4km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
#u5km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
#v5km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
w2km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
w3km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
w4km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
#w5km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
zeta2km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
zeta3km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
zeta4km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
#zeta5km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
for i in range(0,levels): # loop through to interpolate to levels and store in array
print "Interpolating...doing loop ", i, "of ", (levels-1)
increment = i*dz
u2km[i] = funcs.interp_generic(2000+increment, heights, U)
v2km[i] = funcs.interp_generic(2000+increment, heights, V)
u3km[i] = funcs.interp_generic(3000+increment, heights, U)
v3km[i] = funcs.interp_generic(3000+increment, heights, V)
u4km[i] = funcs.interp_generic(4000+increment, heights, U)
v4km[i] = funcs.interp_generic(4000+increment, heights, V)
#u5km[i] = funcs.interp_generic(5000+increment, heights, U)
#v5km[i] = funcs.interp_generic(5000+increment, heights, V)
w2km[i] = funcs.interp_generic(2000+increment, heights, W)
w3km[i] = funcs.interp_generic(3000+increment, heights, W)
w4km[i] = funcs.interp_generic(4000+increment, heights, W)
#w5km[i] = funcs.interp_generic(2000+increment, heights, W)
zeta2km[i] = calc.calc_vertvort(u2km[i], v2km[i], dx)
zeta3km[i] = calc.calc_vertvort(u3km[i], v3km[i], dx)
zeta4km[i] = calc.calc_vertvort(u4km[i], v4km[i], dx)
#zeta5km[i] = calc.calc_vertvort(u5km[i], v5km[i], dx)
# calc the layer mean
w2to3 = np.mean(w2km, axis=0)
w3to4 = np.mean(w3km, axis=0)
w4to5 = np.mean(w4km, axis=0)
zeta2to3 = np.mean(zeta2km, axis=0)
zeta3to4 = np.mean(zeta3km, axis=0)
zeta4to5 = np.mean(zeta4km, axis=0)
# calc the 2-5km UH
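# approximate the 2-5 km integral of w*zeta as the sum over three 1 km layers of (layer-mean w)*(layer-mean zeta)*1000 m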
UH = ( w2to3*zeta2to3 + w3to4*zeta3to4 + w4to5*zeta4to5 ) * 1000
#u2km = funcs.interp_generic(2000, heights, U)
#v2km = funcs.interp_generic(2000, heights, V)
#u3km = funcs.interp_generic(3000, heights, U)
#v3km = funcs.interp_generic(3000, heights, V)
#u4km = funcs.interp_generic(4000, heights, U)
#v4km = funcs.interp_generic(4000, heights, V)
#u5km = funcs.interp_generic(5000, heights, U)
#v5km = funcs.interp_generic(5000, heights, V)
#w2km = funcs.interp_generic(2000, heights, W)
#w3km = funcs.interp_generic(2000, heights, W)
#w4km = funcs.interp_generic(2000, heights, W)
#w5km = funcs.interp_generic(2000, heights, W)
#w2to3 = 0.5 * ( w2km + w3km )
#w3to4 = 0.5 * ( w3km + w4km )
#w4to5 = 0.5 * ( w4km + w5km )
#zeta2km = calc.calc_vertvort(u2km, v2km, dx)
#zeta3km = calc.calc_vertvort(u3km, v3km, dx)
#zeta4km = calc.calc_vertvort(u4km, v4km, dx)
#zeta5km = calc.calc_vertvort(u5km, v5km, dx)
#zeta2to3 = 0.5 * ( zeta2km + zeta3km )
#zeta3to4 = 0.5 * ( zeta3km + zeta4km )
#zeta4to5 = 0.5 * ( zeta4km + zeta5km )
#UH = ( w2to3*zeta2to3 + w3to4*zeta3to4 + w4to5*zeta4to5 ) * 1000
clevs = np.arange(0,210,10)
cs = m.contourf(x,y,UH,clevs,cmap=cmap.uh_colormap)
title = '2-5km Updraft Helicity'
ftitle = 'uh-'
cblabel = r'$m^{2}s^{-2}$'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
### END PLOT FUNCTIONS ###
flag = False # to check for plotting options
#### BEGIN TIME LOOP ####
for time in range(times.shape[0]):
currtime = str(''.join(times[time])).replace('_', ' ') #get current model time
filetime = currtime.translate(None, ':').replace(' ', '_') # time for filename
alltimes.append(currtime) # all times in output
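# build a DDMMYY/HHMM timestamp from the 'YYYY-MM-DD HH:MM:SS' model time (used by the SPC-format sounding file writer)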
timestamp = currtime[8:10]+currtime[5:7]+currtime[2:4]+'/'+currtime[11:13]+currtime[14:16]
if opt.t2: #plot 2m temp and wind barbs
print "Plotting Temperature and Wind Barbs for time: ", currtime
t2wind()
flag = True
if opt.mslp: #plot surface pressure only
print "Plotting MSLP for time: ", currtime
mslponly()
flag = True
if opt.ppnaccum: #plot total precipitation
print "Plotting Precipitation Accumulation for time: ", currtime
precipaccum()
flag = True
if opt.ppn: # plot current ppn
print "Plotting Precipitation for time: ", currtime
precip()
flag = True
if opt.convppn: # plot convective ppn
print "Plotting Convective Precipitation for time: ", currtime
convprecip()
flag = True
if opt.td or opt.rh: #plot dew point or RH
flag = True
if opt.td:
print "Plotting Dew Point for time: ", currtime
elif opt.rh:
print "Plotting RH for time: ", currtime
tdrh()
if opt.ua: #plot upper air charts
print "Plotting upper level chart for time: ", currtime
upperair()
flag = True
if opt.sfc: #plot surface chart. t2, wind and mslp
print "Plotting Surface Chart for time: ", currtime
surface()
flag = True
if opt.snow: #plot snow accumulation
print "Plotting Snow Accumulation for time: ", currtime
snowaccum()
flag = True
if opt.hail: #plot hail accumulation
print "Plotting Hail Accumulation for time: ", currtime
hailaccum()
flag = True
if opt.simdbz: #simulated reflectivity
print "Plotting Simulated Reflectivity for time: ", currtime
simudbz()
flag = True
if opt.compdbz: #composite reflectivity
print "Plotting Composite Reflectivity for time: ", currtime
compodbz()
flag = True
if opt.lcl: #plot LCL
print "Plotting LCL for time: ", currtime
lclhgt()
flag = True
if opt.thetae: #plot theta-e
print "Plotting Theta-e for time: ", currtime
thetaE()
flag= True
if opt.lr75: #plot h7-h5 lapse rates
print "Plotting H7-H5 lapse rates for time: ", currtime
h75lr()
flag = True
if opt.vort500: # plot 500mb absolute vorticity
print "Plotting 500mb absolute vorticity for time: ", currtime
absvort500()
flag = True
if opt.shear06:
print "Plotting 0-6km Shear for time: ", currtime
shr06()
flag = True
if opt.vv:
print "Plotting vertical velocity for time: ", currtime
vertvol()
flag = True
if opt.irtemp:
print "Plotting IR Brightness Temp for time: ", currtime
olr_to_temp()
flag = True
if opt.skewt:
print "Plotting Skew-t for time: ", currtime
pymeteo_skewt()
flag = True
if opt.getij:
print "Getting i, j for lat=",opt.slat, ', lon=',opt.slon
funcs.latlon_ij(opt.slat, opt.slon, xlat, xlong)
#print "A less accurate method:"
#funcs.latlon_ij2(opt.slat, opt.slon, xlat, xlong)
flag = True
sys.exit()
if opt.skewt2:
print "Plotting Skew-t for time: ", currtime
plot_skewt()
flag = True
if opt.uh25:
print "Plotting 2-5km Updraft Helicity for time: ", currtime
updraft_hel()
flag = True
if flag is False: # do this when no options given
print "Please provide options to plot. Use plotwrf.py --help"
print "QUITTING"
sys.exit()
#pass
#### END TIME LOOP ####
if opt.verbose: #verbose output
print "\n*VERBOSE OUTPUT*"
print "\nindir= ", indir
print "infile= ", filein
print "outdir=", outdir
print "Model initialisation time: ", init
print "Timestep: ", nc.variables['ITIMESTEP'][1]
print "Times in file: ", alltimes
print "west_east: ", x_dim
print "south_north: ", y_dim
print "Model dimentions (metres): ", width_meters, height_meters
print "dx, dy: ", dx, dy
print "Center lat: ", cen_lat
print "Center lon: ", cen_lon
print "Model top: ", nc.variables['P_TOP'][0]
print "Map projection: ", proj, '-' , projname
nc.close() # close netcdf file
metrics_test.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package hints
import (
"os"
"path/filepath"
"sort"
"testing"
"github.com/docker/docker/pkg/ioutils"
"github.com/stretchr/testify/assert"
"github.com/elastic/beats/v7/libbeat/common"
"github.com/elastic/beats/v7/libbeat/common/bus"
"github.com/elastic/beats/v7/libbeat/keystore"
"github.com/elastic/beats/v7/metricbeat/mb"
)
func TestGenerateHints(t *testing.T) {
tests := []struct {
message string
event bus.Event
len int
result common.MapStr
}{
{
message: "Empty event hints should return empty config",
event: bus.Event{
"host": "1.2.3.4",
"kubernetes": common.MapStr{
"container": common.MapStr{
"name": "foobar",
"id": "abc",
},
},
"docker": common.MapStr{
"container": common.MapStr{
"name": "foobar",
"id": "abc",
},
},
},
len: 0,
result: common.MapStr{},
},
{
message: "Hints without host should return nothing",
event: bus.Event{
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmodule",
},
},
},
len: 0,
result: common.MapStr{},
},
{
message: "Hints without matching port should return nothing",
event: bus.Event{
"host": "1.2.3.4",
"port": 9090,
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmoduledefaults",
"hosts": "${data.host}:8888",
},
},
},
len: 0,
result: common.MapStr{},
},
{
message: "Hints with multiple hosts return only the matching one",
event: bus.Event{
"host": "1.2.3.4",
"port": 9090,
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmoduledefaults",
"hosts": "${data.host}:8888,${data.host}:9090",
},
},
},
len: 1,
result: common.MapStr{
"module": "mockmoduledefaults",
"metricsets": []string{"default"},
"timeout": "3s",
"period": "1m",
"enabled": true,
"hosts": []interface{}{"1.2.3.4:9090"},
},
},
{
message: "Hints with multiple hosts return only the one with the template",
event: bus.Event{
"host": "1.2.3.4",
"port": 9090,
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmoduledefaults",
"hosts": "${data.host}:8888,${data.host}:${data.port}",
},
},
},
len: 1,
result: common.MapStr{
"module": "mockmoduledefaults",
"metricsets": []string{"default"},
"timeout": "3s",
"period": "1m",
"enabled": true,
"hosts": []interface{}{"1.2.3.4:9090"},
},
},
{
message: "Only module hint should return all metricsets",
event: bus.Event{
"host": "1.2.3.4",
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmodule",
},
},
},
len: 1,
result: common.MapStr{
"module": "mockmodule",
"metricsets": []string{"one", "two"},
"timeout": "3s",
"period": "1m",
"enabled": true,
},
},
{
message: "Metricsets hint works",
event: bus.Event{
"host": "1.2.3.4",
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmodule",
"metricsets": "one",
},
},
},
len: 1,
result: common.MapStr{
"module": "mockmodule",
"metricsets": []string{"one"},
"timeout": "3s",
"period": "1m",
"enabled": true,
},
},
{
message: "Only module, it should return defaults",
event: bus.Event{
"host": "1.2.3.4",
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmoduledefaults",
},
},
},
len: 1,
result: common.MapStr{
"module": "mockmoduledefaults",
"metricsets": []string{"default"},
"timeout": "3s",
"period": "1m",
"enabled": true,
},
},
{
message: "Module defined in modules as a JSON string should return a config",
event: bus.Event{
"host": "1.2.3.4",
"hints": common.MapStr{
"metrics": common.MapStr{
"raw": "{\"enabled\":true,\"metricsets\":[\"default\"],\"module\":\"mockmoduledefaults\",\"period\":\"1m\",\"timeout\":\"3s\"}",
},
},
},
len: 1,
result: common.MapStr{
"module": "mockmoduledefaults",
"metricsets": []string{"default"},
"timeout": "3s",
"period": "1m",
"enabled": true,
},
},
{
message: "Module, namespace, host hint should return valid config with port should return hosts for " +
"docker host network scenario",
event: bus.Event{
"host": "1.2.3.4",
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmoduledefaults",
"namespace": "test",
"hosts": "${data.host}:9090",
},
},
},
len: 1,
result: common.MapStr{
"module": "mockmoduledefaults",
"namespace": "test",
"metricsets": []string{"default"},
"timeout": "3s",
"period": "1m",
"enabled": true,
"hosts": []interface{}{"1.2.3.4:9090"},
},
},
{
message: "Module with processor config must return an module having the processor defined",
event: bus.Event{
"host": "1.2.3.4",
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmoduledefaults",
"namespace": "test",
"hosts": "${data.host}:9090",
"processors": common.MapStr{
"add_locale": common.MapStr{
"abbrevation": "MST",
},
},
},
},
},
len: 1,
result: common.MapStr{
"module": "mockmoduledefaults",
"namespace": "test",
"metricsets": []string{"default"},
"timeout": "3s",
"period": "1m",
"enabled": true,
"hosts": []interface{}{"1.2.3.4:9090"},
"processors": []interface{}{
map[string]interface{}{
"add_locale": map[string]interface{}{
"abbrevation": "MST",
},
},
},
},
},
{
message: "Module, namespace, host hint should return valid config",
event: bus.Event{
"host": "1.2.3.4",
"port": 9090,
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmoduledefaults",
"namespace": "test",
"hosts": "${data.host}:9090",
},
},
},
len: 1,
result: common.MapStr{
"module": "mockmoduledefaults",
"namespace": "test",
"metricsets": []string{"default"},
"hosts": []interface{}{"1.2.3.4:9090"},
"timeout": "3s",
"period": "1m",
"enabled": true,
},
},
{
message: "Module, namespace, host hint shouldn't return when port isn't the same has hint",
event: bus.Event{
"host": "1.2.3.4",
"port": 80,
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmoduledefaults",
"namespace": "test",
"hosts": "${data.host}:8080",
},
},
},
len: 0,
},
{
message: "Non http URLs with valid host port combination should return a valid config",
event: bus.Event{
"host": "1.2.3.4",
"port": 3306,
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmoduledefaults",
"namespace": "test",
"hosts": "tcp(${data.host}:3306)/",
},
},
},
len: 1,
result: common.MapStr{
"module": "mockmoduledefaults",
"namespace": "test",
"metricsets": []string{"default"},
"hosts": []interface{}{"tcp(1.2.3.4:3306)/"},
"timeout": "3s",
"period": "1m",
"enabled": true,
},
},
}
for _, test := range tests {
mockRegister := mb.NewRegister()
mockRegister.MustAddMetricSet("mockmodule", "one", NewMockMetricSet, mb.DefaultMetricSet())
mockRegister.MustAddMetricSet("mockmodule", "two", NewMockMetricSet, mb.DefaultMetricSet())
mockRegister.MustAddMetricSet("mockmoduledefaults", "default", NewMockMetricSet, mb.DefaultMetricSet())
mockRegister.MustAddMetricSet("mockmoduledefaults", "other", NewMockMetricSet)
m := metricHints{
Key: defaultConfig().Key,
Registry: mockRegister,
}
cfgs := m.CreateConfig(test.event)
assert.Equal(t, len(cfgs), test.len)
if len(cfgs) != 0 {
config := common.MapStr{}
err := cfgs[0].Unpack(&config)
assert.Nil(t, err, test.message)
// metricsets order is random, order it for tests
if v, err := config.GetValue("metricsets"); err == nil {
if msets, ok := v.([]interface{}); ok {
metricsets := make([]string, len(msets))
for i, v := range msets {
metricsets[i] = v.(string)
}
sort.Strings(metricsets)
config["metricsets"] = metricsets
}
}
assert.Equal(t, test.result, config, test.message)
}
}
}
func TestGenerateHintsDoesNotAccessGlobalKeystore(t *testing.T) {
path := getTemporaryKeystoreFile()
defer os.Remove(path)
// store the secret
keystore := createAnExistingKeystore(path, "stored_secret")
os.Setenv("PASSWORD", "env_secret")
tests := []struct {
message string
event bus.Event
len int
result common.MapStr
}{
{
message: "Module, namespace, host hint should return valid config",
event: bus.Event{
"host": "1.2.3.4",
"port": 9090,
"hints": common.MapStr{
"metrics": common.MapStr{
"module": "mockmoduledefaults",
"hosts": "${data.host}:9090",
"password": "${PASSWORD}",
},
},
"keystore": keystore,
},
len: 1,
result: common.MapStr{
"module": "mockmoduledefaults",
"metricsets": []string{"default"},
"hosts": []interface{}{"1.2.3.4:9090"},
"timeout": "3s",
"period": "1m",
"enabled": true,
"password": "env_secret",
},
},
}
for _, test := range tests {
mockRegister := mb.NewRegister()
mockRegister.MustAddMetricSet("mockmoduledefaults", "default", NewMockMetricSet, mb.DefaultMetricSet())
m := metricHints{
Key: defaultConfig().Key,
Registry: mockRegister,
}
cfgs := m.CreateConfig(test.event)
assert.Equal(t, len(cfgs), test.len)
if len(cfgs) != 0 {
config := common.MapStr{}
err := cfgs[0].Unpack(&config)
assert.Nil(t, err, test.message)
// metricsets order is random, order it for tests
if v, err := config.GetValue("metricsets"); err == nil {
if msets, ok := v.([]interface{}); ok {
metricsets := make([]string, len(msets))
for i, v := range msets {
metricsets[i] = v.(string)
}
sort.Strings(metricsets)
config["metricsets"] = metricsets
}
}
assert.Equal(t, test.result, config, test.message)
}
}
}
type MockMetricSet struct {
mb.BaseMetricSet
}
func NewMockMetricSet(base mb.BaseMetricSet) (mb.MetricSet, error) {
return &MockMetricSet{}, nil
}
func (ms *MockMetricSet) Fetch(report mb.Reporter) {
}
// createAnExistingKeystore creates a keystore containing an existing key
// `PASSWORD` set to the value of the `secret` argument.
func createAnExistingKeystore(path string, secret string) keystore.Keystore {
keyStore, err := keystore.NewFileKeystore(path)
// Fail fast in the test suite
if err != nil {
panic(err)
}
writableKeystore, err := keystore.AsWritableKeystore(keyStore)
if err != nil {
panic(err)
}
writableKeystore.Store("PASSWORD", []byte(secret))
writableKeystore.Save()
return keyStore
}
// getTemporaryKeystoreFile creates a temporary directory and returns the path of a keystore file inside it.
func getTemporaryKeystoreFile() string {
path, err := ioutils.TempDir("", "testing")
if err != nil {
panic(err)
}
return filepath.Join(path, "keystore")
} | assert.Equal(t, len(cfgs), test.len)
if len(cfgs) != 0 { |
0012_auto_20211027_2331.py | # Generated by Django 3.1.4 on 2021-10-27 18:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('roomate', '0011_auto_20211027_1651'),
]
operations = [
migrations.AlterField(
model_name='contactus',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField( | ),
migrations.AlterField(
model_name='user',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
] | model_name='dataform',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'), |
getVirtualWan.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20190701
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// VirtualWAN Resource.
func LookupVirtualWan(ctx *pulumi.Context, args *LookupVirtualWanArgs, opts ...pulumi.InvokeOption) (*LookupVirtualWanResult, error) {
var rv LookupVirtualWanResult
err := ctx.Invoke("azure-native:network/v20190701:getVirtualWan", args, &rv, opts...)
if err != nil |
return &rv, nil
}
type LookupVirtualWanArgs struct {
// The resource group name of the VirtualWan.
ResourceGroupName string `pulumi:"resourceGroupName"`
// The name of the VirtualWAN being retrieved.
VirtualWANName string `pulumi:"virtualWANName"`
}
// VirtualWAN Resource.
type LookupVirtualWanResult struct {
// True if branch to branch traffic is allowed.
AllowBranchToBranchTraffic *bool `pulumi:"allowBranchToBranchTraffic"`
// True if Vnet to Vnet traffic is allowed.
AllowVnetToVnetTraffic *bool `pulumi:"allowVnetToVnetTraffic"`
// Vpn encryption to be disabled or not.
DisableVpnEncryption *bool `pulumi:"disableVpnEncryption"`
// A unique read-only string that changes whenever the resource is updated.
Etag string `pulumi:"etag"`
// Resource ID.
Id *string `pulumi:"id"`
// Resource location.
Location string `pulumi:"location"`
// Resource name.
Name string `pulumi:"name"`
// The office local breakout category.
Office365LocalBreakoutCategory string `pulumi:"office365LocalBreakoutCategory"`
// List of all P2SVpnServerConfigurations associated with the virtual wan.
P2SVpnServerConfigurations []P2SVpnServerConfigurationResponse `pulumi:"p2SVpnServerConfigurations"`
// The provisioning state of the virtual WAN resource.
ProvisioningState string `pulumi:"provisioningState"`
// The Security Provider name.
SecurityProviderName *string `pulumi:"securityProviderName"`
// Resource tags.
Tags map[string]string `pulumi:"tags"`
// Resource type.
Type string `pulumi:"type"`
// List of VirtualHubs in the VirtualWAN.
VirtualHubs []SubResourceResponse `pulumi:"virtualHubs"`
// List of VpnSites in the VirtualWAN.
VpnSites []SubResourceResponse `pulumi:"vpnSites"`
}
| {
return nil, err
} |
lib.rs | //! Wrapper type for by-address hashing and comparison.
//! This crate is an extension of the [`by_address`] crate: it adds the short name ByAddr
//! for the core struct ByAddress and implements some From/Into-like conversion traits. This crate DOES depend on libstd,
//! unlike the original [`by_address`] crate, so it cannot be used in [`no_std`] projects.
//! The description below was taken mainly from the original crate.
//!
//! [ByAddr] can be used to wrap any pointer type (i.e. any type that implements the Deref
//! trait). This includes references, raw pointers, smart pointers like `Rc<T>` and `Box<T>`, and
//! specialized pointer-like types such as `Vec<T>` and `String`.
//!
//! Comparison, ordering, and hashing of the wrapped pointer will be based on the address of its
//! contents, rather than their value.
//!
//! ```
//! use by_addr::ByAddr;
//! use std::rc::Rc;
//!
//! let rc = Rc::new(5);
//! let x = ByAddr(rc.clone());
//! let y = ByAddr(rc.clone());
//!
//! // x and y are two pointers to the same address:
//! assert_eq!(x, y);
//!
//! // Same as let z = 5.into_byaddr();
//! // or ByAddr::from_target(5) (only for ByAddr<T> where T: Sized)
//! let z = ByAddr(Rc::new(5));
//!
//! // *x and *z have the same value, but not the same address:
//! assert_ne!(x, z);
//! ```
//!
//! You can use wrapped pointers as keys in hashed or ordered collections, like BTreeMap/BTreeSet
//! or HashMap/HashSet, even if the target of the pointer doesn't implement hashing or ordering.
//! This even includes pointers to trait objects, which usually don't implement the Eq trait
//! because it is not object-safe.
//!
//! ```
//! # use by_addr::ByAddr;
//! # use std::collections::HashSet;
//! #
//! /// Call each item in `callbacks`, skipping any duplicate references.
//! fn call_each_once(callbacks: &[&Fn()]) {
//! let mut seen: HashSet<ByAddr<&Fn()>> = HashSet::new();
//! for &f in callbacks {
//! if seen.insert(ByAddr(f)) {
//! f();
//! }
//! }
//! }
//! ```
//!
//! If `T` is a pointer to an unsized type, then comparison and ordering of `ByAddr<T>` compare
//! the entire fat pointer, not just the "thin" data address. This means that two slice pointers
//! are considered equal only if they have the same starting address *and* length.
//!
//! ```
//! # use by_addr::ByAddr;
//! #
//! let v = [1, 2, 3, 4];
//!
//! assert_eq!(ByAddr(&v[0..4]), ByAddr(&v[0..4])); // Same address and length.
//! assert_ne!(ByAddr(&v[0..4]), ByAddr(&v[0..2])); // Same address, different length.
//! ```
//!
//! [`no_std`]: https://doc.rust-lang.org/book/first-edition/using-rust-without-the-standard-library.html
//! [`by_address`]: https://docs.rs/by_address/1.0.4/by_address/
//! [ByAddr]: struct.ByAddr.html
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use by_address::ByAddress as ByAddr;
use std::ops::Deref;
pub trait FromTarget<T>: Deref {
fn from_target(t: T) -> Self;
}
impl<T, Y> FromTarget<Y> for ByAddr<T> where T: From<Y> + Deref {
fn from_target(t: Y) -> ByAddr<T> { ByAddr(t.into()) }
}
pub trait IntoByAddr<T>: Into<T> where T: Deref {
fn into_byaddr(self) -> ByAddr<T>;
}
impl<T, Y> IntoByAddr<T> for Y where Y: Into<T>, T: Deref + From<Y> {
fn | (self) -> ByAddr<T> { ByAddr::from_target(self) }
}
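// Illustrative sketch, not part of the original crate: a small test module
// showing how the conversion traits above can be used. The target type
// `Rc<i32>` is only an example choice.
#[cfg(test)]
mod conversion_examples {
use super::*;
use std::rc::Rc;
#[test]
fn into_byaddr_and_from_target() {
// Both forms wrap a fresh allocation, so the results differ by address.
let a: ByAddr<Rc<i32>> = 5i32.into_byaddr();
let b: ByAddr<Rc<i32>> = ByAddr::from_target(5i32);
assert_ne!(a, b);
// Clones of the same Rc share an address, so their wrappers compare equal.
let rc = Rc::new(5);
assert_eq!(ByAddr(rc.clone()), ByAddr(rc.clone()));
}
}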
| into_byaddr |
keyvalue.go | /*
Copyright © 2020 GUILLAUME FOURNIER
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package keyvalue
import (
"encoding/binary"
"fmt"
"math/rand"
"net"
"unsafe"
"github.com/pkg/errors"
"github.com/Gui774ume/network-security-probe/pkg/model/kernel"
"github.com/Gui774ume/network-security-probe/pkg/utils"
)
// Cookie - Unique cookie used to identify a profile in the kernel
type Cookie uint32
// CookieValue - Cookie structure used as value in hashmaps
type CookieValue struct {
Cookie Cookie
}
// NewCookie - Returns a new cookie randomly generated
func NewCookie() Cookie {
return Cookie(rand.Uint32())
}
// CookieListContains - Checks if a cookie is in a list of cookies.
// The function returns the index of the first match, or -1 if there is no match.
func CookieListContains(cookies []Cookie, cookie Cookie) int {
for index, c := range cookies {
if c == cookie {
return index
}
}
return -1
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (cv *CookieValue) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
valueB, err := utils.InterfaceToBytes(cv, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&valueB[0]), nil
}
// NewRandomMapName - Returns a new map name randomly generated
func NewRandomMapName() string {
allowedCharacters := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
allowedCharactersLen := len(allowedCharacters)
nameLength := 15
b := make([]rune, nameLength)
for i := range b {
b[i] = allowedCharacters[rand.Intn(allowedCharactersLen)]
}
return string(b)
}
// SecurityProfileActionValue - Security profile action used for value in hashmaps
type SecurityProfileActionValue struct {
Action kernel.SecurityProfileAction
}
// NetworkAttack - Network attack
type NetworkAttack uint32
const (
// EmptyNetworkAttack - Used to specify that no attack was selected. This is the default.
EmptyNetworkAttack NetworkAttack = 0
// ArpSpoofing - ARP spoofing network attack
ArpSpoofing NetworkAttack = 1 << 0
)
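// Illustrative note, not part of the original file: NetworkAttack values are
// bit flags, so several attacks can be combined in a single value and tested
// with bitwise operators, e.g.:
//
//	attacks := ArpSpoofing
//	if attacks&ArpSpoofing != 0 {
//		// ARP spoofing detection requested
//	}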
func (na NetworkAttack) String() string {
switch na {
case EmptyNetworkAttack:
return "EmptyNetworkAttack"
case ArpSpoofing:
return "ArpSpoofing"
default:
return "Unknown"
}
}
// KeyValue - Key value of a rule in a profile
type KeyValue struct {
Key Key | func (kv *KeyValue) String() string {
return fmt.Sprintf("%v", *kv)
}
// GetMapSection - Returns the map section of the hashmap in which the key-value should live
func (kv *KeyValue) GetMapSection() string {
return kv.Key.GetMapSection()
}
// GetKey - Returns the unsafe pointer to the key
func (kv *KeyValue) GetKey(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
return kv.Key.GetUnsafePointer(byteOrder)
}
// GetValue - Returns the unsafe pointer to the value
func (kv *KeyValue) GetValue(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
valueB, err := utils.InterfaceToBytes(kv.Value, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&valueB[0]), nil
}
// Key - Key interface
type Key interface {
GetMapSection() string
GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error)
String() string
}
// NetworkAttacksKey - Network attacks key structure
type NetworkAttacksKey struct {
Cookie Cookie
}
func (nak *NetworkAttacksKey) String() string {
return fmt.Sprintf("%v", *nak)
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (nak *NetworkAttacksKey) GetMapSection() string {
return "network_attacks_rules"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (nak *NetworkAttacksKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(nak, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// NetworkAttacksValue - Network attack value
type NetworkAttacksValue struct {
Value NetworkAttack
}
// ActionKey - Action key structure
type ActionKey struct {
Cookie Cookie
}
func (ak *ActionKey) String() string {
return fmt.Sprintf("%v", *ak)
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (ak *ActionKey) GetMapSection() string {
return "action_rules"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (ak *ActionKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(ak, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// ActionValue - Action value
type ActionValue struct {
Action kernel.SecurityProfileAction
}
// ProtocolKey - Protocol key structure
type ProtocolKey struct {
Cookie Cookie
Protocol uint16
TrafficType kernel.TrafficType
Layer uint8
}
func (pk *ProtocolKey) String() string {
return fmt.Sprintf("%v", *pk)
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (pk *ProtocolKey) GetMapSection() string {
return "protocol_rules"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (pk *ProtocolKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(pk, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// ProtocolPortKey - ProtocolPortKey key structure
type ProtocolPortKey struct {
Cookie Cookie
Protocol uint16
Port uint16
TrafficType kernel.TrafficType
}
func (ppk *ProtocolPortKey) String() string {
return fmt.Sprintf("%v", *ppk)
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (ppk *ProtocolPortKey) GetMapSection() string {
return "protocol_port_rules"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (ppk *ProtocolPortKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(ppk, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// DNSKey - DNS key structure
type DNSKey struct {
DNS [kernel.DNSMaxLength]byte
Cookie Cookie
TrafficType kernel.TrafficType
Layer uint8
}
func (k *DNSKey) String() string {
return fmt.Sprintf("%v", *k)
}
// NewDNSKey - Creates a new DNSKey and encodes the domain string appropriately
func NewDNSKey(tt kernel.TrafficType, cookie Cookie, layer uint8, dns string) (*DNSKey, error) {
encodedName, err := utils.EncodeDNS(dns)
if err != nil {
return nil, errors.Wrap(err, "couldn't encode DNS name")
}
rep := DNSKey{
TrafficType: tt,
Cookie: cookie,
Layer: layer,
DNS: encodedName,
}
return &rep, nil
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (k *DNSKey) GetMapSection() string {
return "dns_rules"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (k *DNSKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(k, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// HTTPKey - Http key structure
type HTTPKey struct {
TrafficType kernel.TrafficType
Cookie Cookie
Method [kernel.HTTPMaxMethodLength]byte
URI [kernel.HTTPMaxURILength]byte
}
func (k *HTTPKey) String() string {
return fmt.Sprintf("%v", *k)
}
// NewHTTPKey - Creates a new HTTPKey and encodes the method and URI appropriately
func NewHTTPKey(tt kernel.TrafficType, cookie Cookie, method string, uri string) *HTTPKey {
rep := HTTPKey{
TrafficType: tt,
Cookie: cookie,
Method: [kernel.HTTPMaxMethodLength]byte{},
URI: [kernel.HTTPMaxURILength]byte{},
}
copy(rep.Method[:], method)
copy(rep.URI[:], uri)
return &rep
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (k HTTPKey) GetMapSection() string {
return "http_rules"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (k *HTTPKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(k, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// PIDnsKey - Process namespace key
type PIDnsKey struct {
NS uint64
}
func (pk *PIDnsKey) String() string {
return fmt.Sprintf("%v", *pk)
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (pk PIDnsKey) GetMapSection() string {
return "pidns_profile_id"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (pk *PIDnsKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(pk, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// NETnsKey - Network namespace key
type NETnsKey struct {
NS uint64
}
func (nk *NETnsKey) String() string {
return fmt.Sprintf("%v", *nk)
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (nk NETnsKey) GetMapSection() string {
return "netns_profile_id"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (nk *NETnsKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(nk, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// PIDKey - Process key
type PIDKey struct {
PID uint32
}
func (pk *PIDKey) String() string {
return fmt.Sprintf("%v", *pk)
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (pk PIDKey) GetMapSection() string {
return "pid_binary_id"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (pk *PIDKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(pk, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// BinaryPathKey - Binary path key
type BinaryPathKey struct {
Cookie Cookie
Path [kernel.PathMax]byte
}
func (bpk *BinaryPathKey) String() string {
return fmt.Sprintf("%v", *bpk)
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (bpk BinaryPathKey) GetMapSection() string {
return "path_binary_id"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (bpk *BinaryPathKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(bpk, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// MapOfMapsKeyValue - Key value of a rule in a profile
type MapOfMapsKeyValue struct {
MapOfMapsKey *KeyValue
MapSectionToClone string
Keys []*KeyValue
}
func (momkv *MapOfMapsKeyValue) String() string {
return fmt.Sprintf("%v", *momkv)
}
// CIDRKey - CIDR key
type CIDRKey struct {
Prefix uint32
Data [16]uint8
}
func (k *CIDRKey) String() string {
return fmt.Sprintf("%v", *k)
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (k *CIDRKey) GetMapSection() string {
return "cidr_ranges"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (k *CIDRKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(k, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// CIDRRouterKey - CIDR router key
type CIDRRouterKey struct {
Cookie Cookie
IPVersion kernel.NetworkProtocol
TrafficType kernel.TrafficType
}
func (k *CIDRRouterKey) String() string {
return fmt.Sprintf("%v", *k)
}
// GetMapSection - Returns the kernel map section of the hashmap for this key
func (k *CIDRRouterKey) GetMapSection() string {
return "cidr_rules"
}
// GetUnsafePointer - Returns an unsafe Pointer to the data
func (k *CIDRRouterKey) GetUnsafePointer(byteOrder binary.ByteOrder) (unsafe.Pointer, error) {
keyB, err := utils.InterfaceToBytes(k, byteOrder)
if err != nil {
return nil, err
}
return unsafe.Pointer(&keyB[0]), nil
}
// NewCIDRMapOfMapsKeyValue - Creates a new MapOfMaps key-value for the provided cidrs
func NewCIDRMapOfMapsKeyValue(cidrs []string, cookie Cookie, tt kernel.TrafficType, ipVersion kernel.NetworkProtocol, action interface{}) (*MapOfMapsKeyValue, error) {
cidrKv := MapOfMapsKeyValue{
MapOfMapsKey: &KeyValue{
Key: &CIDRRouterKey{
TrafficType: tt,
Cookie: cookie,
IPVersion: ipVersion,
},
Value: nil,
},
MapSectionToClone: "cidr_ranges",
Keys: []*KeyValue{},
}
for _, cidr := range cidrs {
ip, net, err := net.ParseCIDR(cidr)
if err != nil {
return nil, errors.Wrapf(err, "couldn't parse CIDR %v", cidr)
}
prefix, _ := net.Mask.Size()
cidrk := CIDRKey{
Prefix: uint32(prefix),
}
switch ipVersion {
case kernel.EthPIP:
ip4 := ip.To4()
if ip4 == nil {
return nil, errors.New("invalid IPv4 addr")
}
copy(cidrk.Data[:], ip4)
case kernel.EthPIPV6:
ip6 := ip.To16()
if ip6 == nil {
return nil, errors.New("invalid IPv6 addr")
}
copy(cidrk.Data[:], ip6)
}
cidrkv := KeyValue{
Key: &cidrk,
Value: action,
}
cidrKv.Keys = append(cidrKv.Keys, &cidrkv)
}
return &cidrKv, nil
} | Value interface{}
}
|
load.go | package configure
import (
"gopkg.in/ini.v1"
"github.com/dev-pipeline/dpl-go/pkg/dpl"
"github.com/dev-pipeline/dpl-go/pkg/dpl/configfile"
)
func applyConfig(config *ini.File) (dpl.Project, error) |
func loadRawConfig(data []byte) (dpl.Project, error) {
configFile, err := configfile.LoadRawConfig(data)
if err != nil {
return nil, err
}
project, err := applyConfig(configFile)
if err != nil {
return nil, err
}
return project, nil
}
func loadConfig(path string) (dpl.Project, error) {
configFile, err := configfile.LoadProjectConfig(path)
if err != nil {
return nil, err
}
project, err := applyConfig(configFile)
if err != nil {
return nil, err
}
return project, nil
}
| {
project := IniProject{
config: config,
}
for _, component := range config.Sections() {
if component.Name() != ini.DEFAULT_SECTION {
projectComponent := IniComponent{
config: component,
}
err := dpl.ValidateComponent(&projectComponent)
if err != nil {
return nil, err
}
}
}
err := dpl.ValidateProject(&project)
if err != nil {
return nil, err
}
return &project, nil
} |
lib.rs | #[macro_use]
extern crate lazy_static;
mod bterm;
mod consoles;
pub mod embedding;
mod gamestate;
mod hal;
mod initializer;
mod input;
pub mod rex;
pub type BResult<T> = anyhow::Result<T, Box<dyn std::error::Error + Send + Sync>>;
pub(crate) use input::clear_input_state;
pub type FontCharType = u16;
pub use consoles::console;
#[cfg(all(
feature = "opengl",
any(
feature = "crossterm",
any(
feature = "curses",
any(feature = "amethyst_engine_vulkan", feature = "amethyst_engine_metal")
)
)
))]
compile_error!("Default features (opengl) must be disabled for other back-ends");
pub mod prelude {
pub use crate::BResult;
pub use crate::bterm::*;
pub use crate::consoles::*;
pub use crate::embedding;
pub use crate::embedding::EMBED;
pub use crate::gamestate::GameState;
pub use crate::hal::{init_raw, BTermPlatform, Font, InitHints, Shader, BACKEND};
pub use crate::initializer::*;
pub use crate::input::{BEvent, Input, INPUT};
pub use crate::rex;
pub use crate::rex::*;
pub use crate::FontCharType;
pub use bracket_color::prelude::*;
pub use bracket_geometry::prelude::*;
pub type BError = std::result::Result<(), Box<dyn std::error::Error + Send + Sync>>;
#[cfg(all(feature = "opengl", not(target_arch = "wasm32")))] | pub use crate::hal::GlCallback;
#[cfg(all(
not(feature = "opengl"),
any(feature = "amethyst_engine_vulkan", feature = "amethyst_engine_metal")
))]
pub use amethyst::input::VirtualKeyCode;
#[cfg(target_arch = "wasm32")]
pub use crate::hal::VirtualKeyCode;
#[cfg(feature = "curses")]
pub use crate::hal::VirtualKeyCode;
#[cfg(feature = "crossterm")]
pub use crate::hal::VirtualKeyCode;
}
#[macro_export]
macro_rules! add_wasm_support {
() => {
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen(start))]
pub fn wasm_main() {
main().expect("Error in main");
}
};
}
#[macro_export]
macro_rules! embedded_resource {
($resource_name : ident, $filename : expr) => {
const $resource_name: &'static [u8] = include_bytes!($filename);
};
}
#[macro_export]
macro_rules! link_resource {
($resource_name : ident, $filename : expr) => {
EMBED
.lock()
.add_resource($filename.to_string(), $resource_name);
};
} | pub use glutin::event::VirtualKeyCode;
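// Illustrative usage sketch, not part of the original file: the two macros
// above are typically paired, embedding a resource at compile time and then
// registering it with the EMBED store at startup. The paths below are
// placeholders.
//
// embedded_resource!(TILE_FONT, "../resources/example_font.png");
// fn main() {
//     link_resource!(TILE_FONT, "resources/example_font.png");
//     // ... build a BTerm via the initializer and run the main loop ...
// }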
#[cfg(all(feature = "opengl", not(target_arch = "wasm32")))] |
delete_message.rs | use crate::{
client::Client,
request::{self, AuditLogReason, AuditLogReasonError, Request},
response::{marker::EmptyBody, ResponseFuture},
routing::Route,
};
use twilight_model::id::{ChannelId, MessageId};
/// Delete a message by [`ChannelId`] and [`MessageId`].
pub struct | <'a> {
channel_id: ChannelId,
http: &'a Client,
message_id: MessageId,
reason: Option<&'a str>,
}
impl<'a> DeleteMessage<'a> {
pub(crate) const fn new(
http: &'a Client,
channel_id: ChannelId,
message_id: MessageId,
) -> Self {
Self {
channel_id,
http,
message_id,
reason: None,
}
}
/// Execute the request, returning a future resolving to a [`Response`].
///
/// [`Response`]: crate::response::Response
pub fn exec(self) -> ResponseFuture<EmptyBody> {
let mut request = Request::builder(&Route::DeleteMessage {
channel_id: self.channel_id.0,
message_id: self.message_id.0,
});
if let Some(reason) = &self.reason {
let header = match request::audit_header(reason) {
Ok(header) => header,
Err(source) => return ResponseFuture::error(source),
};
request = request.headers(header);
}
self.http.request(request.build())
}
}
impl<'a> AuditLogReason<'a> for DeleteMessage<'a> {
fn reason(mut self, reason: &'a str) -> Result<Self, AuditLogReasonError> {
self.reason.replace(AuditLogReasonError::validate(reason)?);
Ok(self)
}
}
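// Illustrative usage sketch, not part of the original file (the surrounding
// crate is assumed to expose `Client::delete_message`):
//
// let channel_id = ChannelId(1);
// let message_id = MessageId(2);
// client.delete_message(channel_id, message_id)
//     .reason("spam cleanup")?
//     .exec()
//     .await?;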
| DeleteMessage |
CISCO-MGX82XX-MODULE-RSRC-PART-MIB.py | #
# PySNMP MIB module CISCO-MGX82XX-MODULE-RSRC-PART-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-MGX82XX-MODULE-RSRC-PART-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:50:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint")
cardGeneric, = mibBuilder.importSymbols("BASIS-MIB", "cardGeneric")
ciscoWan, = mibBuilder.importSymbols("CISCOWAN-SMI", "ciscoWan")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Counter32, Unsigned32, TimeTicks, Counter64, ModuleIdentity, Gauge32, Integer32, NotificationType, IpAddress, ObjectIdentity, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Unsigned32", "TimeTicks", "Counter64", "ModuleIdentity", "Gauge32", "Integer32", "NotificationType", "IpAddress", "ObjectIdentity", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoMgx82xxModuleRsrcPartMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 351, 150, 73))
ciscoMgx82xxModuleRsrcPartMIB.setRevisions(('2003-04-18 00:00',))
if mibBuilder.loadTexts: ciscoMgx82xxModuleRsrcPartMIB.setLastUpdated('200304180000Z')
if mibBuilder.loadTexts: ciscoMgx82xxModuleRsrcPartMIB.setOrganization('Cisco Systems, Inc.')
cardResourcePartition = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 110, 2, 9)) | cardResPartGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1), ).setIndexNames((0, "CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartCtrlrNum"))
if mibBuilder.loadTexts: cardResPartGrpEntry.setStatus('current')
cardResPartCtrlrNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("par", 1), ("pnni", 2), ("tag", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cardResPartCtrlrNum.setStatus('current')
cardResPartRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("add", 1), ("del", 2), ("mod", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardResPartRowStatus.setStatus('current')
cardResPartNumOfLcnAvail = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardResPartNumOfLcnAvail.setStatus('current')
cmmRsrcPartMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2))
cmmRsrcPartMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 1))
cmmRsrcPartMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 2))
cmmRsrcPartCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 1, 1)).setObjects(("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cmmRsrcPartGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cmmRsrcPartCompliance = cmmRsrcPartCompliance.setStatus('current')
cmmRsrcPartGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 2, 1)).setObjects(("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardLcnPartitionType"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartCtrlrNum"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartRowStatus"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartNumOfLcnAvail"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cmmRsrcPartGroup = cmmRsrcPartGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", cardResPartGrpTable=cardResPartGrpTable, ciscoMgx82xxModuleRsrcPartMIB=ciscoMgx82xxModuleRsrcPartMIB, cmmRsrcPartMIBConformance=cmmRsrcPartMIBConformance, cmmRsrcPartMIBCompliances=cmmRsrcPartMIBCompliances, cmmRsrcPartGroup=cmmRsrcPartGroup, cardResPartNumOfLcnAvail=cardResPartNumOfLcnAvail, cardResourcePartition=cardResourcePartition, cmmRsrcPartMIBGroups=cmmRsrcPartMIBGroups, cmmRsrcPartCompliance=cmmRsrcPartCompliance, cardResPartRowStatus=cardResPartRowStatus, cardResPartCtrlrNum=cardResPartCtrlrNum, cardLcnPartitionType=cardLcnPartitionType, PYSNMP_MODULE_ID=ciscoMgx82xxModuleRsrcPartMIB, cardResPartGrpEntry=cardResPartGrpEntry) | cardLcnPartitionType = MibScalar((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noPartition", 1), ("controllerBased", 2), ("portControllerBased", 3))).clone('noPartition')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardLcnPartitionType.setStatus('current')
cardResPartGrpTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2), )
if mibBuilder.loadTexts: cardResPartGrpTable.setStatus('current') |
classpatterntracker_1_1_main_tracker_activity.js | var classpatterntracker_1_1_main_tracker_activity =
[
[ "onCreate", "classpatterntracker_1_1_main_tracker_activity.html#a59f3320e11219df426a770cc7bbaeb03", null ],
[ "onDestroy", "classpatterntracker_1_1_main_tracker_activity.html#aa82ac0c5691edcbab5795bc2375e0395", null ],
[ "onPause", "classpatterntracker_1_1_main_tracker_activity.html#a6d798ce9b45b059dc73e2bbc6de605db", null ],
[ "onResume", "classpatterntracker_1_1_main_tracker_activity.html#a531f86975d1f900036a05dfccae5e0bb", null ],
[ "setDebugButton", "classpatterntracker_1_1_main_tracker_activity.html#ae486f90754f0029f150f4687f6be2a9d", null ],
[ "setUpdateGeometryButton", "classpatterntracker_1_1_main_tracker_activity.html#aaecc165bbc090c54b02b65e9558ed0ce", null ], | [ "updateText", "classpatterntracker_1_1_main_tracker_activity.html#a3c45c2321172c527f14382897030b67a", null ],
[ "mView", "classpatterntracker_1_1_main_tracker_activity.html#a4f7c9749e1f0235a73523c8591c92955", null ],
[ "mWL", "classpatterntracker_1_1_main_tracker_activity.html#aa472485695cc106e4f338a1350f98ead", null ]
]; | |
size.go | // Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package txsizes
import (
"github.com/ltcsuite/ltcd/blockchain"
"github.com/ltcsuite/ltcd/wire"
)
// Worst case script and input/output size estimates.
const (
// RedeemP2PKHSigScriptSize is the worst case (largest) serialize size
// of a transaction input script that redeems a compressed P2PKH output.
// It is calculated as:
//
// - OP_DATA_73
// - 72 bytes DER signature + 1 byte sighash
// - OP_DATA_33
// - 33 bytes serialized compressed pubkey
RedeemP2PKHSigScriptSize = 1 + 73 + 1 + 33
// P2PKHPkScriptSize is the size of a transaction output script that
// pays to a compressed pubkey hash. It is calculated as:
//
// - OP_DUP
// - OP_HASH160
// - OP_DATA_20
// - 20 bytes pubkey hash
// - OP_EQUALVERIFY
// - OP_CHECKSIG
P2PKHPkScriptSize = 1 + 1 + 1 + 20 + 1 + 1
// RedeemP2PKHInputSize is the worst case (largest) serialize size of a
// transaction input redeeming a compressed P2PKH output. It is
// calculated as:
//
// - 32 bytes previous tx
// - 4 bytes output index
// - 1 byte compact int encoding value 107
// - 107 bytes signature script
// - 4 bytes sequence
RedeemP2PKHInputSize = 32 + 4 + 1 + RedeemP2PKHSigScriptSize + 4
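// Worked out numerically (an illustrative note, not from the original
// source): RedeemP2PKHSigScriptSize = 1 + 73 + 1 + 33 = 108 bytes, so
// RedeemP2PKHInputSize = 32 + 4 + 1 + 108 + 4 = 149 bytes per input.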
// P2PKHOutputSize is the serialize size of a transaction output with a
// P2PKH output script. It is calculated as:
//
// - 8 bytes output value
// - 1 byte compact int encoding value 25
// - 25 bytes P2PKH output script
P2PKHOutputSize = 8 + 1 + P2PKHPkScriptSize
// P2WPKHPkScriptSize is the size of a transaction output script that
// pays to a witness pubkey hash. It is calculated as:
//
// - OP_0
// - OP_DATA_20
// - 20 bytes pubkey hash
P2WPKHPkScriptSize = 1 + 1 + 20
// P2WPKHOutputSize is the serialize size of a transaction output with a
// P2WPKH output script. It is calculated as:
//
// - 8 bytes output value | // - 22 bytes P2WPKH output script
P2WPKHOutputSize = 8 + 1 + P2WPKHPkScriptSize
// RedeemP2WPKHScriptSize is the size of a transaction input script
// that spends a pay-to-witness-public-key hash (P2WPKH). The redeem
// script for P2WPKH spends MUST be empty.
RedeemP2WPKHScriptSize = 0
// RedeemP2WPKHInputSize is the worst case size of a transaction
// input redeeming a P2WPKH output. It is calculated as:
//
// - 32 bytes previous tx
// - 4 bytes output index
// - 1 byte encoding empty redeem script
// - 0 bytes redeem script
// - 4 bytes sequence
RedeemP2WPKHInputSize = 32 + 4 + 1 + RedeemP2WPKHScriptSize + 4
// RedeemNestedP2WPKHScriptSize is the worst case size of a transaction
// input script that redeems a pay-to-witness-key hash nested in P2SH
// (P2SH-P2WPKH). It is calculated as:
//
// - 1 byte compact int encoding value 22
// - OP_0
// - 1 byte compact int encoding value 20
// - 20 byte key hash
RedeemNestedP2WPKHScriptSize = 1 + 1 + 1 + 20
// RedeemNestedP2WPKHInputSize is the worst case size of a
// transaction input redeeming a P2SH-P2WPKH output. It is
// calculated as:
//
// - 32 bytes previous tx
// - 4 bytes output index
// - 1 byte compact int encoding value 23
// - 23 bytes redeem script (scriptSig)
// - 4 bytes sequence
RedeemNestedP2WPKHInputSize = 32 + 4 + 1 +
RedeemNestedP2WPKHScriptSize + 4
// RedeemP2WPKHInputWitnessWeight is the worst case weight of
// a witness for spending P2WPKH and nested P2WPKH outputs. It
// is calculated as:
//
// - 1 wu compact int encoding value 2 (number of items)
// - 1 wu compact int encoding value 73
// - 72 wu DER signature + 1 wu sighash
// - 1 wu compact int encoding value 33
// - 33 wu serialized compressed pubkey
RedeemP2WPKHInputWitnessWeight = 1 + 1 + 73 + 1 + 33
)
// SumOutputSerializeSizes sums up the serialized size of the supplied outputs.
func SumOutputSerializeSizes(outputs []*wire.TxOut) (serializeSize int) {
for _, txOut := range outputs {
serializeSize += txOut.SerializeSize()
}
return serializeSize
}
// EstimateSerializeSize returns a worst case serialize size estimate for a
// signed transaction that spends inputCount number of compressed P2PKH outputs
// and contains each transaction output from txOuts. The estimated size is
// incremented for an additional P2PKH change output if addChangeOutput is true.
func EstimateSerializeSize(inputCount int, txOuts []*wire.TxOut, addChangeOutput bool) int {
changeSize := 0
outputCount := len(txOuts)
if addChangeOutput {
changeSize = P2PKHOutputSize
outputCount++
}
// 8 additional bytes are for version and locktime
return 8 + wire.VarIntSerializeSize(uint64(inputCount)) +
wire.VarIntSerializeSize(uint64(outputCount)) +
inputCount*RedeemP2PKHInputSize +
SumOutputSerializeSizes(txOuts) +
changeSize
}
// EstimateVirtualSize returns a worst case virtual size estimate for a
// signed transaction that spends the given number of P2PKH, P2WPKH and
// (nested) P2SH-P2WPKH outputs, and contains each transaction output
// from txOuts. The estimate is incremented for an additional P2PKH
// change output if addChangeOutput is true.
func EstimateVirtualSize(numP2PKHIns, numP2WPKHIns, numNestedP2WPKHIns int,
txOuts []*wire.TxOut, addChangeOutput bool) int {
changeSize := 0
outputCount := len(txOuts)
if addChangeOutput {
// We are always using P2WPKH as change output.
changeSize = P2WPKHOutputSize
outputCount++
}
// Version 4 bytes + LockTime 4 bytes + Serialized var int size for the
// number of transaction inputs and outputs + size of redeem scripts +
// the size out the serialized outputs and change.
baseSize := 8 +
wire.VarIntSerializeSize(
uint64(numP2PKHIns+numP2WPKHIns+numNestedP2WPKHIns)) +
wire.VarIntSerializeSize(uint64(len(txOuts))) +
numP2PKHIns*RedeemP2PKHInputSize +
numP2WPKHIns*RedeemP2WPKHInputSize +
numNestedP2WPKHIns*RedeemNestedP2WPKHInputSize +
SumOutputSerializeSizes(txOuts) +
changeSize
// If this transaction has any witness inputs, we must count the
// witness data.
witnessWeight := 0
if numP2WPKHIns+numNestedP2WPKHIns > 0 {
// Additional 2 weight units for segwit marker + flag.
witnessWeight = 2 +
wire.VarIntSerializeSize(
uint64(numP2WPKHIns+numNestedP2WPKHIns)) +
numP2WPKHIns*RedeemP2WPKHInputWitnessWeight +
numNestedP2WPKHIns*RedeemP2WPKHInputWitnessWeight
}
// We add 3 to the witness weight to make sure the result is
// always rounded up.
return baseSize + (witnessWeight+3)/blockchain.WitnessScaleFactor
} | // - 1 byte compact int encoding value 22 |
download.py | # -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re
import sys
from command import Command
from error import GitError
CHANGE_RE = re.compile(r'^([1-9][0-9]*)(?:[/\.-]([1-9][0-9]*))?$')
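# Illustrative note, not part of the original file: CHANGE_RE accepts a bare
# change number or a change/patchset pair, e.g. "12345", "12345/6", "12345.6"
# or "12345-6"; group 1 is the change id and group 2 the optional patchset.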
class Download(Command):
common = True
helpSummary = "Download and checkout a change"
helpUsage = """
%prog {[project] change[/patchset]}...
"""
helpDescription = """
The '%prog' command downloads a change from the review system and
makes it available in your project's local working directory.
If no project is specified, the current directory is used as the project.
"""
def _Options(self, p):
|
def _ParseChangeIds(self, args):
if not args:
self.Usage()
to_get = []
project = None
for a in args:
m = CHANGE_RE.match(a)
if m:
if not project:
project = self.GetProjects(".")[0]
chg_id = int(m.group(1))
if m.group(2):
ps_id = int(m.group(2))
else:
ps_id = 1
refs = 'refs/changes/%2.2d/%d/' % (chg_id % 100, chg_id)
output = project._LsRemote(refs + '*')
if output:
regex = refs + r'(\d+)'
rcomp = re.compile(regex, re.I)
for line in output.splitlines():
match = rcomp.search(line)
if match:
ps_id = max(int(match.group(1)), ps_id)
to_get.append((project, chg_id, ps_id))
else:
project = self.GetProjects([a])[0]
return to_get
def Execute(self, opt, args):
for project, change_id, ps_id in self._ParseChangeIds(args):
dl = project.DownloadPatchSet(change_id, ps_id)
if not dl:
print('[%s] change %d/%d not found'
% (project.name, change_id, ps_id),
file=sys.stderr)
sys.exit(1)
if not opt.revert and not dl.commits:
print('[%s] change %d/%d has already been merged'
% (project.name, change_id, ps_id),
file=sys.stderr)
continue
if len(dl.commits) > 1:
print('[%s] %d/%d depends on %d unmerged changes:' \
% (project.name, change_id, ps_id, len(dl.commits)),
file=sys.stderr)
for c in dl.commits:
print(' %s' % (c), file=sys.stderr)
if opt.cherrypick:
try:
project._CherryPick(dl.commit)
except GitError:
print('[%s] Could not complete the cherry-pick of %s' \
% (project.name, dl.commit), file=sys.stderr)
sys.exit(1)
elif opt.revert:
project._Revert(dl.commit)
elif opt.ffonly:
project._FastForward(dl.commit, ffonly=True)
else:
project._Checkout(dl.commit)
| p.add_option('-c', '--cherry-pick',
dest='cherrypick', action='store_true',
help="cherry-pick instead of checkout")
p.add_option('-r', '--revert',
dest='revert', action='store_true',
help="revert instead of checkout")
p.add_option('-f', '--ff-only',
dest='ffonly', action='store_true',
help="force fast-forward merge") |
fossil.py | import os
import subprocess
from ..utils import RepoStats, ThreadedSegment, get_subprocess_env
def _get_fossil_branch():
branches = os.popen("fossil branch 2>/dev/null").read().strip().split("\n")
return ''.join([
i.replace('*','').strip()
for i in branches
if i.startswith('*')
])
def parse_fossil_stats(status):
stats = RepoStats()
for line in status:
if line.startswith("ADDED"):
stats.staged += 1
elif line.startswith("EXTRA"):
stats.new += 1
elif line.startswith("CONFLICT"):
stats.conflicted += 1
else:
stats.changed += 1
return stats
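# Illustrative example, not part of the original file: for status lines like
#   ["ADDED new.txt", "EDITED main.c", "CONFLICT lib.c", "EXTRA scratch.py"]
# parse_fossil_stats() reports staged=1, changed=1, conflicted=1, new=1.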
def _get_fossil_status():
changes = os.popen("fossil changes 2>/dev/null").read().strip().split("\n")
extra = os.popen("fossil extras 2>/dev/null").read().strip().split("\n")
extra = ["EXTRA " + filename for filename in extra if filename != ""]
status = [line for line in changes + extra if line != '']
return status
def build_stats():
|
class Segment(ThreadedSegment):
def add_to_powerline(self):
self.stats, self.branch = build_stats()
if not self.stats:
return
bg = self.powerline.theme.REPO_CLEAN_BG
fg = self.powerline.theme.REPO_CLEAN_FG
if self.stats.dirty:
bg = self.powerline.theme.REPO_DIRTY_BG
fg = self.powerline.theme.REPO_DIRTY_FG
if self.powerline.segment_conf("vcs", "show_symbol"):
symbol = RepoStats().symbols["fossil"] + " "
else:
symbol = ""
self.powerline.append(" " + symbol + self.branch + " ", fg, bg)
self.stats.add_to_powerline(self.powerline)
| try:
subprocess.Popen(['fossil'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=get_subprocess_env()).communicate()
except OSError:
# Popen will throw an OSError if fossil is not found
return (None, None)
branch = _get_fossil_branch()
if branch == "":
return (None, None)
status = _get_fossil_status()
if status == []:
return (RepoStats(), branch)
stats = parse_fossil_stats(status)
return stats, branch |
user.js | const mongoose = require('mongoose');
const Schema = mongoose.Schema;
const mongoosePaginate = require('mongoose-paginate');
const timestamps = require('mongoose-timestamp');
const utils = require('../main/common/utils');
const Roles = require('../../src/shared/roles');
// Define the User model schema
const UserSchema = new mongoose.Schema({
name: {
type: String,
required: true
},
email: {
type: String,
index: { unique: true }
},
role: {
type: String,
default: Roles.user,
enum: [Roles.user, Roles.siteAdmin] // Accept only these roles
},
company: {
type: Schema.ObjectId,
default: null,
ref: 'Company'
},
password: String
});
UserSchema.plugin(mongoosePaginate);
UserSchema.plugin(timestamps);
/**
* Override default toJSON, remove password field and __v version
*/
UserSchema.methods.toJSON = function() {
var obj = this.toObject();
delete obj.password;
delete obj.__v;
obj.id = obj._id;
delete obj._id;
return obj;
};
/**
* Compare the passed password with the value in the database. A model method.
*
* @param {string} password
* @param {function} callback
*/
UserSchema.methods.comparePassword = function comparePassword(password, callback) {
utils.compareHash(password, this.password, callback);
};
/**
* The pre-save hook method.
*
* NOTE: pre & post hooks are not executed on update() and findeOneAndUpdate()
* http://mongoosejs.com/docs/middleware.html
*/
UserSchema.pre('save', function saveHook(next) {
const user = this;
// Proceed further only if the password is modified or the user is new
if (!user.isModified('password')) return next();
return utils.hash(user.password, (err, hash) => {
if (err) { return next (err); }
// Replace the password string with hash value
user.password = hash;
return next(); | });
});
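// Illustrative note, not part of the original file: because the hook above is
// a pre('save') hook, password changes made through update() or
// findOneAndUpdate() bypass hashing; prefer loading the document, assigning
// user.password and calling user.save() so the hook runs.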
module.exports = mongoose.model('User', UserSchema); |
|
pandas_doctests_test.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import pandas as pd
from apache_beam.dataframe import doctests
from apache_beam.dataframe.pandas_top_level_functions import _is_top_level_function
@unittest.skipIf(sys.platform == 'win32', '[BEAM-10626]')
class DoctestTest(unittest.TestCase):
def test_ndframe_tests(self):
# IO methods are tested in io_test.py
skip_writes = {
f'pandas.core.generic.NDFrame.{name}': ['*']
for name in dir(pd.core.generic.NDFrame) if name.startswith('to_')
}
result = doctests.testmod(
pd.core.generic,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.generic.NDFrame.first': ['*'],
'pandas.core.generic.NDFrame.head': ['*'],
'pandas.core.generic.NDFrame.last': ['*'],
'pandas.core.generic.NDFrame.shift': ['*'],
'pandas.core.generic.NDFrame.tail': ['*'],
'pandas.core.generic.NDFrame.take': ['*'],
'pandas.core.generic.NDFrame.values': ['*'],
'pandas.core.generic.NDFrame.tz_localize': [
"s.tz_localize('CET', ambiguous='infer')",
# np.array is not a deferred object. This use-case is possible
# with a deferred Series though, which is tested in
# frames_test.py
"s.tz_localize('CET', ambiguous=np.array([True, True, False]))",
],
'pandas.core.generic.NDFrame.truncate': [
# These inputs rely on tail (wont implement, order
# sensitive) for verification
"df.tail()",
"df.loc['2016-01-05':'2016-01-10', :].tail()",
],
'pandas.core.generic.NDFrame.replace': [
"s.replace([1, 2], method='bfill')",
# Relies on method='pad'
"s.replace('a', None)",
],
'pandas.core.generic.NDFrame.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.generic.NDFrame.sort_values': ['*'],
'pandas.core.generic.NDFrame.mask': [
'df.where(m, -df) == np.where(m, df, -df)'
],
'pandas.core.generic.NDFrame.where': [
'df.where(m, -df) == np.where(m, df, -df)'
],
'pandas.core.generic.NDFrame.interpolate': ['*'],
},
not_implemented_ok={
'pandas.core.generic.NDFrame.asof': ['*'],
'pandas.core.generic.NDFrame.at_time': ['*'],
'pandas.core.generic.NDFrame.between_time': ['*'],
'pandas.core.generic.NDFrame.describe': ['*'],
'pandas.core.generic.NDFrame.ewm': ['*'],
'pandas.core.generic.NDFrame.expanding': ['*'],
'pandas.core.generic.NDFrame.flags': ['*'],
'pandas.core.generic.NDFrame.pct_change': ['*'],
'pandas.core.generic.NDFrame.rank': ['*'],
'pandas.core.generic.NDFrame.reindex': ['*'],
'pandas.core.generic.NDFrame.reindex_like': ['*'],
'pandas.core.generic.NDFrame.replace': ['*'],
'pandas.core.generic.NDFrame.resample': ['*'],
'pandas.core.generic.NDFrame.rolling': ['*'],
'pandas.core.generic.NDFrame.sample': ['*'],
'pandas.core.generic.NDFrame.set_flags': ['*'],
'pandas.core.generic.NDFrame.squeeze': ['*'],
'pandas.core.generic.NDFrame.transform': ['*'],
'pandas.core.generic.NDFrame.truncate': ['*'],
'pandas.core.generic.NDFrame.xs': ['*'],
# argsort unimplemented
'pandas.core.generic.NDFrame.abs': [
'df.loc[(df.c - 43).abs().argsort()]',
],
},
skip={
# Internal test
'pandas.core.generic.NDFrame._set_axis_name': ['*'],
# Fails to construct test series. asfreq is not implemented anyway.
'pandas.core.generic.NDFrame.asfreq': ['*'],
'pandas.core.generic.NDFrame.astype': ['*'],
'pandas.core.generic.NDFrame.convert_dtypes': ['*'],
'pandas.core.generic.NDFrame.copy': ['*'],
'pandas.core.generic.NDFrame.droplevel': ['*'],
'pandas.core.generic.NDFrame.infer_objects': ['*'],
'pandas.core.generic.NDFrame.rank': [
# Modified dataframe
'df'
],
'pandas.core.generic.NDFrame.rename': [
# Seems to be an upstream bug. The actual error has a different
# message:
# TypeError: Index(...) must be called with a collection of
# some kind, 2 was passed
# pandas doctests only verify the type of exception
'df.rename(2)'
],
# Tests rely on setting index
'pandas.core.generic.NDFrame.rename_axis': ['*'],
# Raises right exception, but testing framework has matching issues.
'pandas.core.generic.NDFrame.replace': [
"df.replace({'a string': 'new value', True: False}) # raises"
],
'pandas.core.generic.NDFrame.squeeze': ['*'],
# NameError
'pandas.core.generic.NDFrame.resample': ['df'],
# Skipped so we don't need to install natsort
'pandas.core.generic.NDFrame.sort_values': [
'from natsort import index_natsorted',
'df.sort_values(\n'
' by="time",\n'
' key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
')'
],
**skip_writes
})
self.assertEqual(result.failed, 0)
def test_dataframe_tests(self):
result = doctests.testmod(
pd.core.frame,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.frame.DataFrame.T': ['*'],
'pandas.core.frame.DataFrame.cummax': ['*'],
'pandas.core.frame.DataFrame.cummin': ['*'],
'pandas.core.frame.DataFrame.cumsum': ['*'],
'pandas.core.frame.DataFrame.cumprod': ['*'],
'pandas.core.frame.DataFrame.diff': ['*'],
'pandas.core.frame.DataFrame.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.frame.DataFrame.items': ['*'],
'pandas.core.frame.DataFrame.itertuples': ['*'],
'pandas.core.frame.DataFrame.iterrows': ['*'],
'pandas.core.frame.DataFrame.iteritems': ['*'],
# default keep is 'first'
'pandas.core.frame.DataFrame.nlargest': [
"df.nlargest(3, 'population')",
"df.nlargest(3, ['population', 'GDP'])",
"df.nlargest(3, 'population', keep='last')"
],
'pandas.core.frame.DataFrame.nsmallest': [
"df.nsmallest(3, 'population')",
"df.nsmallest(3, ['population', 'GDP'])",
"df.nsmallest(3, 'population', keep='last')",
],
'pandas.core.frame.DataFrame.replace': [
"s.replace([1, 2], method='bfill')",
# Relies on method='pad'
"s.replace('a', None)",
],
'pandas.core.frame.DataFrame.to_records': ['*'],
'pandas.core.frame.DataFrame.to_dict': ['*'],
'pandas.core.frame.DataFrame.to_numpy': ['*'],
'pandas.core.frame.DataFrame.to_string': ['*'],
'pandas.core.frame.DataFrame.transpose': ['*'],
'pandas.core.frame.DataFrame.shape': ['*'],
'pandas.core.frame.DataFrame.shift': [
'df.shift(periods=3, freq="D")',
'df.shift(periods=3, freq="infer")'
],
'pandas.core.frame.DataFrame.unstack': ['*'],
'pandas.core.frame.DataFrame.memory_usage': ['*'],
'pandas.core.frame.DataFrame.info': ['*'],
# Not equal to df.agg('mode', axis='columns', numeric_only=True)
# because there can be multiple columns if a row has more than one
# mode
'pandas.core.frame.DataFrame.mode': [
"df.mode(axis='columns', numeric_only=True)"
],
'pandas.core.frame.DataFrame.append': [
'df.append(df2, ignore_index=True)',
"for i in range(5):\n" +
" df = df.append({'A': i}, ignore_index=True)",
],
'pandas.core.frame.DataFrame.sort_index': ['*'],
'pandas.core.frame.DataFrame.sort_values': ['*'],
'pandas.core.frame.DataFrame.melt': [
"df.melt(id_vars=['A'], value_vars=['B'])",
"df.melt(id_vars=['A'], value_vars=['B', 'C'])",
"df.melt(col_level=0, id_vars=['A'], value_vars=['B'])",
"df.melt(id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
"df.melt(id_vars=['A'], value_vars=['B'],\n" +
" var_name='myVarname', value_name='myValname')"
]
},
not_implemented_ok={
'pandas.core.frame.DataFrame.transform': ['*'],
'pandas.core.frame.DataFrame.reindex': ['*'],
'pandas.core.frame.DataFrame.reindex_axis': ['*'],
'pandas.core.frame.DataFrame.round': [
'df.round(decimals)',
],
# We should be able to support pivot and pivot_table for categorical
# columns
'pandas.core.frame.DataFrame.pivot': ['*'],
# We can implement this as a zipping operator, but it won't have the
# same capability. The doctest includes an example that branches on
# a deferred result.
'pandas.core.frame.DataFrame.combine': ['*'],
# Can be implemented as a zipping operator
'pandas.core.frame.DataFrame.combine_first': ['*'],
# Difficult to parallelize but should be possible?
'pandas.core.frame.DataFrame.dot': [
# reindex not supported
's2 = s.reindex([1, 0, 2, 3])',
'df.dot(s2)',
],
# Trivially elementwise for axis=columns. Relies on global indexing
# for axis=rows.
# Difficult to determine proxy, need to inspect function
'pandas.core.frame.DataFrame.apply': ['*'],
# Cross-join not implemented
'pandas.core.frame.DataFrame.merge': [
"df1.merge(df2, how='cross')"
],
# TODO(BEAM-11711)
'pandas.core.frame.DataFrame.set_index': [
"df.set_index([s, s**2])",
],
},
skip={
# Throws NotImplementedError when modifying df
'pandas.core.frame.DataFrame.transform': ['df'],
'pandas.core.frame.DataFrame.axes': [
# Returns deferred index.
'df.axes',
],
'pandas.core.frame.DataFrame.compare': ['*'],
'pandas.core.frame.DataFrame.cov': [
# Relies on setting entries ahead of time.
"df.loc[df.index[:5], 'a'] = np.nan",
"df.loc[df.index[5:10], 'b'] = np.nan",
'df.cov(min_periods=12)',
],
'pandas.core.frame.DataFrame.drop_duplicates': ['*'],
'pandas.core.frame.DataFrame.duplicated': ['*'],
'pandas.core.frame.DataFrame.idxmax': ['*'],
'pandas.core.frame.DataFrame.idxmin': ['*'],
'pandas.core.frame.DataFrame.rename': [
# Returns deferred index.
'df.index',
'df.rename(index=str).index',
],
'pandas.core.frame.DataFrame.set_index': [
# TODO(BEAM-11711): This could pass in the index as
# a DeferredIndex, and we should fail it as order-sensitive.
"df.set_index([pd.Index([1, 2, 3, 4]), 'year'])",
],
'pandas.core.frame.DataFrame.set_axis': ['*'],
'pandas.core.frame.DataFrame.to_markdown': ['*'],
'pandas.core.frame.DataFrame.to_parquet': ['*'],
'pandas.core.frame.DataFrame.value_counts': ['*'],
'pandas.core.frame.DataFrame.to_records': [
'df.index = df.index.rename("I")',
'index_dtypes = f"<S{df.index.str.len().max()}"', # 1.x
'index_dtypes = "<S{}".format(df.index.str.len().max())', # 0.x
'df.to_records(index_dtypes=index_dtypes)',
],
# These tests use the static method pd.pivot_table, which doesn't
# actually raise NotImplementedError
'pandas.core.frame.DataFrame.pivot_table': ['*'],
# Expected to raise a ValueError, but we raise NotImplementedError
'pandas.core.frame.DataFrame.pivot': [
"df.pivot(index='foo', columns='bar', values='baz')"
],
'pandas.core.frame.DataFrame.append': [
'df',
# pylint: disable=line-too-long
"pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\n"
" ignore_index=True)"
],
'pandas.core.frame.DataFrame.eval': ['df'],
'pandas.core.frame.DataFrame.melt': [
"df.columns = [list('ABC'), list('DEF')]", "df"
],
'pandas.core.frame.DataFrame.merge': [
# Order-sensitive index, checked in frames_test.py.
"df1.merge(df2, left_on='lkey', right_on='rkey')",
"df1.merge(df2, left_on='lkey', right_on='rkey',\n"
" suffixes=('_left', '_right'))",
"df1.merge(df2, how='left', on='a')",
],
# Raises right exception, but testing framework has matching issues.
'pandas.core.frame.DataFrame.replace': [
"df.replace({'a string': 'new value', True: False}) # raises"
],
'pandas.core.frame.DataFrame.to_sparse': ['type(df)'],
# Skipped because "seen_wont_implement" is reset before getting to
# these calls, so the NameError they raise is not ignored.
'pandas.core.frame.DataFrame.T': [
'df1_transposed.dtypes', 'df2_transposed.dtypes'
],
'pandas.core.frame.DataFrame.transpose': [
'df1_transposed.dtypes', 'df2_transposed.dtypes'
],
# Skipped because this relies on iloc to set a cell to NA. The test is
# replicated in frames_test::DeferredFrameTest::test_applymap.
'pandas.core.frame.DataFrame.applymap': [
'df_copy.iloc[0, 0] = pd.NA',
"df_copy.applymap(lambda x: len(str(x)), na_action='ignore')",
],
# Skipped so we don't need to install natsort
'pandas.core.frame.DataFrame.sort_values': [
'from natsort import index_natsorted',
'df.sort_values(\n'
' by="time",\n'
' key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
')'
],
# Mode that we don't yet support, documentation added in pandas
# 1.2.0 (https://github.com/pandas-dev/pandas/issues/35912)
'pandas.core.frame.DataFrame.aggregate': [
"df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))"
],
})
self.assertEqual(result.failed, 0)
def test_series_tests(self):
result = doctests.testmod(
pd.core.series,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.series.Series.__array__': ['*'],
'pandas.core.series.Series.array': ['*'],
'pandas.core.series.Series.cummax': ['*'],
'pandas.core.series.Series.cummin': ['*'],
'pandas.core.series.Series.cumsum': ['*'],
'pandas.core.series.Series.cumprod': ['*'],
'pandas.core.series.Series.diff': ['*'],
'pandas.core.series.Series.dot': [
's.dot(arr)', # non-deferred result
],
'pandas.core.series.Series.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.series.Series.items': ['*'],
'pandas.core.series.Series.iteritems': ['*'],
# default keep is 'first'
'pandas.core.series.Series.nlargest': [
"s.nlargest()",
"s.nlargest(3)",
"s.nlargest(3, keep='last')",
],
'pandas.core.series.Series.memory_usage': ['*'],
'pandas.core.series.Series.nsmallest': [
"s.nsmallest()",
"s.nsmallest(3)",
"s.nsmallest(3, keep='last')",
],
'pandas.core.series.Series.pop': ['*'],
'pandas.core.series.Series.searchsorted': ['*'],
'pandas.core.series.Series.shift': ['*'],
'pandas.core.series.Series.take': ['*'],
'pandas.core.series.Series.to_dict': ['*'],
'pandas.core.series.Series.unique': ['*'],
'pandas.core.series.Series.unstack': ['*'],
'pandas.core.series.Series.values': ['*'],
'pandas.core.series.Series.view': ['*'],
'pandas.core.series.Series.append': [
's1.append(s2, ignore_index=True)',
],
'pandas.core.series.Series.sort_index': ['*'],
'pandas.core.series.Series.sort_values': ['*'],
'pandas.core.series.Series.argmax': ['*'],
'pandas.core.series.Series.argmin': ['*'],
},
not_implemented_ok={
'pandas.core.series.Series.transform': ['*'],
'pandas.core.series.Series.groupby': [
'ser.groupby(["a", "b", "a", "b"]).mean()',
'ser.groupby(["a", "b", "a", np.nan]).mean()',
'ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()',
# Grouping by a series is not supported
'ser.groupby(ser > 100).mean()',
],
'pandas.core.series.Series.reindex': ['*'],
},
skip={
# error formatting
'pandas.core.series.Series.append': [
's1.append(s2, verify_integrity=True)',
],
# Throws NotImplementedError when modifying df
'pandas.core.series.Series.transform': ['df'],
'pandas.core.series.Series.autocorr': ['*'],
'pandas.core.series.Series.combine': ['*'],
'pandas.core.series.Series.combine_first': ['*'],
'pandas.core.series.Series.compare': ['*'],
'pandas.core.series.Series.cov': [
# Differs in LSB on jenkins.
"s1.cov(s2)",
],
'pandas.core.series.Series.drop_duplicates': ['*'],
'pandas.core.series.Series.duplicated': ['*'],
'pandas.core.series.Series.explode': ['*'],
'pandas.core.series.Series.idxmax': ['*'],
'pandas.core.series.Series.idxmin': ['*'],
'pandas.core.series.Series.nonzero': ['*'],
'pandas.core.series.Series.quantile': ['*'],
'pandas.core.series.Series.pop': ['ser'], # testing side effect
'pandas.core.series.Series.repeat': ['*'],
'pandas.core.series.Series.replace': ['*'],
'pandas.core.series.Series.reset_index': ['*'],
'pandas.core.series.Series.searchsorted': [
# This doctest seems to be incorrectly parsed.
"x = pd.Categorical(['apple', 'bread', 'bread',"
],
'pandas.core.series.Series.set_axis': ['*'],
'pandas.core.series.Series.to_csv': ['*'],
'pandas.core.series.Series.to_markdown': ['*'],
'pandas.core.series.Series.update': ['*'],
'pandas.core.series.Series.view': [
# Inspection after modification.
's'
],
})
self.assertEqual(result.failed, 0)
def test_string_tests(self):
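# Parse the pandas version into a comparable tuple for the check below,
# e.g. "1.2.3" -> (1, 2, 3) (assumes a plain numeric version string).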
PD_VERSION = tuple(int(v) for v in pd.__version__.split('.'))
if PD_VERSION < (1, 2, 0):
module = pd.core.strings
else:
# Definitions were moved to accessor in pandas 1.2.0
module = pd.core.strings.accessor
module_name = module.__name__
result = doctests.testmod(
module,
use_beam=False,
wont_implement_ok={
# These methods can accept deferred series objects, but not lists
f'{module_name}.StringMethods.cat': [
"s.str.cat(['A', 'B', 'C', 'D'], sep=',')",
"s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')",
"s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')"
],
f'{module_name}.StringMethods.repeat': [
's.str.repeat(repeats=[1, 2, 3])'
],
f'{module_name}.str_repeat': ['s.str.repeat(repeats=[1, 2, 3])'],
f'{module_name}.StringMethods.get_dummies': ['*'],
f'{module_name}.str_get_dummies': ['*'],
},
skip={
# count() on Series with a NaN produces mismatched type if we
# have a NaN-only partition.
f'{module_name}.StringMethods.count': ["s.str.count('a')"],
f'{module_name}.str_count': ["s.str.count('a')"],
# Produce None instead of NaN, see
# frames_test.py::DeferredFrameTest::test_str_split
f'{module_name}.StringMethods.rsplit': [
's.str.split(expand=True)',
's.str.rsplit("/", n=1, expand=True)',
],
f'{module_name}.StringMethods.split': [
's.str.split(expand=True)',
's.str.rsplit("/", n=1, expand=True)',
],
# Bad test strings in pandas 1.1.x
f'{module_name}.str_replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
f'{module_name}.StringMethods.replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
# output has incorrect formatting in 1.2.x
f'{module_name}.StringMethods.extractall': ['*']
})
self.assertEqual(result.failed, 0)
def test_datetime_tests(self):
# TODO(BEAM-10721)
datetimelike_result = doctests.testmod(
pd.core.arrays.datetimelike,
use_beam=False,
skip={
'pandas.core.arrays.datetimelike.AttributesMixin._unbox_scalar': [
'*'
],
'pandas.core.arrays.datetimelike.TimelikeOps.ceil': ['*'],
'pandas.core.arrays.datetimelike.TimelikeOps.floor': ['*'],
'pandas.core.arrays.datetimelike.TimelikeOps.round': ['*'],
})
datetime_result = doctests.testmod(
pd.core.arrays.datetimes,
use_beam=False,
skip={
'pandas.core.arrays.datetimes.DatetimeArray.day': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.hour': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.microsecond': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.minute': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.month': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.nanosecond': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.second': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.year': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_leap_year': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_month_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_month_start': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_start': [
'*'
],
'pandas.core.arrays.datetimes.DatetimeArray.is_year_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_year_start': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.to_period': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.tz_localize': ['*'],
})
self.assertEqual(datetimelike_result.failed, 0)
self.assertEqual(datetime_result.failed, 0)
def test_indexing_tests(self):
result = doctests.testmod(
pd.core.indexing,
use_beam=False,
skip={
'pandas.core.indexing._IndexSlice': ['*'],
'pandas.core.indexing.IndexingMixin.at': ['*'],
'pandas.core.indexing.IndexingMixin.iat': ['*'],
'pandas.core.indexing.IndexingMixin.iloc': ['*'],
'pandas.core.indexing.IndexingMixin.loc': ['*'],
'pandas.core.indexing._AtIndexer': ['*'],
'pandas.core.indexing._LocIndexer': ['*'],
'pandas.core.indexing._iAtIndexer': ['*'],
'pandas.core.indexing._iLocIndexer': ['*'],
})
self.assertEqual(result.failed, 0)
def test_groupby_tests(self):
result = doctests.testmod(
pd.core.groupby.groupby,
use_beam=False,
wont_implement_ok={
'pandas.core.groupby.groupby.GroupBy.head': ['*'],
'pandas.core.groupby.groupby.GroupBy.tail': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': ['*'],
'pandas.core.groupby.groupby.GroupBy.cumcount': ['*'],
},
not_implemented_ok={
'pandas.core.groupby.groupby.GroupBy.describe': ['*'],
'pandas.core.groupby.groupby.GroupBy.ngroup': ['*'],
'pandas.core.groupby.groupby.GroupBy.resample': ['*'],
'pandas.core.groupby.groupby.GroupBy.sample': ['*'],
'pandas.core.groupby.groupby.GroupBy.quantile': ['*'],
'pandas.core.groupby.groupby.BaseGroupBy.pipe': ['*'],
# pipe tests are in a different location in pandas 1.1.x
'pandas.core.groupby.groupby._GroupBy.pipe': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': [
"df.groupby('A', as_index=False).nth(1)",
],
},
skip={
# Uses iloc to mutate a DataFrame
'pandas.core.groupby.groupby.GroupBy.resample': [
'df.iloc[2, 0] = 5',
'df',
],
# TODO: Raise WontImplement for a list passed as a grouping column
# Currently raises unhashable type: list
'pandas.core.groupby.groupby.GroupBy.ngroup': [
'df.groupby(["A", [1,1,2,3,2,1]]).ngroup()'
],
})
self.assertEqual(result.failed, 0)
result = doctests.testmod(
pd.core.groupby.generic,
use_beam=False,
wont_implement_ok={
# Returns an array by default, not a Series. WontImplement
# (non-deferred)
'pandas.core.groupby.generic.SeriesGroupBy.unique': ['*'],
# TODO: Is take actually deprecated?
'pandas.core.groupby.generic.DataFrameGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.nsmallest': [
"s.nsmallest(3, keep='last')",
"s.nsmallest(3)",
"s.nsmallest()",
],
'pandas.core.groupby.generic.SeriesGroupBy.nlargest': [
"s.nlargest(3, keep='last')",
"s.nlargest(3)",
"s.nlargest()",
],
'pandas.core.groupby.generic.DataFrameGroupBy.diff': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.diff': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.hist': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.groupby.generic.SeriesGroupBy.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
},
not_implemented_ok={
'pandas.core.groupby.generic.DataFrameGroupBy.transform': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.filter': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.nunique': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.transform': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.filter': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.describe': ['*'],
},
skip={
'pandas.core.groupby.generic.SeriesGroupBy.cov': [
# Floating point comparison fails
's1.cov(s2)',
],
'pandas.core.groupby.generic.DataFrameGroupBy.cov': [
# Mutates input DataFrame with loc
# TODO: Replicate in frames_test.py
"df.loc[df.index[:5], 'a'] = np.nan",
"df.loc[df.index[5:10], 'b'] = np.nan",
"df.cov(min_periods=12)",
],
# These examples rely on grouping by a list
'pandas.core.groupby.generic.SeriesGroupBy.aggregate': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.aggregate': ['*'],
})
self.assertEqual(result.failed, 0)
def | (self):
tests = {
name: func.__doc__
for (name, func) in pd.__dict__.items()
if _is_top_level_function(func) and getattr(func, '__doc__', None)
}
# IO methods are tested in io_test.py
skip_reads = {name: ['*'] for name in dir(pd) if name.startswith('read_')}
result = doctests.teststrings(
tests,
use_beam=False,
report=True,
not_implemented_ok={
'concat': ['pd.concat([s1, s2], ignore_index=True)'],
'crosstab': ['*'],
'cut': ['*'],
'eval': ['*'],
'factorize': ['*'],
'get_dummies': ['*'],
'infer_freq': ['*'],
'lreshape': ['*'],
'melt': ['*'],
'merge': ["df1.merge(df2, how='cross')"],
'merge_asof': ['*'],
'pivot': ['*'],
'pivot_table': ['*'],
'qcut': ['*'],
'reset_option': ['*'],
'set_eng_float_format': ['*'],
'set_option': ['*'],
'to_numeric': ['*'],
'to_timedelta': ['*'],
'unique': ['*'],
'value_counts': ['*'],
'wide_to_long': ['*'],
},
wont_implement_ok={
'to_datetime': ['s.head()'],
'to_pickle': ['*'],
'melt': [
"pd.melt(df, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])",
"pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
"pd.melt(df, id_vars=['A'], value_vars=['B'],\n" +
" var_name='myVarname', value_name='myValname')"
],
},
skip={
# error formatting
'concat': ['pd.concat([df5, df6], verify_integrity=True)'],
# doctest DeprecationWarning
'melt': ['df'],
# Order-sensitive re-indexing.
'merge': [
"df1.merge(df2, left_on='lkey', right_on='rkey')",
"df1.merge(df2, left_on='lkey', right_on='rkey',\n"
" suffixes=('_left', '_right'))",
"df1.merge(df2, how='left', on='a')",
],
# Not an actual test.
'option_context': ['*'],
'factorize': ['codes', 'uniques'],
# Bad top-level use of un-imported function.
'merge_ordered': [
'merge_ordered(df1, df2, fill_method="ffill", left_by="group")'
],
# Expected error.
'pivot': ["df.pivot(index='foo', columns='bar', values='baz')"],
# Never written.
'to_pickle': ['os.remove("./dummy.pkl")'],
**skip_reads
})
self.assertEqual(result.failed, 0)
if __name__ == '__main__':
unittest.main()
| test_top_level |
middlewares.py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html |
from scrapy.conf import settings
import os
import random
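# Note: `scrapy.conf` is deprecated/removed in recent Scrapy releases. A
# comparable middleware today would typically read USER_AGENT_LIST through
# the crawler settings instead; a minimal sketch (not part of this project):
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         return cls(crawler.settings.getlist('USER_AGENT_LIST'))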
class RandomUserAgentMiddleware(object):
def process_request(self, request, spider):
ua = random.choice(settings.get('USER_AGENT_LIST'))
if ua:
request.headers.setdefault('User-Agent', ua) | |
mdbook-plantuml.rs | extern crate clap;
extern crate mdbook;
extern crate mdbook_plantuml;
#[macro_use]
extern crate log;
extern crate log4rs;
use clap::{App, Arg, ArgMatches, SubCommand};
use mdbook::errors::Error as MDBookError;
use mdbook::preprocess::{CmdPreprocessor, Preprocessor};
use mdbook_plantuml::PlantUMLPreprocessor;
use std::error::Error;
use std::io;
use std::process;
pub fn make_app() -> App<'static, 'static> {
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
App::new("mdBook PlantUML preprocessor")
.version(VERSION)
.author("Sytse Reitsma")
.about("A mdbook preprocessor which renders PlantUML code blocks to inline SVG diagrams")
.arg(
Arg::with_name("log")
.short("l")
.help("Log to './output.log' (may help troubleshooting rendering issues)."),
)
.subcommand(
SubCommand::with_name("supports")
.arg(Arg::with_name("renderer").required(true))
.about("Check whether a renderer is supported by this preprocessor"),
)
}
fn main() {
let matches = make_app().get_matches();
let preprocessor = PlantUMLPreprocessor;
if let Some(sub_args) = matches.subcommand_matches("supports") {
handle_supports(&preprocessor, sub_args);
} else {
if matches.is_present("log") {
if let Err(e) = setup_logging() {
eprintln!("{}", e);
process::exit(2);
}
}
if let Err(e) = handle_preprocessing(&preprocessor) {
eprintln!("{}", e);
process::exit(1);
}
}
}
fn handle_preprocessing(pre: &dyn Preprocessor) -> Result<(), MDBookError> {
let (ctx, book) = CmdPreprocessor::parse_input(io::stdin())?;
if ctx.mdbook_version != mdbook::MDBOOK_VERSION {
// We should probably use the `semver` crate to check compatibility
// here...
eprintln!(
"Warning: The {} plugin was built against version {} of mdbook, \
but we're being called from version {}",
pre.name(),
mdbook::MDBOOK_VERSION,
ctx.mdbook_version
);
}
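// A semver-based check could look roughly like this (sketch only; it assumes
// the `semver` crate is added as a dependency, which this project does not do):
//
//     use semver::{Version, VersionReq};
//     let req = VersionReq::parse(&format!("~{}", mdbook::MDBOOK_VERSION))?;
//     let caller = Version::parse(&ctx.mdbook_version)?;
//     if !req.matches(&caller) { /* warn as above */ }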
let processed_book = pre.run(&ctx, book)?;
serde_json::to_writer(io::stdout(), &processed_book)?;
Ok(())
}
fn | (pre: &dyn Preprocessor, sub_args: &ArgMatches) -> ! {
let renderer = sub_args.value_of("renderer").expect("Required argument");
let supported = pre.supports_renderer(&renderer);
// Signal whether the renderer is supported by exiting with 1 or 0.
if supported {
process::exit(0);
} else {
process::exit(1);
}
}
fn setup_logging() -> Result<(), Box<dyn Error>> {
use log::LevelFilter;
use log4rs::append::file::FileAppender;
use log4rs::config::{Appender, Config, Root};
use log4rs::encode::pattern::PatternEncoder;
let logfile = FileAppender::builder()
.encoder(Box::new(PatternEncoder::new("{l} - {m}\n")))
.build("output.log")?;
let config = Config::builder()
.appender(Appender::builder().build("logfile", Box::new(logfile)))
.build(
Root::builder()
.appender("logfile")
.build(LevelFilter::Debug),
)?;
log4rs::init_config(config)?;
info!("--- Started preprocessor ---");
Ok(())
}
| handle_supports |
types.ts | import type { Definition, DefinitionType } from '@yozora/ast'
import type {
BaseBlockTokenizerProps,
PartialYastBlockToken,
PhrasingContentLine,
} from '@yozora/core-tokenizer'
import type { LinkDestinationCollectingState } from './util/link-destination'
import type { LinkLabelCollectingState } from './util/link-label'
import type { LinkTitleCollectingState } from './util/link-title'
export type T = DefinitionType
export type Node = Definition
export const uniqueName = '@yozora/tokenizer-definition'
export interface Token extends PartialYastBlockToken<T> {
/**
 * Lines of phrasing content from which this definition is parsed.
*/
lines: Array<Readonly<PhrasingContentLine>>
/**
* Link label
* Trimmed, Case-Insensitive
*/
label: LinkLabelCollectingState
/**
* Link destination
*/
destination: LinkDestinationCollectingState | null
/**
* Link title
*/
title: LinkTitleCollectingState | null
/**
* The line number of the first matched character of the link label
*/
lineNoOfLabel: number
/**
* The line number of the first matched character of the link destination
*/
lineNoOfDestination: number
/**
* The line number of the first matched character of the link title
*/
lineNoOfTitle: number
/**
* Resolved definition label.
*/
_label?: string
/**
* Resolved definition identifier.
*/
_identifier?: string
} |
export type TokenizerProps = Partial<BaseBlockTokenizerProps> | |
about.py | # encoding: utf-8
from sys import path
from os.path import dirname as dir
path.append(dir(path[0]))
from connectors import users
from telegram.ext import CommandHandler
def main(dispatcher):
about_handler = CommandHandler('about', __about)
dispatcher.add_handler(about_handler)
def | (bot, update):
update.message.reply_html(
('This bot has been developed by @alesanmed. '
'If you are a user and have any problem, you '
'can contact him to resolve it. If you are '
'a developer and want to contribute to the bot, '
'please refer to the bot '
'<a href="https://github.com/alesanmed/YourEnglishTeacher_Bot">GitHub repository</a>.')
) | __about |
ready.spec.ts | import { componentOnReady } from '../helpers';
describe('componentOnReady()', () => {
it('should correctly call callback for a custom element', (done) => {
customElements.define('hello-world', class extends HTMLElement { | constructor() {
super();
}
});
const component = document.createElement('hello-world');
componentOnReady(component, (el) => {
expect(el).toBe(component);
done();
})
});
it('should correctly call callback for a lazy loaded component', (done) => {
const cb = jest.fn((el) => {
return new Promise((resolve) => {
setTimeout(() => resolve(el), 250);
});
});
customElements.define('hello-world', class extends HTMLElement {
constructor() {
super();
}
componentOnReady() {
return cb(this);
}
});
const component = document.createElement('hello-world');
componentOnReady(component, (el) => {
expect(el).toBe(component);
expect(cb).toHaveBeenCalledTimes(1);
done();
})
});
}); | |
launch_benchmark.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import signal
import subprocess
import sys
from argparse import ArgumentParser
from common import base_benchmark_util
from common import platform_util
from common.utils.validators import check_no_spaces, check_volume_mount, check_shm_size
from common.base_model_init import BaseModelInitializer
class | (base_benchmark_util.BaseBenchmarkUtil):
"""Launches benchmarking job based on the specified args """
def __init__(self, *args, **kwargs):
super(LaunchBenchmark, self).__init__(*args, **kwargs)
self.args, self.unknown_args = self.parse_args()
try:
self.validate_args()
except (IOError, ValueError) as e:
sys.exit("\nError: {}".format(e))
def main(self):
benchmark_scripts = os.path.dirname(os.path.realpath(__file__))
use_case = self.get_model_use_case(benchmark_scripts)
intelai_models = self.get_model_dir(benchmark_scripts, use_case)
intelai_models_common = self.get_model_dir(benchmark_scripts, "common")
env_var_dict = self.get_env_vars(benchmark_scripts, use_case, intelai_models,
intelai_models_common)
if self.args.docker_image:
if self.args.framework == 'tensorflow_serving':
self.run_bare_metal(benchmark_scripts, intelai_models,
intelai_models_common, env_var_dict)
elif self.args.framework == 'tensorflow':
self.run_docker_container(benchmark_scripts, intelai_models,
intelai_models_common, env_var_dict)
else:
self.run_bare_metal(benchmark_scripts, intelai_models,
intelai_models_common, env_var_dict)
def parse_args(self):
# Additional args that are only used with the launch script
arg_parser = ArgumentParser(
parents=[self._common_arg_parser],
description="Parse args for benchmark interface")
arg_parser.add_argument(
"--docker-image",
help="Specify the docker image/tag to use when running benchmarking within a container."
"If no docker image is specified, then no docker container will be used.",
dest="docker_image", default=None, type=check_no_spaces)
arg_parser.add_argument(
"--volume",
help="Specify a custom volume to mount in the container, which follows the same format as the "
"docker --volume flag (https://docs.docker.com/storage/volumes/). "
"This argument can only be used in conjunction with a --docker-image.",
action="append", dest="custom_volumes", type=check_volume_mount)
arg_parser.add_argument(
"--shm-size",
help="Specify the size of docker /dev/shm. The format is <number><unit>. "
"number must be greater than 0. Unit is optional and can be b (bytes), k (kilobytes), "
"m (megabytes), or g (gigabytes).",
dest="shm_size", default="64m", type=check_shm_size)
arg_parser.add_argument(
"--debug", help="Launches debug mode which doesn't execute "
"start.sh when running in a docker container.", action="store_true")
arg_parser.add_argument(
"--noinstall",
help="whether to install packages for a given model when running in docker "
"(default --noinstall='False') or on bare metal (default --noinstall='True')",
dest="noinstall", action="store_true", default=None)
return arg_parser.parse_known_args()
def validate_args(self):
"""validate the args"""
# validate that we support this framework by checking folder names
benchmark_dir = os.path.dirname(os.path.realpath(__file__))
if glob.glob("{}/*/{}".format(benchmark_dir, self.args.framework)) == []:
raise ValueError("The specified framework is not supported: {}".
format(self.args.framework))
# if neither benchmark_only or accuracy_only are specified, then enable
# benchmark_only as the default
if not self.args.benchmark_only and not self.args.accuracy_only:
self.args.benchmark_only = True
# default disable_tcmalloc=False for int8 and disable_tcmalloc=True for other precisions
if not self.args.disable_tcmalloc:
self.args.disable_tcmalloc = str(self.args.precision != "int8")
if self.args.custom_volumes and not self.args.docker_image:
raise ValueError("Volume mounts can only be used when running in a docker container "
"(a --docker-image must be specified when using --volume).")
if self.args.mode == "inference" and self.args.checkpoint:
print("Warning: The --checkpoint argument is being deprecated in favor of using frozen graphs.")
def get_model_use_case(self, benchmark_scripts):
"""
Infers the use case based on the directory structure for the specified model.
"""
args = self.args
# find the path to the model's benchmarks folder
search_path = os.path.join(
benchmark_scripts, "*", args.framework, args.model_name,
args.mode, args.precision)
matches = glob.glob(search_path)
error_str = ""
if len(matches) > 1:
error_str = "Found multiple model locations for {} {} {}"
elif len(matches) == 0:
error_str = "No model was found for {} {} {}"
if error_str:
raise ValueError(error_str.format(args.framework, args.model_name, args.precision))
# use the benchmarks directory path to find the use case
dir_list = matches[0].split("/")
# find the last occurrence of framework in the list, then return
# the element before it in the path, which is the use case
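# For illustration (hypothetical path): splitting
# ".../benchmarks/image_recognition/tensorflow/resnet50/inference/fp32"
# on "/" and scanning backwards for "tensorflow" yields
# "image_recognition" as the use case.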
return next(dir_list[elem - 1] for elem in range(len(dir_list) - 1, -1, -1)
if dir_list[elem] == args.framework)
def get_model_dir(self, benchmark_scripts, use_case):
"""
Finds the path to the optimized model directory in this repo, if it exists.
"""
# use the models directory as a default
intelai_models = os.path.join(benchmark_scripts, os.pardir, "models")
if use_case == "common":
return os.path.join(intelai_models, "common", self.args.framework)
# find the intelai_optimized model directory
args = self.args
optimized_model_dir = os.path.join(
benchmark_scripts, os.pardir, "models", use_case,
args.framework, args.model_name)
# if we find an optimized model, then we will use that path
if os.path.isdir(optimized_model_dir):
intelai_models = optimized_model_dir
return intelai_models
def get_env_vars(self, benchmark_scripts, use_case, intelai_models,
intelai_models_common):
"""
Sets up dictionary of standard env vars that are used by start.sh
"""
# Standard env vars
args = self.args
env_var_dict = {
"ACCURACY_ONLY": args.accuracy_only,
"BACKBONE_MODEL_DIRECTORY_VOL": args.backbone_model,
"BATCH_SIZE": args.batch_size,
"BENCHMARK_ONLY": args.benchmark_only,
"BENCHMARK_SCRIPTS": benchmark_scripts,
"CHECKPOINT_DIRECTORY_VOL": args.checkpoint,
"DATASET_LOCATION_VOL": args.data_location,
"DATA_NUM_INTER_THREADS": args.data_num_inter_threads,
"DATA_NUM_INTRA_THREADS": args.data_num_intra_threads,
"DISABLE_TCMALLOC": args.disable_tcmalloc,
"DOCKER": args.docker_image or str(args.docker_image is not None),
"EXTERNAL_MODELS_SOURCE_DIRECTORY": args.model_source_dir,
"FRAMEWORK": args.framework,
"INTELAI_MODELS": intelai_models,
"INTELAI_MODELS_COMMON": intelai_models_common,
"MODE": args.mode,
"MODEL_NAME": args.model_name,
"MPI_HOSTNAMES": args.mpi_hostnames,
"MPI_NUM_PROCESSES": args.mpi,
"MPI_NUM_PROCESSES_PER_SOCKET": args.num_mpi,
"NOINSTALL": str(args.noinstall) if args.noinstall is not None else "True" if not args.docker_image else "False", # noqa: E501
"NUM_CORES": args.num_cores,
"NUM_INTER_THREADS": args.num_inter_threads,
"NUM_INTRA_THREADS": args.num_intra_threads,
"NUM_TRAIN_STEPS": args.num_train_steps,
"OUTPUT_RESULTS": args.output_results,
"PRECISION": args.precision,
"PYTHON_EXE": sys.executable if not args.docker_image else "python",
"SOCKET_ID": args.socket_id,
"TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD": args.tcmalloc_large_alloc_report_threshold,
"TF_SERVING_VERSION": args.tf_serving_version,
"USE_CASE": use_case,
"VERBOSE": args.verbose
}
# Add custom model args as env vars
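# (for example, a hypothetical extra arg "--kmp-blocktime=1" would be
# exported below as the env var "kmp_blocktime" with value "1")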
for custom_arg in args.model_args + self.unknown_args:
if "=" not in custom_arg:
raise ValueError("Expected model args in the format "
"`name=value` but received: {}".
format(custom_arg))
split_arg = custom_arg.split("=")
split_arg[0] = split_arg[0].replace("-", "_").lstrip('_')
env_var_dict[split_arg[0]] = split_arg[1]
return env_var_dict
def run_bare_metal(self, benchmark_scripts, intelai_models,
intelai_models_common, env_var_dict):
"""
Runs the model without a container
"""
# setup volume directories to be the local system directories, since we aren't
# mounting volumes when running bare metal, but start.sh expects these args
args = self.args
workspace = os.path.join(benchmark_scripts, "common", args.framework)
mount_benchmark = benchmark_scripts
in_graph_path = args.input_graph
checkpoint_path = args.checkpoint
backbone_model_path = args.backbone_model
dataset_path = args.data_location
mount_external_models_source = args.model_source_dir
mount_intelai_models = intelai_models
# To launch a TensorFlow Serving benchmark we only need the --in-graph arg.
# It does not support checkpoint files.
if args.framework == "tensorflow_serving":
if checkpoint_path:
raise ValueError("--checkpoint-path arg is not supported with tensorflow serving benchmarking")
if args.mode != "inference":
raise ValueError("--mode arg should be set to inference")
if in_graph_path:
env_var_dict["IN_GRAPH"] = in_graph_path
else:
raise ValueError("--in-graph arg is required to run tensorflow serving benchmarking")
for env_var_name in env_var_dict:
os.environ[env_var_name] = str(env_var_dict[env_var_name])
# We need this env to be set for the platform util
os.environ["PYTHON_EXE"] = str(sys.executable if not args.docker_image else "python")
# Get PlatformUtil
platform_util_obj = None or platform_util.PlatformUtil(self.args)
# Configure num_inter_threads and num_intra_threads
base_obj = BaseModelInitializer(args=self.args, custom_args=[], platform_util=platform_util_obj)
base_obj.set_num_inter_intra_threads()
# Update num_inter_threads and num_intra_threads in env dictionary
env_var_dict["NUM_INTER_THREADS"] = self.args.num_inter_threads
env_var_dict["NUM_INTRA_THREADS"] = self.args.num_intra_threads
# Set OMP_NUM_THREADS
env_var_dict["OMP_NUM_THREADS"] = self.args.num_intra_threads
else:
mount_external_models_source = args.model_source_dir
mount_intelai_models = intelai_models
mount_intelai_models_common = intelai_models_common
# Add env vars with bare metal settings
env_var_dict["MOUNT_EXTERNAL_MODELS_SOURCE"] = mount_external_models_source
env_var_dict["MOUNT_INTELAI_MODELS_SOURCE"] = mount_intelai_models
env_var_dict["MOUNT_INTELAI_MODELS_COMMON_SOURCE"] = mount_intelai_models_common
if in_graph_path:
env_var_dict["IN_GRAPH"] = in_graph_path
if checkpoint_path:
env_var_dict["CHECKPOINT_DIRECTORY"] = checkpoint_path
if backbone_model_path:
env_var_dict["BACKBONE_MODEL_DIRECTORY"] = backbone_model_path
if dataset_path:
env_var_dict["DATASET_LOCATION"] = dataset_path
# if using the default output directory, get the full path
if args.output_dir == "/models/benchmarks/common/tensorflow/logs":
args.output_dir = os.path.join(workspace, "logs")
# Add env vars with bare metal settings
env_var_dict["WORKSPACE"] = workspace
env_var_dict["MOUNT_BENCHMARK"] = mount_benchmark
env_var_dict["OUTPUT_DIR"] = args.output_dir
# Set env vars for bare metal
for env_var_name in env_var_dict:
os.environ[env_var_name] = str(env_var_dict[env_var_name])
# Run the start script
start_script = os.path.join(workspace, "start.sh")
self._launch_command(["bash", start_script])
def run_docker_container(self, benchmark_scripts, intelai_models,
intelai_models_common, env_var_dict):
"""
Runs a docker container with the specified image and environment
variables to start running the benchmarking job.
"""
args = self.args
mount_benchmark = "/workspace/benchmarks"
mount_external_models_source = "/workspace/models"
mount_intelai_models = "/workspace/intelai_models"
mount_intelai_models_common = "/workspace/intelai_models_common"
workspace = os.path.join(mount_benchmark, "common", args.framework)
mount_output_dir = False
output_dir = os.path.join(workspace, 'logs')
if args.output_dir != "/models/benchmarks/common/tensorflow/logs":
# we don't need to mount log dir otherwise since default is workspace folder
mount_output_dir = True
output_dir = args.output_dir
in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \
else ""
in_graph_filename = os.path.basename(args.input_graph) if \
args.input_graph else ""
# env vars with docker settings
env_vars = ["--env", "WORKSPACE={}".format(workspace),
"--env", "MOUNT_BENCHMARK={}".format(mount_benchmark),
"--env", "MOUNT_EXTERNAL_MODELS_SOURCE={}".format(mount_external_models_source),
"--env", "MOUNT_INTELAI_MODELS_SOURCE={}".format(mount_intelai_models),
"--env", "MOUNT_INTELAI_MODELS_COMMON_SOURCE={}".format(mount_intelai_models_common),
"--env", "OUTPUT_DIR={}".format(output_dir)]
if args.input_graph:
env_vars += ["--env", "IN_GRAPH=/in_graph/{}".format(in_graph_filename)]
if args.data_location:
env_vars += ["--env", "DATASET_LOCATION=/dataset"]
if args.checkpoint:
env_vars += ["--env", "CHECKPOINT_DIRECTORY=/checkpoints"]
if args.backbone_model:
env_vars += ["--env", "BACKBONE_MODEL_DIRECTORY=/backbone_model"]
# Add env vars with common settings
for env_var_name in env_var_dict:
env_vars += ["--env", "{}={}".format(env_var_name, env_var_dict[env_var_name])]
# Add proxy to env variables if any set on host
for environment_proxy_setting in [
"http_proxy",
"ftp_proxy",
"https_proxy",
"no_proxy",
]:
if not os.environ.get(environment_proxy_setting):
continue
env_vars.append("--env")
env_vars.append("{}={}".format(
environment_proxy_setting,
os.environ.get(environment_proxy_setting)
))
volume_mounts = ["--volume", "{}:{}".format(benchmark_scripts, mount_benchmark),
"--volume", "{}:{}".format(args.model_source_dir, mount_external_models_source),
"--volume", "{}:{}".format(intelai_models, mount_intelai_models),
"--volume", "{}:{}".format(intelai_models_common, mount_intelai_models_common)]
if mount_output_dir:
volume_mounts.extend([
"--volume", "{}:{}".format(output_dir, output_dir)])
if args.data_location:
volume_mounts.extend([
"--volume", "{}:{}".format(args.data_location, "/dataset")])
if args.checkpoint:
volume_mounts.extend([
"--volume", "{}:{}".format(args.checkpoint, "/checkpoints")])
if args.backbone_model:
volume_mounts.extend([
"--volume", "{}:{}".format(args.backbone_model, "/backbone_model")])
if in_graph_dir:
volume_mounts.extend([
"--volume", "{}:{}".format(in_graph_dir, "/in_graph")])
if args.custom_volumes:
for custom_volume in args.custom_volumes:
volume_mounts.extend(["--volume", custom_volume])
docker_run_cmd = ["docker", "run"]
# only use -it when debugging, otherwise we might get TTY error
if args.debug:
docker_run_cmd.append("-it")
docker_shm_size = "--shm-size={}".format(args.shm_size)
docker_run_cmd = docker_run_cmd + env_vars + volume_mounts + [
docker_shm_size, "--privileged", "-u", "root:root", "-w",
workspace, args.docker_image, "/bin/bash"]
if not args.debug:
docker_run_cmd.append("start.sh")
if args.verbose:
print("Docker run command:\n{}".format(docker_run_cmd))
self._launch_command(docker_run_cmd)
def _launch_command(self, run_cmd):
"""runs command that runs the start script in a container or on bare metal and exits on ctrl c"""
p = subprocess.Popen(run_cmd, preexec_fn=os.setsid)
try:
p.communicate()
except KeyboardInterrupt:
os.killpg(os.getpgid(p.pid), signal.SIGKILL)
if __name__ == "__main__":
util = LaunchBenchmark()
util.main()
| LaunchBenchmark |
station_model.go | // Code generated by go-swagger; DO NOT EDIT.
package cli
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"github.com/byxorna/nycmesh-tool/generated/go/uisp/models"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/spf13/cobra"
)
// Schema cli for Station
// register flags to command
func registerModelStationFlags(depth int, cmdPrefix string, cmd *cobra.Command) error {
if err := registerStationConnected(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationDeviceIdentification(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationDistance(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationDownlinkCapacity(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationFirmware(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationInterfaceID(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationIPAddress(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationLatency(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationMac(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationModel(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationName(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationNoiseFloor(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationRadio(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationRxBytes(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationRxChain(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationRxChainMask(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationRxModulation(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationRxRate(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationRxSignal(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationStatistics(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationTimestamp(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationTxBytes(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationTxChain(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationTxChainMask(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationTxModulation(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationTxRate(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationTxSignal(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationUplinkCapacity(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationUptime(depth, cmdPrefix, cmd); err != nil {
return err
}
if err := registerStationVendor(depth, cmdPrefix, cmd); err != nil {
return err
}
return nil
}
func registerStationConnected(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
connectedDescription := ``
var connectedFlagName string
if cmdPrefix == "" {
connectedFlagName = "connected"
} else {
connectedFlagName = fmt.Sprintf("%v.connected", cmdPrefix)
}
var connectedFlagDefault bool
_ = cmd.PersistentFlags().Bool(connectedFlagName, connectedFlagDefault, connectedDescription)
return nil
}
func registerStationDeviceIdentification(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
var deviceIdentificationFlagName string
if cmdPrefix == "" {
deviceIdentificationFlagName = "deviceIdentification"
} else {
deviceIdentificationFlagName = fmt.Sprintf("%v.deviceIdentification", cmdPrefix)
}
if err := registerModelDeviceIdentification1Flags(depth+1, deviceIdentificationFlagName, cmd); err != nil {
return err
}
return nil
}
func registerStationDistance(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
distanceDescription := `Distance in meters.`
var distanceFlagName string
if cmdPrefix == "" {
distanceFlagName = "distance"
} else {
distanceFlagName = fmt.Sprintf("%v.distance", cmdPrefix)
}
var distanceFlagDefault int64
_ = cmd.PersistentFlags().Int64(distanceFlagName, distanceFlagDefault, distanceDescription)
return nil
}
func registerStationDownlinkCapacity(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
downlinkCapacityDescription := ``
var downlinkCapacityFlagName string
if cmdPrefix == "" {
downlinkCapacityFlagName = "downlinkCapacity"
} else {
downlinkCapacityFlagName = fmt.Sprintf("%v.downlinkCapacity", cmdPrefix)
}
var downlinkCapacityFlagDefault int64
_ = cmd.PersistentFlags().Int64(downlinkCapacityFlagName, downlinkCapacityFlagDefault, downlinkCapacityDescription)
return nil
}
func registerStationFirmware(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
var firmwareFlagName string
if cmdPrefix == "" {
firmwareFlagName = "firmware"
} else {
firmwareFlagName = fmt.Sprintf("%v.firmware", cmdPrefix)
}
if err := registerModelDeviceFirmware1Flags(depth+1, firmwareFlagName, cmd); err != nil {
return err
}
return nil
}
func registerStationInterfaceID(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
interfaceIdDescription := `Interface name, where the station is connected.`
var interfaceIdFlagName string
if cmdPrefix == "" {
interfaceIdFlagName = "interfaceId"
} else {
interfaceIdFlagName = fmt.Sprintf("%v.interfaceId", cmdPrefix)
}
var interfaceIdFlagDefault string
_ = cmd.PersistentFlags().String(interfaceIdFlagName, interfaceIdFlagDefault, interfaceIdDescription)
return nil
}
func registerStationIPAddress(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
ipAddressDescription := `Custom IP address in IPv4 or IPv6 format.`
var ipAddressFlagName string
if cmdPrefix == "" {
ipAddressFlagName = "ipAddress"
} else {
ipAddressFlagName = fmt.Sprintf("%v.ipAddress", cmdPrefix)
}
var ipAddressFlagDefault string
_ = cmd.PersistentFlags().String(ipAddressFlagName, ipAddressFlagDefault, ipAddressDescription)
return nil
}
func registerStationLatency(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
latencyDescription := `Latency in milliseconds.`
var latencyFlagName string
if cmdPrefix == "" {
latencyFlagName = "latency"
} else {
latencyFlagName = fmt.Sprintf("%v.latency", cmdPrefix)
}
var latencyFlagDefault int64
_ = cmd.PersistentFlags().Int64(latencyFlagName, latencyFlagDefault, latencyDescription)
return nil
}
func registerStationMac(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
macDescription := ``
var macFlagName string
if cmdPrefix == "" {
macFlagName = "mac"
} else {
macFlagName = fmt.Sprintf("%v.mac", cmdPrefix)
}
var macFlagDefault string
_ = cmd.PersistentFlags().String(macFlagName, macFlagDefault, macDescription)
return nil
}
func registerStationModel(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
modelDescription := `Enum: ["UF-Nano","UF-Loco","UF-Wifi","UF-Instant","UF-OLT","UF-OLT4","UISP-R-Pro","UISP-R-Lite","UNMS-S-Lite","UISP-S-Lite","UISP-S-Pro","UISP-P-Lite","UISP-LTE","ER-X","ER-X-SFP","ERLite-3","ERPoe-5","ERPro-8","ER-8","ER-8-XG","ER-4","ER-6P","ER-12","ER-12P","ER-10X","EP-R8","EP-R6","EP-S16","ES-12F","ES-16-150W","ES-24-250W","ES-24-500W","ES-24-Lite","ES-48-500W","ES-48-750W","ES-48-Lite","ES-8-150W","ES-16-XG","ES-10XP","ES-10X","ES-18X","ES-26X","EP-54V-150W","EP-24V-72W","EP-54V-72W","TSW-PoE","TSW-PoE PRO","ACB-AC","ACB-ISP","ACB-LOCO","AF11FX","AF24","AF24HD","AF2X","AF3X","AF4X","AF5","AF5U","AF5X","AF-5XHD","AF-LTU","LTU-LITE","AF-LTU5","LTU-Rocket","AFLTULR","AF60","AF60-LR","WaveAP","WaveCPE","GBE-LR","GBE","GBE-Plus","GBE-AP","R2N","R2T","R5N","R6N","R36-GPS","RM3-GPS","R2N-GPS","R5N-GPS","R9N-GPS","R5T-GPS","RM3","R36","R9N","N2N","N5N","N6N","NS3","N36","N9N","N9S","LM2","LM5","B2N","B2T","B5N","B5T","BAC","AG2","AG2-HP","AG5","AG5-HP","p2N","p5N","M25","P2B-400","P5B-300","P5B-300-ISO","P5B-400","P5B-400-ISO","P5B-620","LB5-120","LB5","N5B","N5B-16","N5B-19","N5B-300","N5B-400","N5B-Client","N2B","N2B-13","N2B-400","PAP","LAP-HP","LAP","AGW","AGW-LR","AGW-Pro","AGW-Installer","PB5","PB3","P36","PBM10","NB5","NB2","NB3","B36","NB9","SM5","WM5","IS-M5","Loco5AC","NS-5AC","R5AC-PTMP","R5AC-PTP","R5AC-Lite","R5AC-PRISM","R2AC-Prism","R2AC-Gen2","RP-5AC-Gen2","NBE-2AC-13","NBE-5AC-16","NBE-5AC-19","NBE-5AC-Gen2","PBE-5AC-300","PBE-5AC-300-ISO","PBE-5AC-400","PBE-5AC-400-ISO","PBE-5AC-500","PBE-5AC-500-ISO","PBE-5AC-620","PBE-5AC-620-ISO","PBE-2AC-400","PBE-2AC-400-ISO","PBE-5AC-X-Gen2","PBE-5AC-Gen2","PBE-5AC-ISO-Gen2","PBE-5AC-400-ISO-Gen2","LBE-5AC-16-120","LAP-120","LBE-5AC-23","LBE-5AC-Gen2","LBE-5AC-LR","LAP-GPS","IS-5AC","PS-5AC","SolarSwitch","SolarPoint","BulletAC-IP67","B-DB-AC","UNKNOWN"]. Short names, for example UF-OLT.`
var modelFlagName string
if cmdPrefix == "" {
modelFlagName = "model"
} else {
modelFlagName = fmt.Sprintf("%v.model", cmdPrefix)
}
var modelFlagDefault string
_ = cmd.PersistentFlags().String(modelFlagName, modelFlagDefault, modelDescription)
if err := cmd.RegisterFlagCompletionFunc(modelFlagName,
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var res []string
if err := json.Unmarshal([]byte(`["UF-Nano","UF-Loco","UF-Wifi","UF-Instant","UF-OLT","UF-OLT4","UISP-R-Pro","UISP-R-Lite","UNMS-S-Lite","UISP-S-Lite","UISP-S-Pro","UISP-P-Lite","UISP-LTE","ER-X","ER-X-SFP","ERLite-3","ERPoe-5","ERPro-8","ER-8","ER-8-XG","ER-4","ER-6P","ER-12","ER-12P","ER-10X","EP-R8","EP-R6","EP-S16","ES-12F","ES-16-150W","ES-24-250W","ES-24-500W","ES-24-Lite","ES-48-500W","ES-48-750W","ES-48-Lite","ES-8-150W","ES-16-XG","ES-10XP","ES-10X","ES-18X","ES-26X","EP-54V-150W","EP-24V-72W","EP-54V-72W","TSW-PoE","TSW-PoE PRO","ACB-AC","ACB-ISP","ACB-LOCO","AF11FX","AF24","AF24HD","AF2X","AF3X","AF4X","AF5","AF5U","AF5X","AF-5XHD","AF-LTU","LTU-LITE","AF-LTU5","LTU-Rocket","AFLTULR","AF60","AF60-LR","WaveAP","WaveCPE","GBE-LR","GBE","GBE-Plus","GBE-AP","R2N","R2T","R5N","R6N","R36-GPS","RM3-GPS","R2N-GPS","R5N-GPS","R9N-GPS","R5T-GPS","RM3","R36","R9N","N2N","N5N","N6N","NS3","N36","N9N","N9S","LM2","LM5","B2N","B2T","B5N","B5T","BAC","AG2","AG2-HP","AG5","AG5-HP","p2N","p5N","M25","P2B-400","P5B-300","P5B-300-ISO","P5B-400","P5B-400-ISO","P5B-620","LB5-120","LB5","N5B","N5B-16","N5B-19","N5B-300","N5B-400","N5B-Client","N2B","N2B-13","N2B-400","PAP","LAP-HP","LAP","AGW","AGW-LR","AGW-Pro","AGW-Installer","PB5","PB3","P36","PBM10","NB5","NB2","NB3","B36","NB9","SM5","WM5","IS-M5","Loco5AC","NS-5AC","R5AC-PTMP","R5AC-PTP","R5AC-Lite","R5AC-PRISM","R2AC-Prism","R2AC-Gen2","RP-5AC-Gen2","NBE-2AC-13","NBE-5AC-16","NBE-5AC-19","NBE-5AC-Gen2","PBE-5AC-300","PBE-5AC-300-ISO","PBE-5AC-400","PBE-5AC-400-ISO","PBE-5AC-500","PBE-5AC-500-ISO","PBE-5AC-620","PBE-5AC-620-ISO","PBE-2AC-400","PBE-2AC-400-ISO","PBE-5AC-X-Gen2","PBE-5AC-Gen2","PBE-5AC-ISO-Gen2","PBE-5AC-400-ISO-Gen2","LBE-5AC-16-120","LAP-120","LBE-5AC-23","LBE-5AC-Gen2","LBE-5AC-LR","LAP-GPS","IS-5AC","PS-5AC","SolarSwitch","SolarPoint","BulletAC-IP67","B-DB-AC","UNKNOWN"]`), &res); err != nil {
panic(err)
}
return res, cobra.ShellCompDirectiveDefault
}); err != nil {
return err
}
return nil
}
func registerStationName(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
nameDescription := ``
var nameFlagName string
if cmdPrefix == "" {
nameFlagName = "name"
} else {
nameFlagName = fmt.Sprintf("%v.name", cmdPrefix)
}
var nameFlagDefault string
_ = cmd.PersistentFlags().String(nameFlagName, nameFlagDefault, nameDescription)
return nil
}
func registerStationNoiseFloor(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
noiseFloorDescription := `Required. Wireless noise level in dBm`
var noiseFloorFlagName string
if cmdPrefix == "" {
noiseFloorFlagName = "noiseFloor"
} else {
noiseFloorFlagName = fmt.Sprintf("%v.noiseFloor", cmdPrefix)
}
var noiseFloorFlagDefault int64
_ = cmd.PersistentFlags().Int64(noiseFloorFlagName, noiseFloorFlagDefault, noiseFloorDescription)
return nil
}
func registerStationRadio(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
radioDescription := `Enum: ["2.4GHz","3GHz","4GHz","5GHz","11GHz","24GHz","60GHz"]. `
var radioFlagName string
if cmdPrefix == "" {
radioFlagName = "radio"
} else {
radioFlagName = fmt.Sprintf("%v.radio", cmdPrefix)
}
var radioFlagDefault string
_ = cmd.PersistentFlags().String(radioFlagName, radioFlagDefault, radioDescription)
if err := cmd.RegisterFlagCompletionFunc(radioFlagName,
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var res []string
if err := json.Unmarshal([]byte(`["2.4GHz","3GHz","4GHz","5GHz","11GHz","24GHz","60GHz"]`), &res); err != nil {
panic(err)
}
return res, cobra.ShellCompDirectiveDefault
}); err != nil {
return err
}
return nil
}
func registerStationRxBytes(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
rxBytesDescription := `Received bytes.`
var rxBytesFlagName string
if cmdPrefix == "" {
rxBytesFlagName = "rxBytes"
} else {
rxBytesFlagName = fmt.Sprintf("%v.rxBytes", cmdPrefix)
}
var rxBytesFlagDefault int64
_ = cmd.PersistentFlags().Int64(rxBytesFlagName, rxBytesFlagDefault, rxBytesDescription)
return nil
}
func registerStationRxChain(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
// warning: rxChain RxChain array type is not supported by go-swagger cli yet
return nil
}
func registerStationRxChainMask(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
rxChainMaskDescription := `Required. `
var rxChainMaskFlagName string
if cmdPrefix == "" {
rxChainMaskFlagName = "rxChainMask"
} else {
rxChainMaskFlagName = fmt.Sprintf("%v.rxChainMask", cmdPrefix)
}
var rxChainMaskFlagDefault int64
_ = cmd.PersistentFlags().Int64(rxChainMaskFlagName, rxChainMaskFlagDefault, rxChainMaskDescription)
return nil
}
func registerStationRxModulation(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
rxModulationDescription := `Local Rx data rate.`
var rxModulationFlagName string
if cmdPrefix == "" {
rxModulationFlagName = "rxModulation"
} else {
rxModulationFlagName = fmt.Sprintf("%v.rxModulation", cmdPrefix)
}
var rxModulationFlagDefault string
_ = cmd.PersistentFlags().String(rxModulationFlagName, rxModulationFlagDefault, rxModulationDescription)
return nil
}
func registerStationRxRate(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
rxRateDescription := `Current download speed in bps.`
var rxRateFlagName string
if cmdPrefix == "" {
rxRateFlagName = "rxRate"
} else {
rxRateFlagName = fmt.Sprintf("%v.rxRate", cmdPrefix)
}
var rxRateFlagDefault int64
_ = cmd.PersistentFlags().Int64(rxRateFlagName, rxRateFlagDefault, rxRateDescription)
return nil
}
func registerStationRxSignal(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
rxSignalDescription := `Local Signal in dBm.`
var rxSignalFlagName string
if cmdPrefix == "" {
rxSignalFlagName = "rxSignal"
} else {
rxSignalFlagName = fmt.Sprintf("%v.rxSignal", cmdPrefix)
}
var rxSignalFlagDefault int64
_ = cmd.PersistentFlags().Int64(rxSignalFlagName, rxSignalFlagDefault, rxSignalDescription)
return nil
}
func registerStationStatistics(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
var statisticsFlagName string
if cmdPrefix == "" {
statisticsFlagName = "statistics"
} else {
statisticsFlagName = fmt.Sprintf("%v.statistics", cmdPrefix)
}
if err := registerModelStatisticsFlags(depth+1, statisticsFlagName, cmd); err != nil {
return err
}
return nil
}
func registerStationTimestamp(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
timestampDescription := ``
var timestampFlagName string
if cmdPrefix == "" {
timestampFlagName = "timestamp"
} else {
timestampFlagName = fmt.Sprintf("%v.timestamp", cmdPrefix)
}
_ = cmd.PersistentFlags().String(timestampFlagName, "", timestampDescription)
return nil
}
func registerStationTxBytes(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
txBytesDescription := `Transmitted/Sent bytes.`
var txBytesFlagName string
if cmdPrefix == "" {
txBytesFlagName = "txBytes"
} else {
txBytesFlagName = fmt.Sprintf("%v.txBytes", cmdPrefix)
}
var txBytesFlagDefault int64
_ = cmd.PersistentFlags().Int64(txBytesFlagName, txBytesFlagDefault, txBytesDescription)
return nil
}
func registerStationTxChain(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
// warning: txChain TxChain array type is not supported by go-swagger cli yet
return nil
}
func registerStationTxChainMask(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
txChainMaskDescription := `Required. `
var txChainMaskFlagName string
if cmdPrefix == "" {
txChainMaskFlagName = "txChainMask"
} else {
txChainMaskFlagName = fmt.Sprintf("%v.txChainMask", cmdPrefix)
}
var txChainMaskFlagDefault int64
_ = cmd.PersistentFlags().Int64(txChainMaskFlagName, txChainMaskFlagDefault, txChainMaskDescription)
return nil
}
func registerStationTxModulation(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
txModulationDescription := `Remote Rx data rate.`
var txModulationFlagName string
if cmdPrefix == "" {
txModulationFlagName = "txModulation"
} else {
txModulationFlagName = fmt.Sprintf("%v.txModulation", cmdPrefix)
}
var txModulationFlagDefault string
_ = cmd.PersistentFlags().String(txModulationFlagName, txModulationFlagDefault, txModulationDescription)
return nil
}
func registerStationTxRate(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
txRateDescription := `Current download speed in bps.`
var txRateFlagName string
if cmdPrefix == "" {
txRateFlagName = "txRate"
} else {
txRateFlagName = fmt.Sprintf("%v.txRate", cmdPrefix)
}
var txRateFlagDefault int64
_ = cmd.PersistentFlags().Int64(txRateFlagName, txRateFlagDefault, txRateDescription)
return nil
}
func registerStationTxSignal(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
txSignalDescription := `Remote Signal in dBm.`
var txSignalFlagName string
if cmdPrefix == "" {
txSignalFlagName = "txSignal"
} else {
txSignalFlagName = fmt.Sprintf("%v.txSignal", cmdPrefix)
}
var txSignalFlagDefault int64
_ = cmd.PersistentFlags().Int64(txSignalFlagName, txSignalFlagDefault, txSignalDescription)
return nil
}
func registerStationUplinkCapacity(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
uplinkCapacityDescription := ``
var uplinkCapacityFlagName string
if cmdPrefix == "" {
uplinkCapacityFlagName = "uplinkCapacity"
} else {
uplinkCapacityFlagName = fmt.Sprintf("%v.uplinkCapacity", cmdPrefix)
}
var uplinkCapacityFlagDefault int64
_ = cmd.PersistentFlags().Int64(uplinkCapacityFlagName, uplinkCapacityFlagDefault, uplinkCapacityDescription)
return nil
}
func registerStationUptime(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
uptimeDescription := ``
var uptimeFlagName string
if cmdPrefix == "" {
uptimeFlagName = "uptime"
} else {
uptimeFlagName = fmt.Sprintf("%v.uptime", cmdPrefix)
}
var uptimeFlagDefault int64
_ = cmd.PersistentFlags().Int64(uptimeFlagName, uptimeFlagDefault, uptimeDescription)
return nil
}
func registerStationVendor(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
vendorDescription := ``
var vendorFlagName string
if cmdPrefix == "" {
vendorFlagName = "vendor"
} else {
vendorFlagName = fmt.Sprintf("%v.vendor", cmdPrefix)
}
var vendorFlagDefault string
_ = cmd.PersistentFlags().String(vendorFlagName, vendorFlagDefault, vendorDescription)
return nil
}
// retrieveModelStationFlags retrieves flag values from the command and sets them in the model. The boolean return is true if the user passed any flag that filled a model field.
func retrieveModelStationFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
retAdded := false
err, connectedAdded := retrieveStationConnectedFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || connectedAdded
err, deviceIdentificationAdded := retrieveStationDeviceIdentificationFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || deviceIdentificationAdded
err, distanceAdded := retrieveStationDistanceFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || distanceAdded
err, downlinkCapacityAdded := retrieveStationDownlinkCapacityFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || downlinkCapacityAdded
err, firmwareAdded := retrieveStationFirmwareFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || firmwareAdded
err, interfaceIdAdded := retrieveStationInterfaceIDFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || interfaceIdAdded
err, ipAddressAdded := retrieveStationIPAddressFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || ipAddressAdded
err, latencyAdded := retrieveStationLatencyFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || latencyAdded
err, macAdded := retrieveStationMacFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || macAdded
err, modelAdded := retrieveStationModelFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || modelAdded
err, nameAdded := retrieveStationNameFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || nameAdded
err, noiseFloorAdded := retrieveStationNoiseFloorFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || noiseFloorAdded
err, radioAdded := retrieveStationRadioFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || radioAdded
err, rxBytesAdded := retrieveStationRxBytesFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || rxBytesAdded
err, rxChainAdded := retrieveStationRxChainFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || rxChainAdded
err, rxChainMaskAdded := retrieveStationRxChainMaskFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || rxChainMaskAdded
err, rxModulationAdded := retrieveStationRxModulationFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || rxModulationAdded
err, rxRateAdded := retrieveStationRxRateFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || rxRateAdded
err, rxSignalAdded := retrieveStationRxSignalFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || rxSignalAdded
err, statisticsAdded := retrieveStationStatisticsFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || statisticsAdded
err, timestampAdded := retrieveStationTimestampFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || timestampAdded
err, txBytesAdded := retrieveStationTxBytesFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || txBytesAdded
err, txChainAdded := retrieveStationTxChainFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || txChainAdded
err, txChainMaskAdded := retrieveStationTxChainMaskFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || txChainMaskAdded
err, txModulationAdded := retrieveStationTxModulationFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || txModulationAdded
err, txRateAdded := retrieveStationTxRateFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || txRateAdded
err, txSignalAdded := retrieveStationTxSignalFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || txSignalAdded
err, uplinkCapacityAdded := retrieveStationUplinkCapacityFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || uplinkCapacityAdded
err, uptimeAdded := retrieveStationUptimeFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || uptimeAdded
err, vendorAdded := retrieveStationVendorFlags(depth, m, cmdPrefix, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || vendorAdded
return nil, retAdded
}
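// Illustrative only (not generated code): a minimal sketch of how the
// register/retrieve pair for the Station model is typically wired into a
// cobra command. It assumes a generated registerModelStationFlags function
// earlier in this file, mirroring retrieveModelStationFlags above; the
// command name below is an arbitrary placeholder.
func exampleStationCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use: "station-example",
		RunE: func(cmd *cobra.Command, args []string) error {
			var station models.Station
			// Fill the model only from flags the user actually changed.
			if err, _ := retrieveModelStationFlags(0, &station, "", cmd); err != nil {
				return err
			}
			return nil
		},
	}
	// Register one persistent flag per Station field (depth 0, no prefix).
	if err := registerModelStationFlags(0, "", cmd); err != nil {
		panic(err)
	}
	return cmd
}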
func retrieveStationConnectedFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
connectedFlagName := fmt.Sprintf("%v.connected", cmdPrefix)
if cmd.Flags().Changed(connectedFlagName) {
var connectedFlagName string
if cmdPrefix == "" {
connectedFlagName = "connected"
} else {
connectedFlagName = fmt.Sprintf("%v.connected", cmdPrefix)
}
connectedFlagValue, err := cmd.Flags().GetBool(connectedFlagName)
if err != nil {
return err, false
}
m.Connected = connectedFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationDeviceIdentificationFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
deviceIdentificationFlagName := fmt.Sprintf("%v.deviceIdentification", cmdPrefix)
if cmd.Flags().Changed(deviceIdentificationFlagName) {
// info: complex object deviceIdentification DeviceIdentification1 is retrieved outside this Changed() block
}
deviceIdentificationFlagValue := m.DeviceIdentification
if swag.IsZero(deviceIdentificationFlagValue) {
deviceIdentificationFlagValue = &models.DeviceIdentification1{}
}
err, deviceIdentificationAdded := retrieveModelDeviceIdentification1Flags(depth+1, deviceIdentificationFlagValue, deviceIdentificationFlagName, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || deviceIdentificationAdded
if deviceIdentificationAdded {
m.DeviceIdentification = deviceIdentificationFlagValue
}
return nil, retAdded
}
func retrieveStationDistanceFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
distanceFlagName := fmt.Sprintf("%v.distance", cmdPrefix)
if cmd.Flags().Changed(distanceFlagName) {
var distanceFlagName string
if cmdPrefix == "" {
distanceFlagName = "distance"
} else {
distanceFlagName = fmt.Sprintf("%v.distance", cmdPrefix)
}
distanceFlagValue, err := cmd.Flags().GetInt64(distanceFlagName)
if err != nil {
return err, false
}
m.Distance = distanceFlagValue
retAdded = true
}
return nil, retAdded
}
func | (depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
downlinkCapacityFlagName := fmt.Sprintf("%v.downlinkCapacity", cmdPrefix)
if cmd.Flags().Changed(downlinkCapacityFlagName) {
var downlinkCapacityFlagName string
if cmdPrefix == "" {
downlinkCapacityFlagName = "downlinkCapacity"
} else {
downlinkCapacityFlagName = fmt.Sprintf("%v.downlinkCapacity", cmdPrefix)
}
downlinkCapacityFlagValue, err := cmd.Flags().GetInt64(downlinkCapacityFlagName)
if err != nil {
return err, false
}
m.DownlinkCapacity = downlinkCapacityFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationFirmwareFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
firmwareFlagName := fmt.Sprintf("%v.firmware", cmdPrefix)
if cmd.Flags().Changed(firmwareFlagName) {
// info: complex object firmware DeviceFirmware1 is retrieved outside this Changed() block
}
firmwareFlagValue := m.Firmware
if swag.IsZero(firmwareFlagValue) {
firmwareFlagValue = &models.DeviceFirmware1{}
}
err, firmwareAdded := retrieveModelDeviceFirmware1Flags(depth+1, firmwareFlagValue, firmwareFlagName, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || firmwareAdded
if firmwareAdded {
m.Firmware = firmwareFlagValue
}
return nil, retAdded
}
func retrieveStationInterfaceIDFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
interfaceIdFlagName := fmt.Sprintf("%v.interfaceId", cmdPrefix)
if cmd.Flags().Changed(interfaceIdFlagName) {
var interfaceIdFlagName string
if cmdPrefix == "" {
interfaceIdFlagName = "interfaceId"
} else {
interfaceIdFlagName = fmt.Sprintf("%v.interfaceId", cmdPrefix)
}
interfaceIdFlagValue, err := cmd.Flags().GetString(interfaceIdFlagName)
if err != nil {
return err, false
}
m.InterfaceID = interfaceIdFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationIPAddressFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
ipAddressFlagName := fmt.Sprintf("%v.ipAddress", cmdPrefix)
if cmd.Flags().Changed(ipAddressFlagName) {
var ipAddressFlagName string
if cmdPrefix == "" {
ipAddressFlagName = "ipAddress"
} else {
ipAddressFlagName = fmt.Sprintf("%v.ipAddress", cmdPrefix)
}
ipAddressFlagValue, err := cmd.Flags().GetString(ipAddressFlagName)
if err != nil {
return err, false
}
m.IPAddress = ipAddressFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationLatencyFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
latencyFlagName := fmt.Sprintf("%v.latency", cmdPrefix)
if cmd.Flags().Changed(latencyFlagName) {
var latencyFlagName string
if cmdPrefix == "" {
latencyFlagName = "latency"
} else {
latencyFlagName = fmt.Sprintf("%v.latency", cmdPrefix)
}
latencyFlagValue, err := cmd.Flags().GetInt64(latencyFlagName)
if err != nil {
return err, false
}
m.Latency = latencyFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationMacFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
macFlagName := fmt.Sprintf("%v.mac", cmdPrefix)
if cmd.Flags().Changed(macFlagName) {
var macFlagName string
if cmdPrefix == "" {
macFlagName = "mac"
} else {
macFlagName = fmt.Sprintf("%v.mac", cmdPrefix)
}
macFlagValue, err := cmd.Flags().GetString(macFlagName)
if err != nil {
return err, false
}
m.Mac = macFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationModelFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
modelFlagName := fmt.Sprintf("%v.model", cmdPrefix)
if cmd.Flags().Changed(modelFlagName) {
var modelFlagName string
if cmdPrefix == "" {
modelFlagName = "model"
} else {
modelFlagName = fmt.Sprintf("%v.model", cmdPrefix)
}
modelFlagValue, err := cmd.Flags().GetString(modelFlagName)
if err != nil {
return err, false
}
m.Model = modelFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationNameFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
nameFlagName := fmt.Sprintf("%v.name", cmdPrefix)
if cmd.Flags().Changed(nameFlagName) {
var nameFlagName string
if cmdPrefix == "" {
nameFlagName = "name"
} else {
nameFlagName = fmt.Sprintf("%v.name", cmdPrefix)
}
nameFlagValue, err := cmd.Flags().GetString(nameFlagName)
if err != nil {
return err, false
}
m.Name = nameFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationNoiseFloorFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
noiseFloorFlagName := fmt.Sprintf("%v.noiseFloor", cmdPrefix)
if cmd.Flags().Changed(noiseFloorFlagName) {
var noiseFloorFlagName string
if cmdPrefix == "" {
noiseFloorFlagName = "noiseFloor"
} else {
noiseFloorFlagName = fmt.Sprintf("%v.noiseFloor", cmdPrefix)
}
noiseFloorFlagValue, err := cmd.Flags().GetInt64(noiseFloorFlagName)
if err != nil {
return err, false
}
m.NoiseFloor = &noiseFloorFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationRadioFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
radioFlagName := fmt.Sprintf("%v.radio", cmdPrefix)
if cmd.Flags().Changed(radioFlagName) {
var radioFlagName string
if cmdPrefix == "" {
radioFlagName = "radio"
} else {
radioFlagName = fmt.Sprintf("%v.radio", cmdPrefix)
}
radioFlagValue, err := cmd.Flags().GetString(radioFlagName)
if err != nil {
return err, false
}
m.Radio = radioFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationRxBytesFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
rxBytesFlagName := fmt.Sprintf("%v.rxBytes", cmdPrefix)
if cmd.Flags().Changed(rxBytesFlagName) {
var rxBytesFlagName string
if cmdPrefix == "" {
rxBytesFlagName = "rxBytes"
} else {
rxBytesFlagName = fmt.Sprintf("%v.rxBytes", cmdPrefix)
}
rxBytesFlagValue, err := cmd.Flags().GetInt64(rxBytesFlagName)
if err != nil {
return err, false
}
m.RxBytes = rxBytesFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationRxChainFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
rxChainFlagName := fmt.Sprintf("%v.rxChain", cmdPrefix)
if cmd.Flags().Changed(rxChainFlagName) {
// warning: rxChain array type RxChain is not supported by go-swagger cli yet
}
return nil, retAdded
}
func retrieveStationRxChainMaskFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
rxChainMaskFlagName := fmt.Sprintf("%v.rxChainMask", cmdPrefix)
if cmd.Flags().Changed(rxChainMaskFlagName) {
var rxChainMaskFlagName string
if cmdPrefix == "" {
rxChainMaskFlagName = "rxChainMask"
} else {
rxChainMaskFlagName = fmt.Sprintf("%v.rxChainMask", cmdPrefix)
}
rxChainMaskFlagValue, err := cmd.Flags().GetInt64(rxChainMaskFlagName)
if err != nil {
return err, false
}
m.RxChainMask = &rxChainMaskFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationRxModulationFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
rxModulationFlagName := fmt.Sprintf("%v.rxModulation", cmdPrefix)
if cmd.Flags().Changed(rxModulationFlagName) {
var rxModulationFlagName string
if cmdPrefix == "" {
rxModulationFlagName = "rxModulation"
} else {
rxModulationFlagName = fmt.Sprintf("%v.rxModulation", cmdPrefix)
}
rxModulationFlagValue, err := cmd.Flags().GetString(rxModulationFlagName)
if err != nil {
return err, false
}
m.RxModulation = rxModulationFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationRxRateFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
rxRateFlagName := fmt.Sprintf("%v.rxRate", cmdPrefix)
if cmd.Flags().Changed(rxRateFlagName) {
var rxRateFlagName string
if cmdPrefix == "" {
rxRateFlagName = "rxRate"
} else {
rxRateFlagName = fmt.Sprintf("%v.rxRate", cmdPrefix)
}
rxRateFlagValue, err := cmd.Flags().GetInt64(rxRateFlagName)
if err != nil {
return err, false
}
m.RxRate = rxRateFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationRxSignalFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
rxSignalFlagName := fmt.Sprintf("%v.rxSignal", cmdPrefix)
if cmd.Flags().Changed(rxSignalFlagName) {
var rxSignalFlagName string
if cmdPrefix == "" {
rxSignalFlagName = "rxSignal"
} else {
rxSignalFlagName = fmt.Sprintf("%v.rxSignal", cmdPrefix)
}
rxSignalFlagValue, err := cmd.Flags().GetInt64(rxSignalFlagName)
if err != nil {
return err, false
}
m.RxSignal = rxSignalFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationStatisticsFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
statisticsFlagName := fmt.Sprintf("%v.statistics", cmdPrefix)
if cmd.Flags().Changed(statisticsFlagName) {
// info: complex object statistics Statistics is retrieved outside this Changed() block
}
statisticsFlagValue := m.Statistics
if swag.IsZero(statisticsFlagValue) {
statisticsFlagValue = &models.Statistics{}
}
err, statisticsAdded := retrieveModelStatisticsFlags(depth+1, statisticsFlagValue, statisticsFlagName, cmd)
if err != nil {
return err, false
}
retAdded = retAdded || statisticsAdded
if statisticsAdded {
m.Statistics = statisticsFlagValue
}
return nil, retAdded
}
func retrieveStationTimestampFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
timestampFlagName := fmt.Sprintf("%v.timestamp", cmdPrefix)
if cmd.Flags().Changed(timestampFlagName) {
var timestampFlagName string
if cmdPrefix == "" {
timestampFlagName = "timestamp"
} else {
timestampFlagName = fmt.Sprintf("%v.timestamp", cmdPrefix)
}
timestampFlagValueStr, err := cmd.Flags().GetString(timestampFlagName)
if err != nil {
return err, false
}
var timestampFlagValue strfmt.DateTime
if err := timestampFlagValue.UnmarshalText([]byte(timestampFlagValueStr)); err != nil {
return err, false
}
m.Timestamp = timestampFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationTxBytesFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
txBytesFlagName := fmt.Sprintf("%v.txBytes", cmdPrefix)
if cmd.Flags().Changed(txBytesFlagName) {
var txBytesFlagName string
if cmdPrefix == "" {
txBytesFlagName = "txBytes"
} else {
txBytesFlagName = fmt.Sprintf("%v.txBytes", cmdPrefix)
}
txBytesFlagValue, err := cmd.Flags().GetInt64(txBytesFlagName)
if err != nil {
return err, false
}
m.TxBytes = txBytesFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationTxChainFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
txChainFlagName := fmt.Sprintf("%v.txChain", cmdPrefix)
if cmd.Flags().Changed(txChainFlagName) {
// warning: txChain array type TxChain is not supported by go-swagger cli yet
}
return nil, retAdded
}
func retrieveStationTxChainMaskFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
txChainMaskFlagName := fmt.Sprintf("%v.txChainMask", cmdPrefix)
if cmd.Flags().Changed(txChainMaskFlagName) {
var txChainMaskFlagName string
if cmdPrefix == "" {
txChainMaskFlagName = "txChainMask"
} else {
txChainMaskFlagName = fmt.Sprintf("%v.txChainMask", cmdPrefix)
}
txChainMaskFlagValue, err := cmd.Flags().GetInt64(txChainMaskFlagName)
if err != nil {
return err, false
}
m.TxChainMask = &txChainMaskFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationTxModulationFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
txModulationFlagName := fmt.Sprintf("%v.txModulation", cmdPrefix)
if cmd.Flags().Changed(txModulationFlagName) {
var txModulationFlagName string
if cmdPrefix == "" {
txModulationFlagName = "txModulation"
} else {
txModulationFlagName = fmt.Sprintf("%v.txModulation", cmdPrefix)
}
txModulationFlagValue, err := cmd.Flags().GetString(txModulationFlagName)
if err != nil {
return err, false
}
m.TxModulation = txModulationFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationTxRateFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
txRateFlagName := fmt.Sprintf("%v.txRate", cmdPrefix)
if cmd.Flags().Changed(txRateFlagName) {
var txRateFlagName string
if cmdPrefix == "" {
txRateFlagName = "txRate"
} else {
txRateFlagName = fmt.Sprintf("%v.txRate", cmdPrefix)
}
txRateFlagValue, err := cmd.Flags().GetInt64(txRateFlagName)
if err != nil {
return err, false
}
m.TxRate = txRateFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationTxSignalFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
txSignalFlagName := fmt.Sprintf("%v.txSignal", cmdPrefix)
if cmd.Flags().Changed(txSignalFlagName) {
var txSignalFlagName string
if cmdPrefix == "" {
txSignalFlagName = "txSignal"
} else {
txSignalFlagName = fmt.Sprintf("%v.txSignal", cmdPrefix)
}
txSignalFlagValue, err := cmd.Flags().GetInt64(txSignalFlagName)
if err != nil {
return err, false
}
m.TxSignal = txSignalFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationUplinkCapacityFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
uplinkCapacityFlagName := fmt.Sprintf("%v.uplinkCapacity", cmdPrefix)
if cmd.Flags().Changed(uplinkCapacityFlagName) {
var uplinkCapacityFlagName string
if cmdPrefix == "" {
uplinkCapacityFlagName = "uplinkCapacity"
} else {
uplinkCapacityFlagName = fmt.Sprintf("%v.uplinkCapacity", cmdPrefix)
}
uplinkCapacityFlagValue, err := cmd.Flags().GetInt64(uplinkCapacityFlagName)
if err != nil {
return err, false
}
m.UplinkCapacity = uplinkCapacityFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationUptimeFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
uptimeFlagName := fmt.Sprintf("%v.uptime", cmdPrefix)
if cmd.Flags().Changed(uptimeFlagName) {
var uptimeFlagName string
if cmdPrefix == "" {
uptimeFlagName = "uptime"
} else {
uptimeFlagName = fmt.Sprintf("%v.uptime", cmdPrefix)
}
uptimeFlagValue, err := cmd.Flags().GetInt64(uptimeFlagName)
if err != nil {
return err, false
}
m.Uptime = uptimeFlagValue
retAdded = true
}
return nil, retAdded
}
func retrieveStationVendorFlags(depth int, m *models.Station, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
retAdded := false
vendorFlagName := fmt.Sprintf("%v.vendor", cmdPrefix)
if cmd.Flags().Changed(vendorFlagName) {
var vendorFlagName string
if cmdPrefix == "" {
vendorFlagName = "vendor"
} else {
vendorFlagName = fmt.Sprintf("%v.vendor", cmdPrefix)
}
vendorFlagValue, err := cmd.Flags().GetString(vendorFlagName)
if err != nil {
return err, false
}
m.Vendor = vendorFlagValue
retAdded = true
}
return nil, retAdded
}
| retrieveStationDownlinkCapacityFlags |
online_learner.py | # Copyright (c) 2019 Georgia Tech Robot Learning Lab
# Licensed under the MIT License.
from abc import ABC, abstractmethod
class OnlineLearner(ABC):
""" An abstract interface of iterative algorithms. """ | @abstractmethod
def update(self, *args, **kwargs):
""" Update the state given feedback. """
@property
@abstractmethod
def decision(self):
""" Return the (stochastic) decision. """ | |
index.ts | import { createParser, comparePathParserScore } from '@egoist/path-parser'
import type { PathParser } from '@egoist/path-parser'
export type HTTPMethod =
| 'ACL'
| 'BIND'
| 'CHECKOUT'
| 'CONNECT'
| 'COPY'
| 'DELETE'
| 'GET'
| 'HEAD'
| 'LINK'
| 'LOCK'
| 'M-SEARCH'
| 'MERGE'
| 'MKACTIVITY'
| 'MKCALENDAR'
| 'MKCOL'
| 'MOVE'
| 'NOTIFY'
| 'OPTIONS'
| 'PATCH'
| 'POST'
| 'PROPFIND'
| 'PROPPATCH'
| 'PURGE'
| 'PUT'
| 'REBIND'
| 'REPORT'
| 'SEARCH'
| 'SOURCE'
| 'SUBSCRIBE'
| 'TRACE'
| 'UNBIND'
| 'UNLINK'
| 'UNLOCK'
| 'UNSUBSCRIBE'
export interface Route<THandler> {
parser: PathParser
method: string
handlers: THandler[]
}
export { createParser, comparePathParserScore }
const normalizeRoutePath = (path: string) => {
return path.replace('/*', '/:wild(.*)')
}
export type Options = {
/** Sort routes by specificity */
sortRoutes?: boolean |
export class Router<THandler = any> {
routes: Route<THandler>[]
constructor(private opts: Options = {}) {
this.routes = []
}
all = this.add.bind(this, '')
get = this.add.bind(this, 'GET')
head = this.add.bind(this, 'HEAD')
patch = this.add.bind(this, 'PATCH')
options = this.add.bind(this, 'OPTIONS')
connect = this.add.bind(this, 'CONNECT')
delete = this.add.bind(this, 'DELETE')
trace = this.add.bind(this, 'TRACE')
post = this.add.bind(this, 'POST')
put = this.add.bind(this, 'PUT')
use(path: string, ...handlers: THandler[]) {
const parser = createParser(normalizeRoutePath(path))
this.routes.push({ parser, method: '', handlers })
this.sortRoutes()
return this
}
add(method: HTTPMethod | '', path: string, ...handlers: THandler[]) {
const parser = createParser(normalizeRoutePath(path))
this.routes.push({ parser, method, handlers })
this.sortRoutes()
return this
}
sortRoutes() {
if (!this.opts.sortRoutes) return
this.routes = this.routes.sort((a, b) =>
comparePathParserScore(a.parser, b.parser),
)
}
find(
method: HTTPMethod,
url: string,
{ exitOnFirstMatch }: { exitOnFirstMatch?: boolean } = {},
) {
const isHEAD = method === 'HEAD'
const arr = this.routes
const routes: {
handler: THandler
params: Record<string, string | string[]>
}[] = []
for (let i = 0; i < arr.length; i++) {
const tmp = arr[i]
if (
tmp.method.length === 0 ||
tmp.method === method ||
(isHEAD && tmp.method === 'GET')
) {
const match = tmp.parser.parse(url)
if (match) {
for (const handler of tmp.handlers) {
routes.push({ params: match, handler })
}
if (exitOnFirstMatch) {
break
}
}
}
}
return routes
}
} | } |
kube_handler.go | /*
Copyright (c) 2019 Bitnami
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kube
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"github.com/kubeapps/kubeapps/cmd/apprepository-controller/pkg/apis/apprepository/v1alpha1"
apprepoclientset "github.com/kubeapps/kubeapps/cmd/apprepository-controller/pkg/client/clientset/versioned"
v1alpha1typed "github.com/kubeapps/kubeapps/cmd/apprepository-controller/pkg/client/clientset/versioned/typed/apprepository/v1alpha1"
log "github.com/sirupsen/logrus"
authorizationapi "k8s.io/api/authorization/v1"
corev1 "k8s.io/api/core/v1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1"
corev1typed "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
// combinedClientsetInterface provides both the app repository clientset and the corev1 clientset.
type combinedClientsetInterface interface {
KubeappsV1alpha1() v1alpha1typed.KubeappsV1alpha1Interface
CoreV1() corev1typed.CoreV1Interface
AuthorizationV1() authorizationv1.AuthorizationV1Interface
RestClient() rest.Interface
}
// Need to use a type alias to embed the two Clientset's without a name clash.
type kubeClientsetAlias = apprepoclientset.Clientset
type combinedClientset struct {
*kubeClientsetAlias
*kubernetes.Clientset
restCli rest.Interface
}
func (c *combinedClientset) RestClient() rest.Interface {
return c.restCli
}
// kubeHandler handles http requests for operating on app repositories and k8s resources
// in Kubeapps, without exposing implementation details to 3rd party integrations.
type kubeHandler struct {
// The config set internally here cannot be used on its own as a valid
// token is required. Call-sites use configForToken to obtain a valid
// config with a specific token.
config rest.Config
// The namespace in which (currently) app repositories are created.
kubeappsNamespace string
// clientset using the pod serviceaccount
svcClientset combinedClientsetInterface
// clientsetForConfig is a field on the struct only so it can be switched
	// for a fake version when testing. NewHandler sets it to the
// proper function below so that production code always has the real
// version (and since this is a private struct, external code cannot change
// the function).
clientsetForConfig func(*rest.Config) (combinedClientsetInterface, error)
}
// userHandler is an extension of kubeHandler for a specific service account
type userHandler struct {
// The namespace in which (currently) app repositories are created.
kubeappsNamespace string
// clientset using the pod serviceaccount
svcClientset combinedClientsetInterface
	// clientset for the given serviceaccount
clientset combinedClientsetInterface
}
// ValidationResponse represents the response after validating a repo
type ValidationResponse struct {
Code int `json:"code"`
Message string `json:"message"`
}
// This interface is explicitly private so that it cannot be used in function
// args, so that call-sites cannot accidentally pass a service handler in place
// of a user handler.
// TODO(mnelson): We could instead just create a UserHandler interface which embeds
// this one and adds one method, to force call-sites to explicitly use a UserHandler
// or ServiceHandler.
type handler interface {
CreateAppRepository(appRepoBody io.ReadCloser, requestNamespace string) (*v1alpha1.AppRepository, error)
UpdateAppRepository(appRepoBody io.ReadCloser, requestNamespace string) (*v1alpha1.AppRepository, error)
DeleteAppRepository(name, namespace string) error
GetNamespaces() ([]corev1.Namespace, error)
GetSecret(name, namespace string) (*corev1.Secret, error)
GetAppRepository(repoName, repoNamespace string) (*v1alpha1.AppRepository, error)
ValidateAppRepository(appRepoBody io.ReadCloser, requestNamespace string) (*ValidationResponse, error)
GetOperatorLogo(namespace, name string) ([]byte, error)
}
// AuthHandler exposes Handler functionality as a user or the current serviceaccount
type AuthHandler interface {
AsUser(token string) handler
AsSVC() handler
}
func (a *kubeHandler) AsUser(token string) handler {
clientset, err := a.clientsetForConfig(a.configForToken(token))
if err != nil {
log.Errorf("unable to create clientset: %v", err)
}
return &userHandler{
kubeappsNamespace: a.kubeappsNamespace,
svcClientset: a.svcClientset,
clientset: clientset,
}
}
func (a *kubeHandler) AsSVC() handler {
return &userHandler{
kubeappsNamespace: a.kubeappsNamespace,
svcClientset: a.svcClientset,
clientset: a.svcClientset,
}
}
// appRepositoryRequest is used to parse the JSON request
type appRepositoryRequest struct {
AppRepository appRepositoryRequestDetails `json:"appRepository"`
}
type appRepositoryRequestDetails struct {
Name string `json:"name"`
RepoURL string `json:"repoURL"`
AuthHeader string `json:"authHeader"`
CustomCA string `json:"customCA"`
RegistrySecrets []string `json:"registrySecrets"`
SyncJobPodTemplate corev1.PodTemplateSpec `json:"syncJobPodTemplate"`
ResyncRequests uint `json:"resyncRequests"`
}
// ErrGlobalRepositoryWithSecrets defines the error returned when an attempt is
// made to create registry secrets for a global repo.
var ErrGlobalRepositoryWithSecrets = fmt.Errorf("docker registry secrets cannot be set for app repositories available in all namespaces")
// NewHandler returns an AppRepositories and Kubernetes handler configured with
// the in-cluster config but overriding the token with an empty string, so that
// configForToken must be called to obtain a valid config.
func NewHandler(kubeappsNamespace string) (AuthHandler, error) {
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
clientcmd.NewDefaultClientConfigLoadingRules(),
&clientcmd.ConfigOverrides{
AuthInfo: clientcmdapi.AuthInfo{
// These three override their respective file or string
// data.
ClientCertificateData: []byte{},
ClientKeyData: []byte{},
			// A non-empty value is required to override, it seems.
TokenFile: " ",
},
},
)
config, err := clientConfig.ClientConfig()
if err != nil {
return nil, err
}
svcRestConfig, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
svcKubeClient, err := kubernetes.NewForConfig(svcRestConfig)
if err != nil {
return nil, err
}
svcAppRepoClient, err := apprepoclientset.NewForConfig(svcRestConfig)
if err != nil {
return nil, err
}
return &kubeHandler{
config: *config,
kubeappsNamespace: kubeappsNamespace,
// See comment in the struct defn above.
clientsetForConfig: clientsetForConfig,
svcClientset: &combinedClientset{svcAppRepoClient, svcKubeClient, svcKubeClient.RESTClient()},
}, nil
}
// clientsetForConfig returns a clientset using the provided config.
func clientsetForConfig(config *rest.Config) (combinedClientsetInterface, error) {
arclientset, err := apprepoclientset.NewForConfig(config)
if err != nil {
return nil, err
}
coreclientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return &combinedClientset{arclientset, coreclientset, coreclientset.RESTClient()}, nil
}
// configForToken returns a new config for a given auth token.
func (a *kubeHandler) configForToken(token string) *rest.Config {
configCopy := a.config
configCopy.BearerToken = token
return &configCopy
}
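// Note that configForToken copies a.config by value, so setting BearerToken on
// the copy cannot leak one request's token into the shared base config.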
func (a *kubeHandler) clientsetForRequest(token string) (combinedClientsetInterface, error) {
clientset, err := a.clientsetForConfig(a.configForToken(token))
if err != nil {
log.Errorf("unable to create clientset: %v", err)
}
return clientset, err
}
func parseRepoRequest(appRepoBody io.ReadCloser) (*appRepositoryRequest, error) {
var appRepoRequest appRepositoryRequest
err := json.NewDecoder(appRepoBody).Decode(&appRepoRequest)
if err != nil {
log.Infof("unable to decode: %v", err)
return nil, err
}
return &appRepoRequest, nil
}
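// Illustrative only: the JSON shape that parseRepoRequest expects, matching
// the appRepositoryRequest struct above. All field values here are made-up
// examples, not real defaults.
func exampleAppRepoBody() io.ReadCloser {
	body := `{
	  "appRepository": {
	    "name": "bitnami",
	    "repoURL": "https://charts.example.com",
	    "authHeader": "Bearer s3cr3t",
	    "registrySecrets": ["my-registry-secret"]
	  }
	}`
	return ioutil.NopCloser(strings.NewReader(body))
}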
func (a *userHandler) applyAppRepositorySecret(repoSecret *corev1.Secret, requestNamespace string, appRepo *v1alpha1.AppRepository) error {
// TODO: pass request context through from user request to clientset.
_, err := a.clientset.CoreV1().Secrets(requestNamespace).Create(context.TODO(), repoSecret, metav1.CreateOptions{})
if err != nil && k8sErrors.IsAlreadyExists(err) {
_, err = a.clientset.CoreV1().Secrets(requestNamespace).Update(context.TODO(), repoSecret, metav1.UpdateOptions{})
}
if err != nil {
return err
}
// TODO(#1647): Move app repo sync to namespaces so secret copy not required.
if requestNamespace != a.kubeappsNamespace {
repoSecret.ObjectMeta.Name = KubeappsSecretNameForRepo(appRepo.ObjectMeta.Name, appRepo.ObjectMeta.Namespace)
repoSecret.ObjectMeta.OwnerReferences = nil
_, err = a.svcClientset.CoreV1().Secrets(a.kubeappsNamespace).Create(context.TODO(), repoSecret, metav1.CreateOptions{})
if err != nil && k8sErrors.IsAlreadyExists(err) {
_, err = a.clientset.CoreV1().Secrets(a.kubeappsNamespace).Update(context.TODO(), repoSecret, metav1.UpdateOptions{})
}
if err != nil {
return err
}
}
return nil
}
// CreateAppRepository creates an AppRepository resource based on the request data
func (a *userHandler) CreateAppRepository(appRepoBody io.ReadCloser, requestNamespace string) (*v1alpha1.AppRepository, error) {
if a.kubeappsNamespace == "" {
log.Errorf("attempt to use app repositories handler without kubeappsNamespace configured")
return nil, fmt.Errorf("kubeappsNamespace must be configured to enable app repository handler")
}
appRepoRequest, err := parseRepoRequest(appRepoBody)
if err != nil {
return nil, err
}
appRepo := appRepositoryForRequest(appRepoRequest)
if len(appRepo.Spec.DockerRegistrySecrets) > 0 && requestNamespace == a.kubeappsNamespace {
return nil, ErrGlobalRepositoryWithSecrets
}
appRepo, err = a.clientset.KubeappsV1alpha1().AppRepositories(requestNamespace).Create(context.TODO(), appRepo, metav1.CreateOptions{})
if err != nil {
return nil, err
}
repoSecret := secretForRequest(appRepoRequest, appRepo)
if repoSecret != nil {
		err = a.applyAppRepositorySecret(repoSecret, requestNamespace, appRepo)
if err != nil {
return nil, err
}
}
return appRepo, nil
}
// UpdateAppRepository updates an AppRepository resource based on the request data
func (a *userHandler) UpdateAppRepository(appRepoBody io.ReadCloser, requestNamespace string) (*v1alpha1.AppRepository, error) {
if a.kubeappsNamespace == "" {
log.Errorf("attempt to use app repositories handler without kubeappsNamespace configured")
return nil, fmt.Errorf("kubeappsNamespace must be configured to enable app repository handler")
}
appRepoRequest, err := parseRepoRequest(appRepoBody)
if err != nil {
return nil, err
}
appRepo := appRepositoryForRequest(appRepoRequest)
if len(appRepo.Spec.DockerRegistrySecrets) > 0 && requestNamespace == a.kubeappsNamespace {
return nil, ErrGlobalRepositoryWithSecrets
}
existingAppRepo, err := a.clientset.KubeappsV1alpha1().AppRepositories(requestNamespace).Get(context.TODO(), appRepo.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
// Update existing repo with the new spec
existingAppRepo.Spec = appRepo.Spec
appRepo, err = a.clientset.KubeappsV1alpha1().AppRepositories(requestNamespace).Update(context.TODO(), existingAppRepo, metav1.UpdateOptions{})
if err != nil {
return nil, err
}
repoSecret := secretForRequest(appRepoRequest, appRepo)
if repoSecret != nil {
		err = a.applyAppRepositorySecret(repoSecret, requestNamespace, appRepo)
if err != nil {
return nil, err
}
}
return appRepo, nil
}
// DeleteAppRepository deletes an AppRepository resource from a namespace.
func (a *userHandler) DeleteAppRepository(repoName, repoNamespace string) error {
appRepo, err := a.clientset.KubeappsV1alpha1().AppRepositories(repoNamespace).Get(context.TODO(), repoName, metav1.GetOptions{})
if err != nil {
return err
}
hasCredentials := appRepo.Spec.Auth.Header != nil || appRepo.Spec.Auth.CustomCA != nil
err = a.clientset.KubeappsV1alpha1().AppRepositories(repoNamespace).Delete(context.TODO(), repoName, metav1.DeleteOptions{})
if err != nil {
return err
}
// If the app repo was in a namespace other than the kubeapps one, we also delete the copy of
// the repository credentials kept in the kubeapps namespace (the repo credentials in the actual
// namespace should be deleted when the owning app repo is deleted).
if hasCredentials && repoNamespace != a.kubeappsNamespace {
err = a.clientset.CoreV1().Secrets(a.kubeappsNamespace).Delete(context.TODO(), KubeappsSecretNameForRepo(repoName, repoNamespace), metav1.DeleteOptions{})
}
return err
}
func getValidationCliAndReq(appRepoBody io.ReadCloser, requestNamespace, kubeappsNamespace string) (HTTPClient, *http.Request, error) {
appRepoRequest, err := parseRepoRequest(appRepoBody)
if err != nil {
return nil, nil, err
}
appRepo := appRepositoryForRequest(appRepoRequest)
if len(appRepo.Spec.DockerRegistrySecrets) > 0 && requestNamespace == kubeappsNamespace {
// TODO(mnelson): we may also want to validate that any docker registry secrets listed
// already exist in the namespace.
return nil, nil, ErrGlobalRepositoryWithSecrets
}
repoSecret := secretForRequest(appRepoRequest, appRepo)
cli, err := InitNetClient(appRepo, repoSecret, repoSecret, nil)
if err != nil {
return nil, nil, fmt.Errorf("Unable to create HTTP client: %w", err)
}
indexURL := strings.TrimSuffix(strings.TrimSpace(appRepo.Spec.URL), "/") + "/index.yaml"
req, err := http.NewRequest("GET", indexURL, nil)
if err != nil {
return nil, nil, err
}
return cli, req, nil
}
func doValidationRequest(cli HTTPClient, req *http.Request) (*ValidationResponse, error) {
res, err := cli.Do(req)
if err != nil {
		// If the request fails, it's not an internal error
return &ValidationResponse{Code: 400, Message: err.Error()}, nil
}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, fmt.Errorf("Unable to parse validation response. Got: %v", err)
}
return &ValidationResponse{Code: res.StatusCode, Message: string(body)}, nil
}
func (a *userHandler) ValidateAppRepository(appRepoBody io.ReadCloser, requestNamespace string) (*ValidationResponse, error) {
	// Split body parsing into a separate function for ease of testing
cli, req, err := getValidationCliAndReq(appRepoBody, requestNamespace, a.kubeappsNamespace)
if err != nil {
return nil, err
}
return doValidationRequest(cli, req)
}
// GetAppRepository returns an AppRepository resource from a namespace,
// using the clientset (and hence the serviceaccount or token) this handler was created with.
func (a *userHandler) GetAppRepository(repoName, repoNamespace string) (*v1alpha1.AppRepository, error) {
return a.clientset.KubeappsV1alpha1().AppRepositories(repoNamespace).Get(context.TODO(), repoName, metav1.GetOptions{})
}
// appRepositoryForRequest takes care of parsing the request data into an AppRepository.
func appRepositoryForRequest(appRepoRequest *appRepositoryRequest) *v1alpha1.AppRepository {
appRepo := appRepoRequest.AppRepository
var auth v1alpha1.AppRepositoryAuth
if appRepo.AuthHeader != "" || appRepo.CustomCA != "" {
secretName := secretNameForRepo(appRepo.Name)
if appRepo.AuthHeader != "" {
auth.Header = &v1alpha1.AppRepositoryAuthHeader{
SecretKeyRef: corev1.SecretKeySelector{
Key: "authorizationHeader",
LocalObjectReference: corev1.LocalObjectReference{
Name: secretName,
},
},
}
}
if appRepo.CustomCA != "" {
auth.CustomCA = &v1alpha1.AppRepositoryCustomCA{
SecretKeyRef: corev1.SecretKeySelector{
Key: "ca.crt",
LocalObjectReference: corev1.LocalObjectReference{
Name: secretName,
},
},
}
}
}
return &v1alpha1.AppRepository{
ObjectMeta: metav1.ObjectMeta{
Name: appRepo.Name,
},
Spec: v1alpha1.AppRepositorySpec{
URL: appRepo.RepoURL,
Type: "helm",
Auth: auth,
DockerRegistrySecrets: appRepo.RegistrySecrets,
SyncJobPodTemplate: appRepo.SyncJobPodTemplate,
ResyncRequests: appRepo.ResyncRequests,
},
}
}
// secretForRequest takes care of parsing the request data into a secret for an AppRepository.
func secretForRequest(appRepoRequest *appRepositoryRequest, appRepo *v1alpha1.AppRepository) *corev1.Secret {
appRepoDetails := appRepoRequest.AppRepository
secrets := map[string]string{}
if appRepoDetails.AuthHeader != "" {
secrets["authorizationHeader"] = appRepoDetails.AuthHeader
}
if appRepoDetails.CustomCA != "" {
secrets["ca.crt"] = appRepoDetails.CustomCA
}
if len(secrets) == 0 {
return nil
}
blockOwnerDeletion := true
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretNameForRepo(appRepo.Name),
OwnerReferences: []metav1.OwnerReference{
metav1.OwnerReference{
APIVersion: "kubeapps.com/v1alpha1",
Kind: "AppRepository",
Name: appRepo.ObjectMeta.Name,
UID: appRepo.ObjectMeta.UID,
BlockOwnerDeletion: &blockOwnerDeletion,
},
},
},
StringData: secrets,
}
}
func secretNameForRepo(repoName string) string {
return fmt.Sprintf("apprepo-%s", repoName)
}
// KubeappsSecretNameForRepo returns a name suitable for recording a copy of
// a per-namespace repository secret in the kubeapps namespace.
func KubeappsSecretNameForRepo(repoName, namespace string) string {
return fmt.Sprintf("%s-%s", namespace, secretNameForRepo(repoName))
}
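// For example, a repo named "bitnami" in namespace "team-a" yields
// "apprepo-bitnami" from secretNameForRepo and "team-a-apprepo-bitnami"
// from KubeappsSecretNameForRepo.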
func | (userClientset combinedClientsetInterface, namespaces *corev1.NamespaceList) ([]corev1.Namespace, error) {
allowedNamespaces := []corev1.Namespace{}
for _, namespace := range namespaces.Items {
res, err := userClientset.AuthorizationV1().SelfSubjectAccessReviews().Create(context.TODO(), &authorizationapi.SelfSubjectAccessReview{
Spec: authorizationapi.SelfSubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Group: "",
Resource: "secrets",
Verb: "get",
Namespace: namespace.Name,
},
},
}, metav1.CreateOptions{})
if err != nil {
return nil, err
}
if res.Status.Allowed {
allowedNamespaces = append(allowedNamespaces, namespace)
}
}
return allowedNamespaces, nil
}
// GetNamespaces returns the list of namespaces that the user has permission to access
func (a *userHandler) GetNamespaces() ([]corev1.Namespace, error) {
// Try to list namespaces with the user token, for backward compatibility
namespaces, err := a.clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
if k8sErrors.IsForbidden(err) {
// The user doesn't have permissions to list namespaces, use the current serviceaccount
namespaces, err = a.svcClientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
}
if err != nil {
return nil, err
}
}
allowedNamespaces, err := filterAllowedNamespaces(a.clientset, namespaces)
if err != nil {
return nil, err
}
return allowedNamespaces, nil
}
// GetSecret returns a secret from a namespace, using a token if given
func (a *userHandler) GetSecret(name, namespace string) (*corev1.Secret, error) {
return a.clientset.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
// GetOperatorLogo returns the raw icon bytes for the given operator package manifest
func (a *userHandler) GetOperatorLogo(namespace, name string) ([]byte, error) {
return a.clientset.RestClient().Get().AbsPath(fmt.Sprintf("/apis/packages.operators.coreos.com/v1/namespaces/%s/packagemanifests/%s/icon", namespace, name)).Do(context.TODO()).Raw()
}
| filterAllowedNamespaces |
alert_request_conditions.rs | /* | *
* Client Portal Web API
*
* The version of the OpenAPI document: 1.0.0
*
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertRequestConditions {
/// Types: 1-Price, 3-Time, 4-Margin, 5-Trade, 6-Volume, 7: MTA market 8: MTA Position, 9: MTA Acc. Daily PN&
#[serde(rename = "type", skip_serializing_if = "Option::is_none")]
pub _type: Option<i32>,
/// format, conid@exchange
#[serde(rename = "conidex", skip_serializing_if = "Option::is_none")]
pub conidex: Option<String>,
/// optional, operator for the current condition, can be >= or <=
#[serde(rename = "operator", skip_serializing_if = "Option::is_none")]
pub operator: Option<String>,
    /// optional, only some types of conditions have a triggerMethod
#[serde(rename = "triggerMethod", skip_serializing_if = "Option::is_none")]
pub trigger_method: Option<String>,
/// can not be empty, can pass default value \"*\"
#[serde(rename = "value", skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
/// \"a\" means \"AND\", \"o\" means \"OR\", \"n\" means \"END\", the last one condition in the condition array should \"n\"
#[serde(rename = "logicBind", skip_serializing_if = "Option::is_none")]
pub logic_bind: Option<String>,
/// only needed for some MTA alert condition
#[serde(rename = "timeZone", skip_serializing_if = "Option::is_none")]
pub time_zone: Option<String>,
}
impl AlertRequestConditions {
pub fn new() -> AlertRequestConditions {
AlertRequestConditions {
_type: None,
conidex: None,
operator: None,
trigger_method: None,
value: None,
logic_bind: None,
time_zone: None,
}
}
} | * Client Portal Web API |
test_run.py | #!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
from datetime import datetime
import mock
import os
import pytest
import tempfile
import sys
from sacred.run import Run
from sacred.config.config_summary import ConfigSummary
from sacred.utils import (ObserverError, SacredInterrupt, TimeoutInterrupt,
apply_backspaces_and_linefeeds)
@pytest.fixture
def run():
config = {'a': 17, 'foo': {'bar': True, 'baz': False}, 'seed': 1234}
config_mod = ConfigSummary()
signature = mock.Mock()
signature.name = 'main_func'
main_func = mock.Mock(return_value=123, prefix='', signature=signature)
logger = mock.Mock()
observer = [mock.Mock(priority=10)]
return Run(config, config_mod, main_func, observer, logger, logger, {},
{}, [], [])
def test_run_attributes(run):
assert isinstance(run.config, dict)
assert isinstance(run.config_modifications, ConfigSummary)
assert isinstance(run.experiment_info, dict)
assert isinstance(run.host_info, dict)
assert isinstance(run.info, dict)
def test_run_state_attributes(run):
assert run.start_time is None
assert run.stop_time is None
assert run.captured_out == ''
assert run.result is None
def test_run_run(run):
assert run() == 123
assert (run.start_time - datetime.utcnow()).total_seconds() < 1
assert (run.stop_time - datetime.utcnow()).total_seconds() < 1
assert run.result == 123
assert run.captured_out == ''
def test_run_emits_events_if_successful(run):
run()
observer = run.observers[0]
assert observer.started_event.called
assert observer.heartbeat_event.called
assert observer.completed_event.called
assert not observer.interrupted_event.called
assert not observer.failed_event.called
@pytest.mark.parametrize('exception,status', [
(KeyboardInterrupt, 'INTERRUPTED'),
(SacredInterrupt, 'INTERRUPTED'),
(TimeoutInterrupt, 'TIMEOUT'),
])
def test_run_emits_events_if_interrupted(run, exception, status):
observer = run.observers[0]
run.main_function.side_effect = exception
with pytest.raises(exception):
run()
assert observer.started_event.called
assert observer.heartbeat_event.called
assert not observer.completed_event.called
assert observer.interrupted_event.called
observer.interrupted_event.assert_called_with(
interrupt_time=run.stop_time,
status=status)
assert not observer.failed_event.called
def test_run_emits_events_if_failed(run):
observer = run.observers[0]
run.main_function.side_effect = TypeError
with pytest.raises(TypeError):
run()
assert observer.started_event.called
assert observer.heartbeat_event.called
assert not observer.completed_event.called
assert not observer.interrupted_event.called
assert observer.failed_event.called
def test_run_started_event(run):
observer = run.observers[0]
run()
observer.started_event.assert_called_with(
command='main_func',
ex_info=run.experiment_info,
host_info=run.host_info,
start_time=run.start_time,
config=run.config,
meta_info={},
_id=None
)
def test_run_completed_event(run):
observer = run.observers[0]
run()
observer.completed_event.assert_called_with(
stop_time=run.stop_time,
result=run.result
)
def test_run_heartbeat_event(run):
observer = run.observers[0]
run.info['test'] = 321
run()
call_args, call_kwargs = observer.heartbeat_event.call_args_list[0]
assert call_kwargs['info'] == run.info
assert call_kwargs['captured_out'] == ""
assert (call_kwargs['beat_time'] - datetime.utcnow()).total_seconds() < 1
def test_run_artifact_event(run):
observer = run.observers[0]
handle, f_name = tempfile.mkstemp()
metadata = {'testkey': 42}
run.add_artifact(f_name, name='foobar', metadata=metadata)
observer.artifact_event.assert_called_with(filename=f_name, name='foobar', metadata=metadata)
os.close(handle)
os.remove(f_name)
def test_run_resource_event(run):
observer = run.observers[0]
handle, f_name = tempfile.mkstemp()
run.open_resource(f_name)
observer.resource_event.assert_called_with(filename=f_name)
os.close(handle)
os.remove(f_name)
def test_run_cannot_be_started_twice(run):
run()
with pytest.raises(RuntimeError):
run()
def test_run_observer_failure_on_startup_not_caught(run):
observer = run.observers[0]
observer.started_event.side_effect = ObserverError
with pytest.raises(ObserverError):
run()
def test_run_observer_error_in_heartbeat_is_caught(run):
observer = run.observers[0]
observer.heartbeat_event.side_effect = TypeError
run()
assert observer in run._failed_observers
assert observer.started_event.called
assert observer.heartbeat_event.called
assert observer.completed_event.called
def test_run_exception_in_completed_event_is_caught(run):
observer = run.observers[0]
observer2 = mock.Mock(priority=20)
run.observers.append(observer2)
observer.completed_event.side_effect = TypeError
run()
assert observer.completed_event.called
assert observer2.completed_event.called
def | (run):
observer = run.observers[0]
observer2 = mock.Mock(priority=20)
run.observers.append(observer2)
observer.interrupted_event.side_effect = TypeError
run.main_function.side_effect = KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):
run()
assert observer.interrupted_event.called
assert observer2.interrupted_event.called
def test_run_exception_in_failed_event_is_caught(run):
observer = run.observers[0]
observer2 = mock.Mock(priority=20)
run.observers.append(observer2)
observer.failed_event.side_effect = TypeError
run.main_function.side_effect = AttributeError
with pytest.raises(AttributeError):
run()
assert observer.failed_event.called
assert observer2.failed_event.called
def test_unobserved_run_doesnt_emit(run):
observer = run.observers[0]
run.unobserved = True
run()
assert not observer.started_event.called
assert not observer.heartbeat_event.called
assert not observer.completed_event.called
assert not observer.interrupted_event.called
assert not observer.failed_event.called
def test_stdout_capturing_no(run, capsys):
def print_mock_progress():
for i in range(10):
print(i, end="")
sys.stdout.flush()
run.main_function.side_effect = print_mock_progress
run.capture_mode = "no"
with capsys.disabled():
run()
assert run.captured_out == ''
def test_stdout_capturing_sys(run, capsys):
def print_mock_progress():
for i in range(10):
print(i, end="")
sys.stdout.flush()
run.main_function.side_effect = print_mock_progress
run.capture_mode = "sys"
with capsys.disabled():
run()
assert run.captured_out == '0123456789'
# @pytest.mark.skipif(sys.platform.startswith('win'),
# reason="does not work on windows")
@pytest.mark.skip('Breaks randomly on test server')
def test_stdout_capturing_fd(run, capsys):
def print_mock_progress():
for i in range(10):
print(i, end="")
sys.stdout.flush()
run.main_function.side_effect = print_mock_progress
run.capture_mode = "fd"
with capsys.disabled():
run()
assert run.captured_out == '0123456789'
def test_captured_out_filter(run, capsys):
def print_mock_progress():
sys.stdout.write('progress 0')
sys.stdout.flush()
for i in range(10):
sys.stdout.write('\b')
sys.stdout.write(str(i))
sys.stdout.flush()
run.captured_out_filter = apply_backspaces_and_linefeeds
run.main_function.side_effect = print_mock_progress
run.capture_mode = "sys"
with capsys.disabled():
run()
sys.stdout.flush()
assert run.captured_out == 'progress 9'
| test_run_exception_in_interrupted_event_is_caught |
cli_utils.go | package got
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
)
// EnsureCoverage checks per-file test coverage using a profile generated by, for example:
//   go test -coverprofile=coverage.out
// It returns an error if any file's coverage is less than min, where min is a percentage value.
func | (path string, min float64) error {
tmp, _ := ioutil.TempFile("", "")
report := tmp.Name()
defer func() { _ = os.Remove(report) }()
_ = tmp.Close()
_, err := exec.Command("go", "tool", "cover", "-html", path, "-o", report).CombinedOutput()
if err != nil {
return err
}
list := parseReport(report)
rejected := []string{}
for _, c := range list {
if c.coverage < min {
rejected = append(rejected, fmt.Sprintf(" %s (%0.1f%%)", c.path, c.coverage))
}
}
if len(rejected) > 0 {
return fmt.Errorf(
"Test coverage for these files should be greater than %.2f%%:\n%s",
min,
strings.Join(rejected, "\n"),
)
}
return nil
}
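// Illustrative only: a minimal sketch of calling EnsureCoverage from a test,
// assuming coverage.out was produced by `go test -coverprofile=coverage.out`
// and this package is imported as "got":
//
//	if err := got.EnsureCoverage("coverage.out", 80); err != nil {
//		t.Fatal(err)
//	}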
type cov struct {
path string
coverage float64
}
var regCov = regexp.MustCompile(`<option value="file\d+">(.+) \((\d+\.\d+)%\)</option>`)
func parseReport(path string) []cov {
out, _ := ioutil.ReadFile(path)
ms := regCov.FindAllStringSubmatch(string(out), -1)
list := []cov{}
for _, m := range ms {
c, _ := strconv.ParseFloat(m[2], 32)
list = append(list, cov{m[1], c})
}
return list
}
| EnsureCoverage |
app.go | package app
import (
"encoding/json"
"github.com/tendermint/tendermint/libs/log"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/x/auth"
"github.com/cosmos/cosmos-sdk/x/bank"
"github.com/cosmos/cosmos-sdk/x/params"
"github.com/cosmos/cosmos-sdk/x/staking"
"github.com/hot3246624/TCPNetwork/x/tcp"
bam "github.com/cosmos/cosmos-sdk/baseapp"
sdk "github.com/cosmos/cosmos-sdk/types"
abci "github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
tmtypes "github.com/tendermint/tendermint/types"
)
const (
appName = "tcp"
| type tcpApp struct {
*bam.BaseApp
cdc *codec.Codec
keyMain *sdk.KVStoreKey
keyAccount *sdk.KVStoreKey
keyTCP *sdk.KVStoreKey
keyFeeCollection *sdk.KVStoreKey
keyParams *sdk.KVStoreKey
tkeyParams *sdk.TransientStoreKey
accountKeeper auth.AccountKeeper
bankKeeper bank.Keeper
feeCollectionKeeper auth.FeeCollectionKeeper
paramsKeeper params.Keeper
tcpKeeper tcp.Keeper
}
// NewTCPApp is a constructor function for tcpApp
func NewTCPApp(logger log.Logger, db dbm.DB) *tcpApp {
// First define the top level codec that will be shared by the different modules
cdc := MakeCodec()
// BaseApp handles interactions with Tendermint through the ABCI protocol
bApp := bam.NewBaseApp(appName, logger, db, auth.DefaultTxDecoder(cdc))
// Here you initialize your application with the store keys it requires
var app = &tcpApp{
BaseApp: bApp,
cdc: cdc,
keyMain: sdk.NewKVStoreKey("main"),
keyAccount: sdk.NewKVStoreKey("acc"),
keyTCP: sdk.NewKVStoreKey("tcp"),
keyFeeCollection: sdk.NewKVStoreKey("fee_collection"),
keyParams: sdk.NewKVStoreKey("params"),
tkeyParams: sdk.NewTransientStoreKey("transient_params"),
}
// The ParamsKeeper handles parameter storage for the application
app.paramsKeeper = params.NewKeeper(app.cdc, app.keyParams, app.tkeyParams)
// The AccountKeeper handles address -> account lookups
app.accountKeeper = auth.NewAccountKeeper(
app.cdc,
app.keyAccount,
app.paramsKeeper.Subspace(auth.DefaultParamspace),
auth.ProtoBaseAccount,
)
	// The BankKeeper allows you to perform sdk.Coins interactions
app.bankKeeper = bank.NewBaseKeeper(
app.accountKeeper,
app.paramsKeeper.Subspace(bank.DefaultParamspace),
bank.DefaultCodespace,
)
// The FeeCollectionKeeper collects transaction fees and renders them to the fee distribution module
app.feeCollectionKeeper = auth.NewFeeCollectionKeeper(cdc, app.keyFeeCollection)
// The TCPKeeper is the Keeper from the module for this tutorial
// It handles interactions with the tcp
app.tcpKeeper = tcp.NewKeeper(
app.bankKeeper,
app.keyTCP,
app.cdc,
)
// The AnteHandler handles signature verification and transaction pre-processing
app.SetAnteHandler(auth.NewAnteHandler(app.accountKeeper, app.feeCollectionKeeper))
// The app.Router is the main transaction router where each module registers its routes
// Register the bank and tcp routes here
app.Router().
AddRoute("bank", bank.NewHandler(app.bankKeeper)).
AddRoute("tcp", tcp.NewHandler(app.tcpKeeper))
// The app.QueryRouter is the main query router where each module registers its routes
app.QueryRouter().
AddRoute("acc", auth.NewQuerier(app.accountKeeper))
//// The app.QueryRouter is the main query router where each module registers its routes
//app.QueryRouter().
// AddRoute("tcp", tcp.NewQuerier(app.tcpKeeper)).
// AddRoute("acc", auth.NewQuerier(app.accountKeeper))
// The initChainer handles translating the genesis.json file into initial state for the network
app.SetInitChainer(app.initChainer)
app.MountStores(
app.keyMain,
app.keyAccount,
app.keyTCP,
app.keyFeeCollection,
app.keyParams,
app.tkeyParams,
)
err := app.LoadLatestVersion(app.keyMain)
if err != nil {
cmn.Exit(err.Error())
}
return app
}
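// Minimal construction sketch (illustrative; the in-memory DB and nop logger below come from the
// imported tendermint libraries and are a typical test wiring, not this app's production setup):
//
//	app := NewTCPApp(log.NewNopLogger(), dbm.NewMemDB())
//	_ = app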
// GenesisState represents chain state at the start of the chain. Any initial state (account balances) are stored here.
type GenesisState struct {
AuthData auth.GenesisState `json:"auth"`
BankData bank.GenesisState `json:"bank"`
Accounts []*auth.BaseAccount `json:"accounts"`
}
func (app *tcpApp) initChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain {
stateJSON := req.AppStateBytes
genesisState := new(GenesisState)
err := app.cdc.UnmarshalJSON(stateJSON, genesisState)
if err != nil {
panic(err)
}
for _, acc := range genesisState.Accounts {
acc.AccountNumber = app.accountKeeper.GetNextAccountNumber(ctx)
app.accountKeeper.SetAccount(ctx, acc)
}
auth.InitGenesis(ctx, app.accountKeeper, app.feeCollectionKeeper, genesisState.AuthData)
bank.InitGenesis(ctx, app.bankKeeper, genesisState.BankData)
return abci.ResponseInitChain{}
}
// ExportAppStateAndValidators does the things
func (app *tcpApp) ExportAppStateAndValidators() (appState json.RawMessage, validators []tmtypes.GenesisValidator, err error) {
ctx := app.NewContext(true, abci.Header{})
accounts := []*auth.BaseAccount{}
appendAccountsFn := func(acc auth.Account) bool {
account := &auth.BaseAccount{
Address: acc.GetAddress(),
Coins: acc.GetCoins(),
}
accounts = append(accounts, account)
return false
}
app.accountKeeper.IterateAccounts(ctx, appendAccountsFn)
genState := GenesisState{
Accounts: accounts,
AuthData: auth.DefaultGenesisState(),
BankData: bank.DefaultGenesisState(),
}
appState, err = codec.MarshalJSONIndent(app.cdc, genState)
if err != nil {
return nil, nil, err
}
return appState, validators, err
}
// MakeCodec generates the necessary codecs for Amino
func MakeCodec() *codec.Codec {
var cdc = codec.New()
auth.RegisterCodec(cdc)
bank.RegisterCodec(cdc)
tcp.RegisterCodec(cdc)
staking.RegisterCodec(cdc)
sdk.RegisterCodec(cdc)
codec.RegisterCrypto(cdc)
return cdc
} | )
|
setting_v2.go | package api
import (
"github.com/teambition/gear"
"github.com/teambition/urbs-setting/src/tpl"
)
// AssignV2 ..
func (a *Setting) AssignV2(ctx *gear.Context) error {
req := tpl.ProductModuleSettingURL{}
if err := ctx.ParseURL(&req); err != nil {
return err
}
body := tpl.UsersGroupsBodyV2{}
if err := ctx.ParseBody(&body); err != nil {
return err
}
res, err := a.blls.Setting.Assign(ctx, req.Product, req.Module, req.Setting, body.Value, body.Users, body.Groups)
if err != nil |
return ctx.OkJSON(tpl.SettingReleaseInfoRes{Result: *res})
}
| {
return err
} |
dictionary.py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of dictionary-based wrappers around the "vanilla" transforms for IO functions
defined in :py:class:`monai.transforms.io.array`.
Class names are ended with 'd' to denote dictionary-based transforms.
"""
from pathlib import Path
from typing import Optional, Union
import numpy as np
from monai.config import DtypeLike, KeysCollection
from monai.data.image_reader import ImageReader
from monai.transforms.io.array import LoadImage, SaveImage
from monai.transforms.transform import MapTransform
from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, ensure_tuple, ensure_tuple_rep
__all__ = ["LoadImaged", "LoadImageD", "LoadImageDict", "SaveImaged", "SaveImageD", "SaveImageDict"]
class LoadImaged(MapTransform):
"""
    Dictionary-based wrapper of :py:class:`monai.transforms.LoadImage`.
    It can load both image data and metadata. When loading a list of files in one key,
    the arrays will be stacked and a new dimension will be added as the first dimension.
    In this case, the meta data of the first image will be used to represent the stacked result.
    The affine transform of all the stacked images should be the same.
The output metadata field will be created as ``meta_keys`` or ``key_{meta_key_postfix}``.
If reader is not specified, this class automatically chooses readers
based on the supported suffixes and in the following order:
- User-specified reader at runtime when calling this loader.
- User-specified reader in the constructor of `LoadImage`.
- Readers from the last to the first in the registered list.
- Current default readers: (nii, nii.gz -> NibabelReader), (png, jpg, bmp -> PILReader),
(npz, npy -> NumpyReader), (others -> ITKReader).
Note:
- If `reader` is specified, the loader will attempt to use the specified readers and the default supported
          readers. This might introduce overhead from handling the exceptions raised by incompatible loaders,
          so it is recommended to set the most appropriate reader as
the last item of the `reader` parameter.
See also:
- tutorial: https://github.com/Project-MONAI/tutorials/blob/master/modules/load_medical_images.ipynb
"""
def __init__(
self,
keys: KeysCollection,
reader: Optional[Union[ImageReader, str]] = None,
dtype: DtypeLike = np.float32,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
overwriting: bool = False,
image_only: bool = False,
allow_missing_keys: bool = False,
*args,
**kwargs,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
reader: register reader to load image file and meta data, if None, still can register readers
at runtime or use the default readers. If a string of reader name provided, will construct
a reader object with the `*args` and `**kwargs` parameters, supported reader name: "NibabelReader",
"PILReader", "ITKReader", "NumpyReader".
dtype: if not None convert the loaded image data to this data type.
meta_keys: explicitly indicate the key to store the corresponding meta data dictionary.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
meta_key_postfix: if meta_keys is None, use `key_{postfix}` to store the metadata of the nifti image,
default is `meta_dict`. The meta data is a dictionary object.
For example, load nifti file for `image`, store the metadata into `image_meta_dict`.
overwriting: whether allow to overwrite existing meta data of same key.
default is False, which will raise exception if encountering existing key.
            image_only: if True, return a dictionary containing only the image volumes; otherwise return a
                dictionary containing the image data array and header dict per input key.
allow_missing_keys: don't raise exception if key is missing.
args: additional parameters for reader if providing a reader name.
kwargs: additional parameters for reader if providing a reader name.
"""
super().__init__(keys, allow_missing_keys)
self._loader = LoadImage(reader, image_only, dtype, *args, **kwargs)
if not isinstance(meta_key_postfix, str):
raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
if len(self.keys) != len(self.meta_keys):
raise ValueError("meta_keys should have the same length as keys.")
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
self.overwriting = overwriting
def register(self, reader: ImageReader):
self._loader.register(reader)
def __call__(self, data, reader: Optional[ImageReader] = None):
"""
Raises:
KeyError: When not ``self.overwriting`` and key already exists in ``data``.
"""
d = dict(data)
for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
data = self._loader(d[key], reader)
if self._loader.image_only:
if not isinstance(data, np.ndarray):
raise ValueError("loader must return a numpy array (because image_only=True was used).")
d[key] = data
else:
if not isinstance(data, (tuple, list)):
raise ValueError("loader must return a tuple or list (because image_only=False was used).")
d[key] = data[0]
if not isinstance(data[1], dict):
raise ValueError("metadata must be a dict.")
meta_key = meta_key or f"{key}_{meta_key_postfix}"
if meta_key in d and not self.overwriting:
raise KeyError(f"Meta data with key {meta_key} already exists and overwriting=False.")
d[meta_key] = data[1]
return d
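# Illustrative usage of LoadImaged (file names below are hypothetical):
#
#   loader = LoadImaged(keys=["image", "label"])
#   sample = loader({"image": "img.nii.gz", "label": "seg.nii.gz"})
#   # sample["image"] holds the array, sample["image_meta_dict"] the metadata dictionary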
class SaveImaged(MapTransform):
|
LoadImageD = LoadImageDict = LoadImaged
SaveImageD = SaveImageDict = SaveImaged
| """
Dictionary-based wrapper of :py:class:`monai.transforms.SaveImage`.
Note:
Image should be channel-first shape: [C,H,W,[D]].
If the data is a patch of big image, will append the patch index to filename.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
meta_keys: explicitly indicate the key of the corresponding meta data dictionary.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
        meta_key_postfix: if meta_keys is None and `key_{postfix}` was used to store the metadata in `LoadImaged`,
            use this key to extract the metadata when saving images; default is `meta_dict`.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, affine, original_shape, etc.
if no corresponding metadata, set to `None`.
output_dir: output image directory.
output_postfix: a string appended to all output file names, default to `trans`.
output_ext: output file extension name, available extensions: `.nii.gz`, `.nii`, `.png`.
resample: whether to resample before saving the data array.
if saving PNG format image, based on the `spatial_shape` from metadata.
if saving NIfTI format image, based on the `original_affine` from metadata.
mode: This option is used when ``resample = True``. Defaults to ``"nearest"``.
- NIfTI files {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
- PNG files {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode.
See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
padding_mode: This option is used when ``resample = True``. Defaults to ``"border"``.
- NIfTI files {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
- PNG files
This option is ignored.
        scale: {``255``, ``65535``} postprocess data by clipping to [0, 1] and scaling to
[0, 255] (uint8) or [0, 65535] (uint16). Default is None to disable scaling.
it's used for PNG format only.
dtype: data type during resampling computation. Defaults to ``np.float64`` for best precision.
if None, use the data type of input data. To be compatible with other modules,
the output data type is always ``np.float32``.
it's used for NIfTI format only.
output_dtype: data type for saving data. Defaults to ``np.float32``.
it's used for NIfTI format only.
allow_missing_keys: don't raise exception if key is missing.
squeeze_end_dims: if True, any trailing singleton dimensions will be removed (after the channel
has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and
then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false,
image will always be saved as (H,W,D,C).
it's used for NIfTI format only.
data_root_dir: if not empty, it specifies the beginning parts of the input file's
absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from
`data_root_dir` to preserve folder structure when saving in case there are files in different
folders with the same file names. for example:
input_file_name: /foo/bar/test1/image.nii,
output_postfix: seg
output_ext: nii.gz
output_dir: /output,
data_root_dir: /foo/bar,
output will be: /output/test1/image/image_seg.nii.gz
separate_folder: whether to save every file in a separate folder, for example: if input filename is
`image.nii`, postfix is `seg` and folder_path is `output`, if `True`, save as:
`output/image/image_seg.nii`, if `False`, save as `output/image_seg.nii`. default to `True`.
print_log: whether to print log about the saved file path, etc. default to `True`.
"""
def __init__(
self,
keys: KeysCollection,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
output_dir: Union[Path, str] = "./",
output_postfix: str = "trans",
output_ext: str = ".nii.gz",
resample: bool = True,
mode: Union[GridSampleMode, InterpolateMode, str] = "nearest",
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
scale: Optional[int] = None,
dtype: DtypeLike = np.float64,
output_dtype: DtypeLike = np.float32,
allow_missing_keys: bool = False,
squeeze_end_dims: bool = True,
data_root_dir: str = "",
separate_folder: bool = True,
print_log: bool = True,
) -> None:
super().__init__(keys, allow_missing_keys)
self.meta_keys = ensure_tuple_rep(meta_keys, len(self.keys))
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
self._saver = SaveImage(
output_dir=output_dir,
output_postfix=output_postfix,
output_ext=output_ext,
resample=resample,
mode=mode,
padding_mode=padding_mode,
scale=scale,
dtype=dtype,
output_dtype=output_dtype,
squeeze_end_dims=squeeze_end_dims,
data_root_dir=data_root_dir,
separate_folder=separate_folder,
print_log=print_log,
)
def __call__(self, data):
d = dict(data)
for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
if meta_key is None and meta_key_postfix is not None:
meta_key = f"{key}_{meta_key_postfix}"
meta_data = d[meta_key] if meta_key is not None else None
self._saver(img=d[key], meta_data=meta_data)
return d |
validate-example.js | /**
* Copyright 2015 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"use strict";
const through = require('through2');
const gutil = require('gulp-util');
const PluginError = gutil.PluginError;
const os = require('os');
const fs = require('fs');
const cp = require('child_process');
const path = require('path');
/**
* Create an empty example.
*/
module.exports = function() {
return through.obj(function(file, encoding, callback) {
if (file.isNull()) {
return callback(null, file);
}
if (file.isStream()) {
this.emit('error', new PluginError('validate-example',
'Streams not supported!'));
} else if (file.isBuffer()) {
// skip over experiments which will fail validation
if (file.metadata &&
(file.metadata.experiment || file.metadata.skipValidation)) {
gutil.log('Validating ' + file.relative +
': ' + gutil.colors.yellow('IGNORED'));
return callback(null, file);
}
// write file to disk, invoke validator, capture output & cleanup
const inputFilename = path.basename(file.path);
const tmpFile = path.join(os.tmpdir(), inputFilename);
const self = this;
fs.writeFile(tmpFile, file.contents, encoding, function(err) {
if (err) {
return callback(err);
}
const child = cp.spawn(
path.join(__dirname, '../node_modules/.bin/amp-validator'),
['-o', 'json', inputFilename],
{cwd: os.tmpdir()}
);
let output = '';
let error = false;
let timeout = false;
child.stderr.on('data', function(data) {
output += data.toString();
if (output === 'undefined:1') {
timeout = true;
}
});
child.stdout.on('data', function(data) {
output += data.toString().trim(); | return self.emit('error', new PluginError('validate-example',
          'Timeout occurred while fetching AMP for validation. Try again'));
}
let printedOutput = '';
const parsedOutput = JSON.parse(output);
const exampleKey = 'http://localhost:30000/' + inputFilename;
if (parsedOutput[exampleKey].success) {
printedOutput = gutil.colors.green('PASSED');
} else {
const errorList = parsedOutput[exampleKey].errors;
printedOutput = gutil.colors.red('FAILED\n\n');
errorList.forEach(function(item) {
printedOutput += item.line + ': ' + item.reason + '\n';
});
}
gutil.log('Validating ' + file.relative + ': ' +
printedOutput);
if (!error) {
fs.unlink(tmpFile, function() {
if (parsedOutput[exampleKey].success) {
callback();
} else {
self.emit('error', new PluginError('validate-example',
'Example has failed AMP validation'));
}
});
}
});
child.on('error', function() {
error = true;
self.emit('error', new PluginError('validate-example',
'Error invoking amp-validate process'));
});
});
}
});
}; | });
child.on('exit', function() {
if (timeout) { |
explode.rs | use anyhow::{anyhow, Result};
use git_features::progress::{self, Progress};
use git_object::{owned, HashKind};
use git_odb::{loose, pack, Write};
use std::{fs, io::Read, path::Path};
#[derive(PartialEq, Debug)]
pub enum SafetyCheck {
SkipFileChecksumVerification,
SkipFileAndObjectChecksumVerification,
SkipFileAndObjectChecksumVerificationAndNoAbortOnDecodeError,
All,
}
impl Default for SafetyCheck {
fn default() -> Self {
SafetyCheck::All
}
}
impl SafetyCheck {
pub fn variants() -> &'static [&'static str] |
}
impl std::str::FromStr for SafetyCheck {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
"skip-file-checksum" => SafetyCheck::SkipFileChecksumVerification,
"skip-file-and-object-checksum" => SafetyCheck::SkipFileAndObjectChecksumVerification,
"skip-file-and-object-checksum-and-no-abort-on-decode" => {
SafetyCheck::SkipFileAndObjectChecksumVerificationAndNoAbortOnDecodeError
}
"all" => SafetyCheck::All,
_ => return Err(format!("Unknown value for safety check: '{}'", s)),
})
}
}
impl From<SafetyCheck> for pack::index::traverse::SafetyCheck {
fn from(v: SafetyCheck) -> Self {
use pack::index::traverse::SafetyCheck::*;
match v {
SafetyCheck::All => All,
SafetyCheck::SkipFileChecksumVerification => SkipFileChecksumVerification,
SafetyCheck::SkipFileAndObjectChecksumVerification => SkipFileAndObjectChecksumVerification,
SafetyCheck::SkipFileAndObjectChecksumVerificationAndNoAbortOnDecodeError => {
SkipFileAndObjectChecksumVerificationAndNoAbortOnDecodeError
}
}
}
}
use quick_error::quick_error;
quick_error! {
#[derive(Debug)]
enum Error {
Io(err: std::io::Error) {
display("An IO error occurred while writing an object")
source(err)
from()
}
OdbWrite(err: loose::db::write::Error) {
display("An object could not be written to the database")
source(err)
from()
}
Write(err: Box<dyn std::error::Error + Send + Sync>, kind: git_object::Kind, id: owned::Id) {
display("Failed to write {} object {}", kind, id)
source(&**err)
}
Verify(err: loose::object::verify::Error) {
display("Object didn't verify after right after writing it")
source(err)
from()
}
ObjectEncodeMismatch(kind: git_object::Kind, actual: owned::Id, expected: owned::Id) {
display("{} object {} wasn't re-encoded without change - new hash is {}", kind, expected, actual)
}
WrittenFileMissing(id: owned::Id) {
display("The recently written file for loose object {} could not be found", id)
}
WrittenFileCorrupt(err: loose::db::locate::Error, id: owned::Id) {
display("The recently written file for loose object {} cold not be read", id)
source(err)
}
}
}
#[allow(clippy::large_enum_variant)]
enum OutputWriter {
Loose(loose::Db),
Sink(git_odb::Sink),
}
impl git_odb::Write for OutputWriter {
type Error = Error;
fn write_buf(&self, kind: git_object::Kind, from: &[u8], hash: HashKind) -> Result<owned::Id, Self::Error> {
match self {
OutputWriter::Loose(db) => db.write_buf(kind, from, hash).map_err(Into::into),
OutputWriter::Sink(db) => db.write_buf(kind, from, hash).map_err(Into::into),
}
}
fn write_stream(
&self,
kind: git_object::Kind,
size: u64,
from: impl Read,
hash: HashKind,
) -> Result<owned::Id, Self::Error> {
match self {
OutputWriter::Loose(db) => db.write_stream(kind, size, from, hash).map_err(Into::into),
OutputWriter::Sink(db) => db.write_stream(kind, size, from, hash).map_err(Into::into),
}
}
}
impl OutputWriter {
fn new(path: Option<impl AsRef<Path>>, compress: bool) -> Self {
match path {
Some(path) => OutputWriter::Loose(loose::Db::at(path.as_ref())),
None => OutputWriter::Sink(git_odb::sink().compress(compress)),
}
}
}
#[derive(Default)]
pub struct Context {
pub thread_limit: Option<usize>,
pub delete_pack: bool,
pub sink_compress: bool,
pub verify: bool,
}
pub fn pack_or_pack_index(
pack_path: impl AsRef<Path>,
object_path: Option<impl AsRef<Path>>,
check: SafetyCheck,
progress: Option<impl Progress>,
Context {
thread_limit,
delete_pack,
sink_compress,
verify,
}: Context,
) -> Result<()> {
use anyhow::Context;
let path = pack_path.as_ref();
let bundle = pack::Bundle::at(path).with_context(|| {
format!(
"Could not find .idx or .pack file from given file at '{}'",
path.display()
)
})?;
if !object_path.as_ref().map(|p| p.as_ref().is_dir()).unwrap_or(true) {
return Err(anyhow!(
"The object directory at '{}' is inaccessible",
object_path
.expect("path present if no directory on disk")
.as_ref()
.display()
));
}
let algorithm = object_path
.as_ref()
.map(|_| pack::index::traverse::Algorithm::Lookup)
.unwrap_or_else(|| {
if sink_compress {
pack::index::traverse::Algorithm::Lookup
} else {
pack::index::traverse::Algorithm::DeltaTreeLookup
}
});
let mut progress = bundle.index.traverse(
&bundle.pack,
progress,
{
let object_path = object_path.map(|p| p.as_ref().to_owned());
move || {
let out = OutputWriter::new(object_path.clone(), sink_compress);
let object_verifier = if verify {
object_path.as_ref().map(loose::Db::at)
} else {
None
};
move |object_kind, buf, index_entry, progress| {
let written_id = out
.write_buf(object_kind, buf, HashKind::Sha1)
.map_err(|err| Error::Write(Box::new(err) as Box<dyn std::error::Error + Send + Sync>, object_kind, index_entry.oid))?;
if written_id != index_entry.oid {
if let git_object::Kind::Tree = object_kind {
progress.info(format!("The tree in pack named {} was written as {} due to modes 100664 and 100640 rewritten as 100644.", index_entry.oid, written_id));
} else {
return Err(Error::ObjectEncodeMismatch(object_kind, index_entry.oid, written_id))
}
}
if let Some(verifier) = object_verifier.as_ref() {
let mut obj = verifier.locate(written_id.to_borrowed())
.ok_or_else(|| Error::WrittenFileMissing(written_id))?
.map_err(|err| Error::WrittenFileCorrupt(err, written_id))?;
obj.verify_checksum(written_id.to_borrowed())?;
}
Ok(())
}
}
},
pack::cache::DecodeEntryLRU::default,
pack::index::traverse::Options {
algorithm,
thread_limit,
check: check.into(),
},
).map(|(_,_,c)|progress::DoOrDiscard::from(c)).with_context(|| "Failed to explode the entire pack - some loose objects may have been created nonetheless")?;
let (index_path, data_path) = (bundle.index.path().to_owned(), bundle.pack.path().to_owned());
drop(bundle);
if delete_pack {
fs::remove_file(&index_path)
.and_then(|_| fs::remove_file(&data_path))
.with_context(|| {
format!(
"Failed to delete pack index file at '{} or data file at '{}'",
index_path.display(),
data_path.display()
)
})?;
progress.info(format!(
"Removed '{}' and '{}'",
index_path.display(),
data_path.display()
));
}
Ok(())
}
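// Usage sketch (the paths are hypothetical and `progress::Discard` is assumed to be the no-op
// progress type re-exported by `git_features::progress`):
//
//     pack_or_pack_index(
//         "objects/pack/pack-1234.idx",
//         Some("objects"),
//         SafetyCheck::All,
//         Some(progress::Discard),
//         Context::default(),
//     )?;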
| {
&[
"all",
"skip-file-checksum",
"skip-file-and-object-checksum",
"skip-file-and-object-checksum-and-no-abort-on-decode",
]
} |
serializer.go | // Code generated by counterfeiter. DO NOT EDIT.
package fakes
import (
"io"
"code.cloudfoundry.org/lib/serial"
"sync"
)
type Serializer struct {
DecodeAllStub func(file io.ReadSeeker, outData interface{}) error
decodeAllMutex sync.RWMutex
decodeAllArgsForCall []struct {
file io.ReadSeeker
outData interface{}
}
decodeAllReturns struct {
result1 error
}
decodeAllReturnsOnCall map[int]struct {
result1 error
}
EncodeAndOverwriteStub func(file serial.OverwriteableFile, outData interface{}) error
encodeAndOverwriteMutex sync.RWMutex
encodeAndOverwriteArgsForCall []struct {
file serial.OverwriteableFile
outData interface{}
}
encodeAndOverwriteReturns struct {
result1 error
}
encodeAndOverwriteReturnsOnCall map[int]struct {
result1 error
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
func (fake *Serializer) DecodeAll(file io.ReadSeeker, outData interface{}) error {
fake.decodeAllMutex.Lock()
ret, specificReturn := fake.decodeAllReturnsOnCall[len(fake.decodeAllArgsForCall)]
fake.decodeAllArgsForCall = append(fake.decodeAllArgsForCall, struct {
file io.ReadSeeker
outData interface{}
}{file, outData})
fake.recordInvocation("DecodeAll", []interface{}{file, outData})
fake.decodeAllMutex.Unlock()
if fake.DecodeAllStub != nil {
return fake.DecodeAllStub(file, outData)
}
if specificReturn {
return ret.result1
}
return fake.decodeAllReturns.result1
}
func (fake *Serializer) DecodeAllCallCount() int {
fake.decodeAllMutex.RLock()
defer fake.decodeAllMutex.RUnlock()
return len(fake.decodeAllArgsForCall)
}
func (fake *Serializer) DecodeAllArgsForCall(i int) (io.ReadSeeker, interface{}) {
fake.decodeAllMutex.RLock()
defer fake.decodeAllMutex.RUnlock()
return fake.decodeAllArgsForCall[i].file, fake.decodeAllArgsForCall[i].outData
}
func (fake *Serializer) DecodeAllReturns(result1 error) {
fake.DecodeAllStub = nil
fake.decodeAllReturns = struct {
result1 error
}{result1}
}
func (fake *Serializer) DecodeAllReturnsOnCall(i int, result1 error) {
fake.DecodeAllStub = nil
if fake.decodeAllReturnsOnCall == nil {
fake.decodeAllReturnsOnCall = make(map[int]struct {
result1 error
})
}
fake.decodeAllReturnsOnCall[i] = struct {
result1 error
}{result1}
}
func (fake *Serializer) EncodeAndOverwrite(file serial.OverwriteableFile, outData interface{}) error {
fake.encodeAndOverwriteMutex.Lock()
ret, specificReturn := fake.encodeAndOverwriteReturnsOnCall[len(fake.encodeAndOverwriteArgsForCall)]
fake.encodeAndOverwriteArgsForCall = append(fake.encodeAndOverwriteArgsForCall, struct {
file serial.OverwriteableFile
outData interface{}
}{file, outData})
fake.recordInvocation("EncodeAndOverwrite", []interface{}{file, outData})
fake.encodeAndOverwriteMutex.Unlock()
if fake.EncodeAndOverwriteStub != nil {
return fake.EncodeAndOverwriteStub(file, outData)
}
if specificReturn {
return ret.result1
}
return fake.encodeAndOverwriteReturns.result1
}
func (fake *Serializer) EncodeAndOverwriteCallCount() int {
fake.encodeAndOverwriteMutex.RLock()
defer fake.encodeAndOverwriteMutex.RUnlock()
return len(fake.encodeAndOverwriteArgsForCall)
}
func (fake *Serializer) EncodeAndOverwriteArgsForCall(i int) (serial.OverwriteableFile, interface{}) {
fake.encodeAndOverwriteMutex.RLock()
defer fake.encodeAndOverwriteMutex.RUnlock()
return fake.encodeAndOverwriteArgsForCall[i].file, fake.encodeAndOverwriteArgsForCall[i].outData
}
func (fake *Serializer) EncodeAndOverwriteReturns(result1 error) {
fake.EncodeAndOverwriteStub = nil
fake.encodeAndOverwriteReturns = struct { | result1 error
}{result1}
}
func (fake *Serializer) EncodeAndOverwriteReturnsOnCall(i int, result1 error) {
fake.EncodeAndOverwriteStub = nil
if fake.encodeAndOverwriteReturnsOnCall == nil {
fake.encodeAndOverwriteReturnsOnCall = make(map[int]struct {
result1 error
})
}
fake.encodeAndOverwriteReturnsOnCall[i] = struct {
result1 error
}{result1}
}
func (fake *Serializer) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.decodeAllMutex.RLock()
defer fake.decodeAllMutex.RUnlock()
fake.encodeAndOverwriteMutex.RLock()
defer fake.encodeAndOverwriteMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
}
return copiedInvocations
}
func (fake *Serializer) recordInvocation(key string, args []interface{}) {
fake.invocationsMutex.Lock()
defer fake.invocationsMutex.Unlock()
if fake.invocations == nil {
fake.invocations = map[string][][]interface{}{}
}
if fake.invocations[key] == nil {
fake.invocations[key] = [][]interface{}{}
}
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ serial.Serializer = new(Serializer) | |
ejercicio8.py | def votoElecciones():
print("Como saber si puedes votar por tu edad")
mensaje =""
edadP=int(input("ingrese la edad que tiene:")) | if edadP>=18:
mensaje ="Usted esta apto para votar"
else:
mensaje ="Usted no cumple con la edadad minima y no esta apto para votar"
print(mensaje)
votoElecciones() | |
unnecessary_to_owned.rs | use super::implicit_clone::is_clone_like;
use super::unnecessary_iter_cloned::{self, is_into_iter};
use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::{get_associated_type, get_iterator_item_ty, implements_trait, is_copy, peel_mid_ty_refs};
use clippy_utils::{fn_def_id, get_parent_expr, is_diag_item_method, is_diag_trait_item};
use rustc_errors::Applicability;
use rustc_hir::{def_id::DefId, BorrowKind, Expr, ExprKind};
use rustc_lint::LateContext;
use rustc_middle::mir::Mutability;
use rustc_middle::ty::adjustment::{Adjust, Adjustment, OverloadedDeref};
use rustc_middle::ty::subst::{GenericArg, GenericArgKind, SubstsRef};
use rustc_middle::ty::{self, PredicateKind, ProjectionPredicate, TraitPredicate, Ty};
use rustc_span::{sym, Symbol};
use std::cmp::max;
use super::UNNECESSARY_TO_OWNED;
pub fn check<'tcx>(cx: &LateContext<'tcx>, expr: &'tcx Expr<'tcx>, method_name: Symbol, args: &'tcx [Expr<'tcx>]) {
if_chain! {
if let Some(method_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id);
if let [receiver] = args;
then {
if is_cloned_or_copied(cx, method_name, method_def_id) {
unnecessary_iter_cloned::check(cx, expr, method_name, receiver);
} else if is_to_owned_like(cx, method_name, method_def_id) {
// At this point, we know the call is of a `to_owned`-like function. The functions
                // `check_addr_of_expr`, `check_into_iter_call_arg` and `check_other_call_arg` determine whether
                // the call is unnecessary based on its context, that is, whether it is a referent in an `AddrOf`
                // expression, an argument in an `into_iter` call, or an argument in the call of some other function.
if check_addr_of_expr(cx, expr, method_name, method_def_id, receiver) {
return;
}
if check_into_iter_call_arg(cx, expr, method_name, receiver) {
return;
}
check_other_call_arg(cx, expr, method_name, receiver);
}
}
}
}
/// Checks whether `expr` is a referent in an `AddrOf` expression and, if so, determines whether its
/// call of a `to_owned`-like function is unnecessary.
#[allow(clippy::too_many_lines)]
fn check_addr_of_expr(
cx: &LateContext<'_>,
expr: &Expr<'_>,
method_name: Symbol,
method_def_id: DefId,
receiver: &Expr<'_>,
) -> bool {
if_chain! {
if let Some(parent) = get_parent_expr(cx, expr);
if let ExprKind::AddrOf(BorrowKind::Ref, Mutability::Not, _) = parent.kind;
let adjustments = cx.typeck_results().expr_adjustments(parent).iter().collect::<Vec<_>>();
if let Some(target_ty) = match adjustments[..]
{
// For matching uses of `Cow::from`
[
Adjustment {
kind: Adjust::Deref(None),
..
},
Adjustment {
kind: Adjust::Borrow(_),
target: target_ty,
},
]
// For matching uses of arrays
| [
Adjustment {
kind: Adjust::Deref(None),
..
},
Adjustment {
kind: Adjust::Borrow(_),
..
},
Adjustment {
kind: Adjust::Pointer(_),
target: target_ty,
},
]
// For matching everything else
| [
Adjustment {
kind: Adjust::Deref(None),
..
},
Adjustment {
kind: Adjust::Deref(Some(OverloadedDeref { .. })),
..
},
Adjustment {
kind: Adjust::Borrow(_),
target: target_ty,
},
] => Some(target_ty),
_ => None,
};
let receiver_ty = cx.typeck_results().expr_ty(receiver);
// Only flag cases where the receiver is copyable or the method is `Cow::into_owned`. This
// restriction is to ensure there is not overlap between `redundant_clone` and this lint.
if is_copy(cx, receiver_ty) || is_cow_into_owned(cx, method_name, method_def_id);
if let Some(receiver_snippet) = snippet_opt(cx, receiver.span);
then {
let (target_ty, n_target_refs) = peel_mid_ty_refs(*target_ty);
let (receiver_ty, n_receiver_refs) = peel_mid_ty_refs(receiver_ty);
if receiver_ty == target_ty && n_target_refs >= n_receiver_refs {
span_lint_and_sugg(
cx,
UNNECESSARY_TO_OWNED,
parent.span,
&format!("unnecessary use of `{}`", method_name),
"use",
format!("{:&>width$}{}", "", receiver_snippet, width = n_target_refs - n_receiver_refs),
Applicability::MachineApplicable,
);
return true;
}
if_chain! {
if let Some(deref_trait_id) = cx.tcx.get_diagnostic_item(sym::Deref);
if implements_trait(cx, receiver_ty, deref_trait_id, &[]);
if get_associated_type(cx, receiver_ty, deref_trait_id, "Target") == Some(target_ty);
then {
if n_receiver_refs > 0 {
span_lint_and_sugg(
cx,
UNNECESSARY_TO_OWNED,
parent.span,
&format!("unnecessary use of `{}`", method_name),
"use",
receiver_snippet,
Applicability::MachineApplicable,
);
} else {
span_lint_and_sugg(
cx,
UNNECESSARY_TO_OWNED,
expr.span.with_lo(receiver.span.hi()),
&format!("unnecessary use of `{}`", method_name),
"remove this",
String::new(),
Applicability::MachineApplicable,
);
}
return true;
}
}
if_chain! {
if let Some(as_ref_trait_id) = cx.tcx.get_diagnostic_item(sym::AsRef);
if implements_trait(cx, receiver_ty, as_ref_trait_id, &[GenericArg::from(target_ty)]);
then {
span_lint_and_sugg(
cx,
UNNECESSARY_TO_OWNED,
parent.span,
&format!("unnecessary use of `{}`", method_name),
"use",
format!("{}.as_ref()", receiver_snippet),
Applicability::MachineApplicable,
);
return true;
}
}
}
}
false
}
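// Illustrative example (an assumption for exposition, not taken from the lint's test suite) of the
// pattern the next check targets:
//
//     let v: Vec<u32> = vec![1, 2, 3];
//     for x in v.to_vec().into_iter() { /* ... */ }
//     // suggested replacement: for x in v.iter().copied() { /* ... */ }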
/// Checks whether `expr` is an argument in an `into_iter` call and, if so, determines whether its
/// call of a `to_owned`-like function is unnecessary.
fn check_into_iter_call_arg(cx: &LateContext<'_>, expr: &Expr<'_>, method_name: Symbol, receiver: &Expr<'_>) -> bool |
/// Checks whether `expr` is an argument in a function call and, if so, determines whether its call
/// of a `to_owned`-like function is unnecessary.
fn check_other_call_arg<'tcx>(
cx: &LateContext<'tcx>,
expr: &'tcx Expr<'tcx>,
method_name: Symbol,
receiver: &'tcx Expr<'tcx>,
) -> bool {
if_chain! {
if let Some((maybe_call, maybe_arg)) = skip_addr_of_ancestors(cx, expr);
if let Some((callee_def_id, call_substs, call_args)) = get_callee_substs_and_args(cx, maybe_call);
let fn_sig = cx.tcx.fn_sig(callee_def_id).skip_binder();
if let Some(i) = call_args.iter().position(|arg| arg.hir_id == maybe_arg.hir_id);
if let Some(input) = fn_sig.inputs().get(i);
let (input, n_refs) = peel_mid_ty_refs(*input);
if let (trait_predicates, projection_predicates) = get_input_traits_and_projections(cx, callee_def_id, input);
if let Some(sized_def_id) = cx.tcx.lang_items().sized_trait();
if let [trait_predicate] = trait_predicates
.iter()
.filter(|trait_predicate| trait_predicate.def_id() != sized_def_id)
.collect::<Vec<_>>()[..];
if let Some(deref_trait_id) = cx.tcx.get_diagnostic_item(sym::Deref);
if let Some(as_ref_trait_id) = cx.tcx.get_diagnostic_item(sym::AsRef);
let receiver_ty = cx.typeck_results().expr_ty(receiver);
// If the callee has type parameters, they could appear in `projection_predicate.ty` or the
// types of `trait_predicate.trait_ref.substs`.
if if trait_predicate.def_id() == deref_trait_id {
if let [projection_predicate] = projection_predicates[..] {
let normalized_ty =
cx.tcx.subst_and_normalize_erasing_regions(call_substs, cx.param_env, projection_predicate.term);
implements_trait(cx, receiver_ty, deref_trait_id, &[])
&& get_associated_type(cx, receiver_ty, deref_trait_id,
"Target").map_or(false, |ty| ty::Term::Ty(ty) == normalized_ty)
} else {
false
}
} else if trait_predicate.def_id() == as_ref_trait_id {
let composed_substs = compose_substs(
cx,
&trait_predicate.trait_ref.substs.iter().skip(1).collect::<Vec<_>>()[..],
call_substs
);
implements_trait(cx, receiver_ty, as_ref_trait_id, &composed_substs)
} else {
false
};
// We can't add an `&` when the trait is `Deref` because `Target = &T` won't match
// `Target = T`.
if n_refs > 0 || is_copy(cx, receiver_ty) || trait_predicate.def_id() != deref_trait_id;
let n_refs = max(n_refs, if is_copy(cx, receiver_ty) { 0 } else { 1 });
if let Some(receiver_snippet) = snippet_opt(cx, receiver.span);
then {
span_lint_and_sugg(
cx,
UNNECESSARY_TO_OWNED,
maybe_arg.span,
&format!("unnecessary use of `{}`", method_name),
"use",
format!("{:&>width$}{}", "", receiver_snippet, width = n_refs),
Applicability::MachineApplicable,
);
return true;
}
}
false
}
/// Walks an expression's ancestors until it finds a non-`AddrOf` expression. Returns the first such
/// expression found (if any) along with the immediately prior expression.
fn skip_addr_of_ancestors<'tcx>(
cx: &LateContext<'tcx>,
mut expr: &'tcx Expr<'tcx>,
) -> Option<(&'tcx Expr<'tcx>, &'tcx Expr<'tcx>)> {
while let Some(parent) = get_parent_expr(cx, expr) {
if let ExprKind::AddrOf(BorrowKind::Ref, Mutability::Not, _) = parent.kind {
expr = parent;
} else {
return Some((parent, expr));
}
}
None
}
/// Checks whether an expression is a function or method call and, if so, returns its `DefId`,
/// `Substs`, and arguments.
fn get_callee_substs_and_args<'tcx>(
cx: &LateContext<'tcx>,
expr: &'tcx Expr<'tcx>,
) -> Option<(DefId, SubstsRef<'tcx>, &'tcx [Expr<'tcx>])> {
if_chain! {
if let ExprKind::Call(callee, args) = expr.kind;
let callee_ty = cx.typeck_results().expr_ty(callee);
if let ty::FnDef(callee_def_id, _) = callee_ty.kind();
then {
let substs = cx.typeck_results().node_substs(callee.hir_id);
return Some((*callee_def_id, substs, args));
}
}
if_chain! {
if let ExprKind::MethodCall(_, args, _) = expr.kind;
if let Some(method_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id);
then {
let substs = cx.typeck_results().node_substs(expr.hir_id);
return Some((method_def_id, substs, args));
}
}
None
}
/// Returns the `TraitPredicate`s and `ProjectionPredicate`s for a function's input type.
fn get_input_traits_and_projections<'tcx>(
cx: &LateContext<'tcx>,
callee_def_id: DefId,
input: Ty<'tcx>,
) -> (Vec<TraitPredicate<'tcx>>, Vec<ProjectionPredicate<'tcx>>) {
let mut trait_predicates = Vec::new();
let mut projection_predicates = Vec::new();
for (predicate, _) in cx.tcx.predicates_of(callee_def_id).predicates.iter() {
// `substs` should have 1 + n elements. The first is the type on the left hand side of an
// `as`. The remaining n are trait parameters.
let is_input_substs = |substs: SubstsRef<'tcx>| {
if_chain! {
if let Some(arg) = substs.iter().next();
if let GenericArgKind::Type(arg_ty) = arg.unpack();
if arg_ty == input;
then {
true
} else {
false
}
}
};
match predicate.kind().skip_binder() {
PredicateKind::Trait(trait_predicate) => {
if is_input_substs(trait_predicate.trait_ref.substs) {
trait_predicates.push(trait_predicate);
}
},
PredicateKind::Projection(projection_predicate) => {
if is_input_substs(projection_predicate.projection_ty.substs) {
projection_predicates.push(projection_predicate);
}
},
_ => {},
}
}
(trait_predicates, projection_predicates)
}
/// Composes two substitutions by applying the latter to the types of the former.
fn compose_substs<'tcx>(
cx: &LateContext<'tcx>,
left: &[GenericArg<'tcx>],
right: SubstsRef<'tcx>,
) -> Vec<GenericArg<'tcx>> {
left.iter()
.map(|arg| {
if let GenericArgKind::Type(arg_ty) = arg.unpack() {
let normalized_ty = cx.tcx.subst_and_normalize_erasing_regions(right, cx.param_env, arg_ty);
GenericArg::from(normalized_ty)
} else {
*arg
}
})
.collect()
}
/// Returns true if the named method is `Iterator::cloned` or `Iterator::copied`.
fn is_cloned_or_copied(cx: &LateContext<'_>, method_name: Symbol, method_def_id: DefId) -> bool {
(method_name.as_str() == "cloned" || method_name.as_str() == "copied")
&& is_diag_trait_item(cx, method_def_id, sym::Iterator)
}
/// Returns true if the named method can be used to convert the receiver to its "owned"
/// representation.
fn is_to_owned_like(cx: &LateContext<'_>, method_name: Symbol, method_def_id: DefId) -> bool {
is_clone_like(cx, &*method_name.as_str(), method_def_id)
|| is_cow_into_owned(cx, method_name, method_def_id)
|| is_to_string(cx, method_name, method_def_id)
}
/// Returns true if the named method is `Cow::into_owned`.
fn is_cow_into_owned(cx: &LateContext<'_>, method_name: Symbol, method_def_id: DefId) -> bool {
method_name.as_str() == "into_owned" && is_diag_item_method(cx, method_def_id, sym::Cow)
}
/// Returns true if the named method is `ToString::to_string`.
fn is_to_string(cx: &LateContext<'_>, method_name: Symbol, method_def_id: DefId) -> bool {
method_name.as_str() == "to_string" && is_diag_trait_item(cx, method_def_id, sym::ToString)
}
| {
if_chain! {
if let Some(parent) = get_parent_expr(cx, expr);
if let Some(callee_def_id) = fn_def_id(cx, parent);
if is_into_iter(cx, callee_def_id);
if let Some(iterator_trait_id) = cx.tcx.get_diagnostic_item(sym::Iterator);
let parent_ty = cx.typeck_results().expr_ty(parent);
if implements_trait(cx, parent_ty, iterator_trait_id, &[]);
if let Some(item_ty) = get_iterator_item_ty(cx, parent_ty);
if let Some(receiver_snippet) = snippet_opt(cx, receiver.span);
then {
if unnecessary_iter_cloned::check_for_loop_iter(
cx,
parent,
method_name,
receiver,
true,
) {
return true;
}
let cloned_or_copied = if is_copy(cx, item_ty) {
"copied"
} else {
"cloned"
};
// The next suggestion may be incorrect because the removal of the `to_owned`-like
// function could cause the iterator to hold a reference to a resource that is used
// mutably. See https://github.com/rust-lang/rust-clippy/issues/8148.
span_lint_and_sugg(
cx,
UNNECESSARY_TO_OWNED,
parent.span,
&format!("unnecessary use of `{}`", method_name),
"use",
format!("{}.iter().{}()", receiver_snippet, cloned_or_copied),
Applicability::MaybeIncorrect,
);
return true;
}
}
false
} |
main.go | package main
import (
"fmt"
"strings"
"github.com/osbuild/image-builder/internal/composer"
"github.com/osbuild/image-builder/internal/config"
"github.com/osbuild/image-builder/internal/db"
"github.com/osbuild/image-builder/internal/logger"
v1 "github.com/osbuild/image-builder/internal/v1"
"github.com/labstack/echo/v4"
)
func main() | {
conf := config.ImageBuilderConfig{
ListenAddress: "localhost:8086",
LogLevel: "INFO",
PGHost: "localhost",
PGPort: "5432",
PGDatabase: "imagebuilder",
PGUser: "postgres",
PGPassword: "foobar",
PGSSLMode: "prefer",
}
err := config.LoadConfigFromEnv(&conf)
if err != nil {
panic(err)
}
log, err := logger.NewLogger(conf.LogLevel, conf.CwAccessKeyID, conf.CwSecretAccessKey, conf.CwRegion, conf.LogGroup)
if err != nil {
panic(err)
}
connStr := fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=%s", conf.PGUser, conf.PGPassword, conf.PGHost, conf.PGPort, conf.PGDatabase, conf.PGSSLMode)
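	// With the default config above this yields, for example:
	//   postgres://postgres:foobar@localhost:5432/imagebuilder?sslmode=prefer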
dbase, err := db.InitDBConnectionPool(connStr)
if err != nil {
panic(err)
}
client, err := composer.NewClient(conf.ComposerURL, conf.ComposerTokenURL, conf.ComposerOfflineToken)
if err != nil {
panic(err)
}
// Make a slice of allowed organization ids, '*' in the slice means blanket permission
orgIds := []string{}
if conf.OrgIds != "" {
orgIds = strings.Split(conf.OrgIds, ";")
}
	// Make a slice of allowed account numbers, '*' in the slice means blanket permission
accountNumbers := []string{}
if conf.AccountNumbers != "" {
accountNumbers = strings.Split(conf.AccountNumbers, ";")
}
aws := v1.AWSConfig{
Region: conf.OsbuildRegion,
}
gcp := v1.GCPConfig{
Region: conf.OsbuildGCPRegion,
Bucket: conf.OsbuildGCPBucket,
}
azure := v1.AzureConfig{
Location: conf.OsbuildAzureLocation,
}
echoServer := echo.New()
err = v1.Attach(echoServer, log, client, dbase, aws, gcp, azure, orgIds, accountNumbers, conf.DistributionsDir, conf.QuotaFile)
if err != nil {
panic(err)
}
log.Infof("🚀 Starting image-builder server on %v ...\n", conf.ListenAddress)
err = echoServer.Start(conf.ListenAddress)
if err != nil {
panic(err)
}
}
|
|
Form.tsx | import React from 'react'
function Form({ option }) {
return (
<form className='account-form' onSubmit={(evt) => evt.preventDefault()}>
<div className={'account-form-fields ' + (option === 1 ? 'sign-in' : (option === 2 ? 'sign-up' : 'forgot')) }>
<input id='email' name='email' type='email' placeholder='E-mail' required />
        <input id='password' name='password' type='password' placeholder='Password' required={option === 1 || option === 2} disabled={option === 3} />
        <input id='repeat-password' name='repeat-password' type='password' placeholder='Repeat password' required={option === 2} disabled={option === 1 || option === 3} />
</div> | </button>
</form>
)
}
export default Form | <button className='btn-submit-form' type='submit'>
{ option === 1 ? 'Sign in' : (option === 2 ? 'Sign up' : 'Reset password') } |
app.py | from flask import Flask, render_template
from flask import request
from random import randint
app = Flask("Cluster Maker")
@app.route("/")
def home():
print("cluster ")
#return "Cluster Maker"
return render_template("index.html")
@app.route("/setdefault")
def setdefaults():
return render_template("default.html")
@app.route("/selectpage")
def selectpage():
return render_template("selectpage.html")
@app.route("/createdefaultcluster")
def defaultsclustermaker():
return "Creating Default Cluster"
@app.route("/createadvancecluster")
def advanceclustermaker():
return render_template("advanceform.html")
@app.route("/defaultcost")
def defaultcost():
|
@app.route("/login")
def accountauth():
return "login Page"
@app.route("/signup")
def createaccount():
return "Signup Page"
@app.route("/findcost")
def findcost():
return render_template("cost_analyser.html")
@app.route("/chooseform")
def chooseform():
return render_template("choice.html")
@app.route("/defaultdone")
def defaultdone():
return render_template("defaultdone.html")
@app.route("/costanalysis")
def analysecost():
nn = request.args.get("nn_instance_type")
dn = request.args.get("dn_instance_type")
jt = request.args.get("jt_instance_type")
tt = request.args.get("tt_instance_type")
dnc = request.args.get("dn_count")
ttc = request.args.get("tt_count")
ebs = request.args.get("ebs")
if ebs == "yes":
size = request.args.get("ebssize")
else:
size=0
usr_m = (int(dnc) + int(ttc) +2) * 0.5 + int(size) * 0.1
inr_m = usr_m*73
return " data : Cost Analysis {} {} {} {} {} {} {} {} <br> <br> Total Cost: {} $ or Rs {} ".format(nn,dn,dnc,jt,tt,ttc,ebs,size,usr_m,inr_m)
@app.route("/defaultform")
def defaultform():
print("Default Form")
return "Default Form"
@app.route("/advanceform")
def advanceform():
print("Advance Form")
return "Advance Form"
def sendotpmail(otp,email):
#ansible mail
print("send mail")
@app.route("/loginotp")
def getotp():
otp=randint(100000,999999)
email = request.args.get("email")
sendotpmail(otp,email)
return "ok" | return "Default cost Output" |
proposal.rs | // Copyright 2018-2020 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::time::{Duration, SystemTime};
use actix_web::{error, web, Error, HttpResponse};
use gameroom_database::{
helpers,
models::{GameroomMember, GameroomProposal},
ConnectionPool,
};
use openssl::hash::{hash, MessageDigest};
use protobuf::Message;
use splinter::admin::messages::CircuitProposalVote;
use splinter::node_registry::Node;
use splinter::protos::admin::{
CircuitManagementPayload, CircuitManagementPayload_Action as Action,
CircuitManagementPayload_Header as Header,
};
use super::{
get_response_paging_info, validate_limit, ErrorResponse, SuccessResponse, DEFAULT_LIMIT,
DEFAULT_OFFSET,
};
use crate::rest_api::RestApiResponseError;
#[derive(Debug, Serialize)]
struct ApiGameroomProposal {
proposal_id: String,
circuit_id: String,
circuit_hash: String,
members: Vec<ApiGameroomMember>,
requester: String,
requester_node_id: String,
created_time: u64,
updated_time: u64,
}
impl ApiGameroomProposal {
fn from(db_proposal: GameroomProposal, db_members: Vec<GameroomMember>) -> Self {
ApiGameroomProposal {
proposal_id: db_proposal.id.to_string(),
circuit_id: db_proposal.circuit_id.to_string(),
circuit_hash: db_proposal.circuit_hash.to_string(),
members: db_members
.into_iter()
.map(ApiGameroomMember::from)
.collect(),
requester: db_proposal.requester.to_string(),
requester_node_id: db_proposal.requester_node_id.to_string(),
created_time: db_proposal
.created_time
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_else(|_| Duration::new(0, 0))
.as_secs(),
updated_time: db_proposal
.updated_time
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_else(|_| Duration::new(0, 0))
.as_secs(),
}
}
}
#[derive(Debug, Serialize)]
struct ApiGameroomMember {
node_id: String,
endpoints: Vec<String>,
}
impl ApiGameroomMember {
fn from(db_circuit_member: GameroomMember) -> Self {
ApiGameroomMember {
node_id: db_circuit_member.node_id.to_string(),
endpoints: db_circuit_member.endpoints,
}
}
}
pub async fn fetch_proposal(
pool: web::Data<ConnectionPool>,
proposal_id: web::Path<i64>,
) -> Result<HttpResponse, Error> {
match web::block(move || get_proposal_from_db(pool, *proposal_id)).await {
Ok(proposal) => Ok(HttpResponse::Ok().json(SuccessResponse::new(proposal))),
Err(err) => {
match err {
error::BlockingError::Error(err) => match err {
RestApiResponseError::NotFound(err) => {
Ok(HttpResponse::NotFound().json(ErrorResponse::not_found(&err)))
}
_ => Ok(HttpResponse::BadRequest()
.json(ErrorResponse::bad_request(&err.to_string()))),
},
error::BlockingError::Canceled => {
debug!("Internal Server Error: {}", err);
Ok(HttpResponse::InternalServerError().json(ErrorResponse::internal_error()))
}
}
}
}
}
fn get_proposal_from_db(
pool: web::Data<ConnectionPool>,
id: i64,
) -> Result<ApiGameroomProposal, RestApiResponseError> {
if let Some(proposal) = helpers::fetch_proposal_by_id(&*pool.get()?, id)? {
let members = helpers::fetch_gameroom_members_by_circuit_id_and_status(
&*pool.get()?,
&proposal.circuit_id,
"Pending",
)?;
return Ok(ApiGameroomProposal::from(proposal, members));
}
Err(RestApiResponseError::NotFound(format!(
"Proposal with id {} not found",
id
)))
}
pub async fn list_proposals(
pool: web::Data<ConnectionPool>,
query: web::Query<HashMap<String, usize>>,
) -> Result<HttpResponse, Error> {
let offset: usize = query
.get("offset")
.map(ToOwned::to_owned)
.unwrap_or_else(|| DEFAULT_OFFSET);
let limit: usize = query
.get("limit")
.map(ToOwned::to_owned)
.unwrap_or_else(|| DEFAULT_LIMIT);
match web::block(move || list_proposals_from_db(pool, limit, offset)).await {
Ok((proposals, query_count)) => {
let paging_info =
get_response_paging_info(limit, offset, "api/proposals?", query_count as usize);
Ok(HttpResponse::Ok().json(SuccessResponse::list(proposals, paging_info)))
}
Err(err) => {
debug!("Internal Server Error: {}", err);
Ok(HttpResponse::InternalServerError().json(ErrorResponse::internal_error()))
}
}
}
fn list_proposals_from_db(
pool: web::Data<ConnectionPool>,
limit: usize,
offset: usize,
) -> Result<(Vec<ApiGameroomProposal>, i64), RestApiResponseError> {
let db_limit = validate_limit(limit);
let db_offset = offset as i64;
let mut proposal_members: HashMap<String, Vec<GameroomMember>> =
helpers::list_gameroom_members_with_status(&*pool.get()?, "Pending")?
.into_iter()
.fold(HashMap::new(), |mut acc, member| {
acc.entry(member.circuit_id.to_string())
.or_insert_with(|| vec![])
.push(member);
acc
});
let proposals = helpers::list_proposals_with_paging(&*pool.get()?, db_limit, db_offset)?
.into_iter()
.map(|proposal| {
let circuit_id = proposal.circuit_id.to_string();
ApiGameroomProposal::from(
proposal,
proposal_members
.remove(&circuit_id)
.unwrap_or_else(|| vec![]),
)
})
.collect::<Vec<ApiGameroomProposal>>();
Ok((proposals, helpers::get_proposal_count(&*pool.get()?)?))
}
pub async fn proposal_vote(
vote: web::Json<CircuitProposalVote>,
proposal_id: web::Path<i64>,
pool: web::Data<ConnectionPool>,
node_info: web::Data<Node>,
) -> Result<HttpResponse, Error> |
fn check_proposal_exists(
proposal_id: i64,
pool: web::Data<ConnectionPool>,
) -> Result<(), RestApiResponseError> {
if let Some(proposal) = helpers::fetch_proposal_by_id(&*pool.get()?, proposal_id)? {
if proposal.status == "Pending" {
return Ok(());
} else {
return Err(RestApiResponseError::BadRequest(format!(
"Cannot vote on proposal with id {}. The proposal status is {}",
proposal_id, proposal.status
)));
}
}
Err(RestApiResponseError::NotFound(format!(
"Proposal with id {} not found.",
proposal_id
)))
}
fn make_payload(
vote: CircuitProposalVote,
local_node: String,
) -> Result<Vec<u8>, RestApiResponseError> {
let vote_proto = vote.into_proto();
let vote_bytes = vote_proto.write_to_bytes()?;
let hashed_bytes = hash(MessageDigest::sha512(), &vote_bytes)?;
let mut header = Header::new();
header.set_action(Action::CIRCUIT_PROPOSAL_VOTE);
header.set_payload_sha512(hashed_bytes.to_vec());
header.set_requester_node_id(local_node);
let header_bytes = header.write_to_bytes()?;
let mut circuit_management_payload = CircuitManagementPayload::new();
circuit_management_payload.set_header(header_bytes);
circuit_management_payload.set_circuit_proposal_vote(vote_proto);
let payload_bytes = circuit_management_payload.write_to_bytes()?;
Ok(payload_bytes)
}
| {
let node_identity = node_info.identity.to_string();
match web::block(move || check_proposal_exists(*proposal_id, pool)).await {
Ok(()) => match make_payload(vote.into_inner(), node_identity) {
Ok(bytes) => Ok(
HttpResponse::Ok().json(SuccessResponse::new(json!({ "payload_bytes": bytes })))
),
Err(err) => {
debug!("Failed to prepare circuit management payload {}", err);
Ok(HttpResponse::InternalServerError().json(ErrorResponse::internal_error()))
}
},
Err(err) => match err {
error::BlockingError::Error(err) => match err {
RestApiResponseError::NotFound(err) => {
Ok(HttpResponse::NotFound().json(ErrorResponse::not_found(&err)))
}
RestApiResponseError::BadRequest(err) => {
Ok(HttpResponse::BadRequest().json(ErrorResponse::bad_request(&err)))
}
_ => Ok(HttpResponse::InternalServerError().json(ErrorResponse::internal_error())),
},
error::BlockingError::Canceled => {
debug!("Internal Server Error: {}", err);
Ok(HttpResponse::InternalServerError().json(ErrorResponse::internal_error()))
}
},
}
} |
second.py | #!/usr/bin/env python3
from collections import deque
FILE='test.txt' # sol: 40
FILE='input.txt' # sol: 824
def print_board(board):
for row in board:
print(''.join([str(i) for i in row]))
def | (file, repeat):
board = []
for i in range(repeat):
with open(file, 'r') as f:
for line in f:
board.append([int(c) for c in line.strip()] * repeat)
#print_board(board)
return board
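# compute_board fills in the tiled board: each tile's risk values are one higher than
# the tile above it (for the first column of tiles) or the tile to its left (for the
# rest), wrapping 9 back to 1. For example, an 8 in tile (0,0) becomes 9 in tile (0,1)
# and 1 in tile (0,2).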
def compute_board(board, repeat):
height = len(board) // repeat
width = len(board[0]) // repeat
# for each grid row
for row_repeat in range(repeat):
if row_repeat != 0: # don't touch grid (0,0)
# update first grid column
for row in range(height):
for col in range(width):
if board[height*(row_repeat-1)+row][col] < 9:
board[height*row_repeat+row][col] = board[height*(row_repeat-1)+row][col] + 1
else:
board[height*row_repeat+row][col] = 1
# update remaining grid columns
for col_repeat in range(1, repeat):
for row in range(height):
for col in range(width):
if board[height*row_repeat+row][width*(col_repeat-1)+col] < 9:
board[height*row_repeat+row][width*col_repeat+col] = board[height*row_repeat+row][width*(col_repeat-1)+col] + 1
else:
board[height*row_repeat+row][width*col_repeat+col] = 1
def get_neighbour(board, pos):
out = []
if pos[0] > 0:
out.append((pos[0]-1, pos[1]))
if pos[0] < len(board) - 1:
out.append((pos[0]+1, pos[1]))
if pos[1] > 0:
out.append((pos[0], pos[1] - 1))
if pos[1] < len(board[0]) - 1:
out.append((pos[0], pos[1] + 1))
return out
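# Note: despite the name, this is a queue-based relaxation (SPFA-style) rather than a
# priority-queue Dijkstra; with the non-negative weights here it converges to the same
# shortest distances, just with more re-visits.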
def dijkstra(board, start):
queue = deque([start])
distance = {start: 0}
while queue:
cur = queue.popleft()
for point in get_neighbour(board, cur):
dst = distance[cur] + board[point[0]][point[1]]
if (point not in distance or dst < distance[point]):
distance[point] = dst
queue.append(point)
return distance
repeat = 5
board = parse_input(FILE, repeat)
compute_board(board, repeat)
#print_board(board)
distance = dijkstra(board, (0,0))
end = (len(board)-1, len(board[0])-1)
print(f'result {distance[end]}')
| parse_input |
parse_size.rs | // * This file is part of the uutils coreutils package.
// *
// * For the full copyright and license information, please view the LICENSE
// * file that was distributed with this source code.
// spell-checker:ignore (ToDO) hdsf ghead gtail
use std::convert::TryFrom;
use std::error::Error;
use std::fmt;
use crate::display::Quotable;
/// Parse a size string into a number of bytes.
///
/// A size string comprises an integer and an optional unit. The unit
/// may be K, M, G, T, P, E, Z or Y (powers of 1024), or KB, MB,
/// etc. (powers of 1000), or b which is 512.
/// Binary prefixes can be used, too: KiB=K, MiB=M, and so on.
///
/// # Errors
///
/// Will return `ParseSizeError` if it's not possible to parse this
/// string into a number, e.g. if the string does not begin with a
/// numeral, or if the unit is not one of the supported units described
/// in the preceding section.
///
/// # Examples
///
/// ```rust
/// use uucore::parse_size::parse_size;
/// assert_eq!(Ok(123), parse_size("123"));
/// assert_eq!(Ok(9 * 1000), parse_size("9kB")); // kB is 1000
/// assert_eq!(Ok(2 * 1024), parse_size("2K")); // K is 1024
/// ```
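///
/// A small additional sketch of the edge cases handled below (the user-facing error
/// strings are produced by each utility, not by this function):
///
/// ```rust
/// use uucore::parse_size::parse_size;
/// assert_eq!(Ok(3 * 512), parse_size("3b")); // b is 512 (od/head/tail style)
/// assert_eq!(Ok(1024), parse_size("K"));     // a bare unit defaults the number to 1
/// assert!(parse_size("").is_err());          // empty input is a parse failure
/// assert!(parse_size("1fb").is_err());       // unknown unit is a parse failure
/// ```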
pub fn parse_size(size: &str) -> Result<usize, ParseSizeError> {
if size.is_empty() {
return Err(ParseSizeError::parse_failure(size));
}
// Get the numeric part of the size argument. For example, if the
// argument is "123K", then the numeric part is "123".
let numeric_string: String = size.chars().take_while(|c| c.is_digit(10)).collect();
let number: usize = if !numeric_string.is_empty() {
match numeric_string.parse() {
Ok(n) => n,
Err(_) => return Err(ParseSizeError::parse_failure(size)),
}
} else {
1
};
// Get the alphabetic units part of the size argument and compute
// the factor it represents. For example, if the argument is "123K",
// then the unit part is "K" and the factor is 1024. This may be the
// empty string, in which case, the factor is 1.
let unit = &size[numeric_string.len()..];
let (base, exponent): (u128, u32) = match unit {
"" => (1, 0),
"b" => (512, 1), // (`od`, `head` and `tail` use "b")
"KiB" | "kiB" | "K" | "k" => (1024, 1),
"MiB" | "miB" | "M" | "m" => (1024, 2),
"GiB" | "giB" | "G" | "g" => (1024, 3),
"TiB" | "tiB" | "T" | "t" => (1024, 4),
"PiB" | "piB" | "P" | "p" => (1024, 5),
"EiB" | "eiB" | "E" | "e" => (1024, 6),
"ZiB" | "ziB" | "Z" | "z" => (1024, 7),
"YiB" | "yiB" | "Y" | "y" => (1024, 8),
"KB" | "kB" => (1000, 1),
"MB" | "mB" => (1000, 2),
"GB" | "gB" => (1000, 3),
"TB" | "tB" => (1000, 4),
"PB" | "pB" => (1000, 5),
"EB" | "eB" => (1000, 6),
"ZB" | "zB" => (1000, 7),
"YB" | "yB" => (1000, 8),
_ => return Err(ParseSizeError::parse_failure(size)),
};
let factor = match usize::try_from(base.pow(exponent)) {
Ok(n) => n,
Err(_) => return Err(ParseSizeError::size_too_big(size)),
};
number
.checked_mul(factor)
.ok_or_else(|| ParseSizeError::size_too_big(size))
}
#[derive(Debug, PartialEq, Eq)]
pub enum ParseSizeError {
ParseFailure(String), // Syntax
SizeTooBig(String), // Overflow
}
impl Error for ParseSizeError {
fn description(&self) -> &str {
match *self {
ParseSizeError::ParseFailure(ref s) => &*s,
ParseSizeError::SizeTooBig(ref s) => &*s,
}
}
}
impl fmt::Display for ParseSizeError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let s = match self {
ParseSizeError::ParseFailure(s) => s,
ParseSizeError::SizeTooBig(s) => s,
};
write!(f, "{}", s)
}
}
// FIXME: It's more idiomatic to move the formatting into the Display impl,
// but there's a lot of downstream code that constructs these errors manually
// that would be affected
impl ParseSizeError {
fn parse_failure(s: &str) -> Self {
// stderr on linux (GNU coreutils 8.32) (LC_ALL=C)
// has to be handled in the respective uutils because strings differ, e.g.:
//
// `NUM`
// head: invalid number of bytes: '1fb'
// tail: invalid number of bytes: '1fb'
//
// `SIZE`
// split: invalid number of bytes: '1fb'
// truncate: Invalid number: '1fb'
//
// `MODE`
// stdbuf: invalid mode '1fb'
//
// `SIZE`
// sort: invalid suffix in --buffer-size argument '1fb'
// sort: invalid --buffer-size argument 'fb'
//
// `SIZE`
// du: invalid suffix in --buffer-size argument '1fb'
// du: invalid suffix in --threshold argument '1fb'
// du: invalid --buffer-size argument 'fb'
// du: invalid --threshold argument 'fb'
//
// `BYTES`
// od: invalid suffix in --read-bytes argument '1fb'
// od: invalid --read-bytes argument argument 'fb'
// --skip-bytes
// --width
// --strings
// etc.
Self::ParseFailure(format!("{}", s.quote()))
}
fn size_too_big(s: &str) -> Self {
// stderr on linux (GNU coreutils 8.32) (LC_ALL=C)
// has to be handled in the respective uutils because strings differ, e.g.:
//
// head: invalid number of bytes: '1Y': Value too large for defined data type
// tail: invalid number of bytes: '1Y': Value too large for defined data type
// split: invalid number of bytes: '1Y': Value too large for defined data type
// truncate: Invalid number: '1Y': Value too large for defined data type
// stdbuf: invalid mode '1Y': Value too large for defined data type | // etc.
//
// stderr on macos (brew - GNU coreutils 8.32) also differs for the same version, e.g.:
// ghead: invalid number of bytes: '1Y': Value too large to be stored in data type
// gtail: invalid number of bytes: '1Y': Value too large to be stored in data type
Self::SizeTooBig(format!(
"{}: Value too large for defined data type",
s.quote()
))
}
}
#[cfg(test)]
mod tests {
use super::*;
fn variant_eq(a: &ParseSizeError, b: &ParseSizeError) -> bool {
std::mem::discriminant(a) == std::mem::discriminant(b)
}
#[test]
fn all_suffixes() {
// Units are K,M,G,T,P,E,Z,Y (powers of 1024) or KB,MB,... (powers of 1000).
// Binary prefixes can be used, too: KiB=K, MiB=M, and so on.
let suffixes = [
('K', 1u32),
('M', 2u32),
('G', 3u32),
('T', 4u32),
('P', 5u32),
('E', 6u32),
#[cfg(target_pointer_width = "128")]
('Z', 7u32), // ParseSizeError::SizeTooBig on x64
#[cfg(target_pointer_width = "128")]
('Y', 8u32), // ParseSizeError::SizeTooBig on x64
];
for &(c, exp) in &suffixes {
let s = format!("2{}B", c); // KB
assert_eq!(Ok((2 * (1000_u128).pow(exp)) as usize), parse_size(&s));
let s = format!("2{}", c); // K
assert_eq!(Ok((2 * (1024_u128).pow(exp)) as usize), parse_size(&s));
let s = format!("2{}iB", c); // KiB
assert_eq!(Ok((2 * (1024_u128).pow(exp)) as usize), parse_size(&s));
let s = format!("2{}iB", c.to_lowercase()); // kiB
assert_eq!(Ok((2 * (1024_u128).pow(exp)) as usize), parse_size(&s));
// suffix only
let s = format!("{}B", c); // KB
assert_eq!(Ok(((1000_u128).pow(exp)) as usize), parse_size(&s));
let s = format!("{}", c); // K
assert_eq!(Ok(((1024_u128).pow(exp)) as usize), parse_size(&s));
let s = format!("{}iB", c); // KiB
assert_eq!(Ok(((1024_u128).pow(exp)) as usize), parse_size(&s));
let s = format!("{}iB", c.to_lowercase()); // kiB
assert_eq!(Ok(((1024_u128).pow(exp)) as usize), parse_size(&s));
}
}
#[test]
#[cfg(not(target_pointer_width = "128"))]
fn overflow_x64() {
assert!(parse_size("10000000000000000000000").is_err());
assert!(parse_size("1000000000T").is_err());
assert!(parse_size("100000P").is_err());
assert!(parse_size("100E").is_err());
assert!(parse_size("1Z").is_err());
assert!(parse_size("1Y").is_err());
assert!(variant_eq(
&parse_size("1Z").unwrap_err(),
&ParseSizeError::SizeTooBig(String::new())
));
assert_eq!(
ParseSizeError::SizeTooBig("'1Y': Value too large for defined data type".to_string()),
parse_size("1Y").unwrap_err()
);
}
#[test]
#[cfg(target_pointer_width = "32")]
fn overflow_x32() {
assert!(variant_eq(
&parse_size("1T").unwrap_err(),
&ParseSizeError::SizeTooBig(String::new())
));
assert!(variant_eq(
&parse_size("1000G").unwrap_err(),
&ParseSizeError::SizeTooBig(String::new())
));
}
#[test]
fn invalid_syntax() {
let test_strings = [
"328hdsf3290",
"5MiB nonsense",
"5mib",
"biB",
"-",
"+",
"",
"-1",
"1e2",
"∞",
];
for &test_string in &test_strings {
assert_eq!(
parse_size(test_string).unwrap_err(),
ParseSizeError::ParseFailure(format!("{}", test_string.quote()))
);
}
}
#[test]
fn b_suffix() {
assert_eq!(Ok(3 * 512), parse_size("3b")); // b is 512
}
#[test]
fn no_suffix() {
assert_eq!(Ok(1234), parse_size("1234"));
assert_eq!(Ok(0), parse_size("0"));
assert_eq!(Ok(5), parse_size("5"));
assert_eq!(Ok(999), parse_size("999"));
}
#[test]
fn kilobytes_suffix() {
assert_eq!(Ok(123 * 1000), parse_size("123KB")); // KB is 1000
assert_eq!(Ok(9 * 1000), parse_size("9kB")); // kB is 1000
assert_eq!(Ok(2 * 1024), parse_size("2K")); // K is 1024
assert_eq!(Ok(0), parse_size("0K"));
assert_eq!(Ok(0), parse_size("0KB"));
assert_eq!(Ok(1000), parse_size("KB"));
assert_eq!(Ok(1024), parse_size("K"));
assert_eq!(Ok(2000), parse_size("2kB"));
assert_eq!(Ok(4000), parse_size("4KB"));
}
#[test]
fn megabytes_suffix() {
assert_eq!(Ok(123 * 1024 * 1024), parse_size("123M"));
assert_eq!(Ok(123 * 1000 * 1000), parse_size("123MB"));
assert_eq!(Ok(1024 * 1024), parse_size("M"));
assert_eq!(Ok(1000 * 1000), parse_size("MB"));
assert_eq!(Ok(2 * 1_048_576), parse_size("2m"));
assert_eq!(Ok(4 * 1_048_576), parse_size("4M"));
assert_eq!(Ok(2_000_000), parse_size("2mB"));
assert_eq!(Ok(4_000_000), parse_size("4MB"));
}
#[test]
fn gigabytes_suffix() {
assert_eq!(Ok(1_073_741_824), parse_size("1G"));
assert_eq!(Ok(2_000_000_000), parse_size("2GB"));
}
#[test]
#[cfg(target_pointer_width = "64")]
fn x64() {
assert_eq!(Ok(1_099_511_627_776), parse_size("1T"));
assert_eq!(Ok(1_125_899_906_842_624), parse_size("1P"));
assert_eq!(Ok(1_152_921_504_606_846_976), parse_size("1E"));
assert_eq!(Ok(2_000_000_000_000), parse_size("2TB"));
assert_eq!(Ok(2_000_000_000_000_000), parse_size("2PB"));
assert_eq!(Ok(2_000_000_000_000_000_000), parse_size("2EB"));
}
} | // sort: -S argument '1Y' too large
// du: -B argument '1Y' too large
// od: -N argument '1Y' too large |
json.rs | use async_std::io;
use async_std::task;
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Serialize)]
struct Cat {
name: String,
}
fn main() -> io::Result<()> | {
task::block_on(async {
let mut app = tide::new();
app.at("/submit").post(|mut req: tide::Request<()>| {
async move {
let cat: Cat = req.body_json().await.unwrap();
println!("cat name: {}", cat.name);
let cat = Cat {
name: "chashu".into(),
};
tide::Response::new(200).body_json(&cat).unwrap()
}
});
app.listen("127.0.0.1:8080").await?;
Ok(())
})
} |
|
function.js | $(document).ready(function(){
    console.log("function.js loaded");
});
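// announce() collects the announcement form fields, validates that none are empty,
// POSTs them to the current URL, and reloads the page once the request completes.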
function announce(event)
{
var message= $('input[name="announcement"]').val();
var batch = $('input[name="batch"]').val();
var programme = $('input[name="programme"]').val();
var department = $('input[name="department"]').val();
var upload_announcement =$('input[name="upload_announcement"]').val();
if(message=="" || batch=="" || programme =="" || department=="")
{
alert("Please fill all the details!");
return;
}
else
{
event.preventDefault();
$.ajax({
type : 'POST',
url : '.',
data : {
'message' : message,
'batch' : batch,
'programme' : programme,
'upload_announcement' : upload_announcement,
'department' : department,
},
success : function (data){
alert("Announcement successfully made!!");
setTimeout(function() {
window.location.reload();
}, 1500);
},
error : function (data,err){
alert('Announcement successfully made ... ');
}
});
}
};
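// request() follows the same pattern: validate the request fields, POST them to the
// current URL, and reload on completion.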
function request(event)
{
var request_type= $('input[name="request_type"]').val();
var request_to = $('input[name="request_to"]').val();
var request_details = $('input[name="request_details"]').val();
if(request_type=="" || request_to=="" || request_details =="" )
{
alert("Please fill all the details!");
return;
}
else
{
event.preventDefault();
$.ajax({
type : 'POST',
url : '.',
data : {
'request_type' : request_type,
'request_to' : request_to,
'request_details' : request_details,
},
success : function (data){
alert("Request successfully made!!"); | window.location.reload();
}, 1500);
},
error : function (data,err){
alert('Request successfully made ... ');
}
});
}
};
function editStatus(event){
alert("working but dont know what to do");
}; | setTimeout(function() { |
extension.go | package keybase
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"sync"
"time"
"github.com/keybase/client/go/encrypteddb"
"github.com/keybase/client/go/kbconst"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"github.com/keybase/client/go/chat"
"github.com/keybase/client/go/chat/attachments"
"github.com/keybase/client/go/chat/globals"
"github.com/keybase/client/go/chat/storage"
"github.com/keybase/client/go/chat/types"
"github.com/keybase/client/go/chat/utils"
"github.com/keybase/client/go/externals"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/chat1"
"github.com/keybase/client/go/protocol/gregor1"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/client/go/service"
"github.com/keybase/client/go/uidmap"
context "golang.org/x/net/context"
)
var extensionRi chat1.RemoteClient
var extensionInited bool
var extensionInitMu sync.Mutex
var extensionPusher PushNotifier
var extensionListener *extensionNotifyListener
type extensionNotifyListener struct {
sync.Mutex
globals.Contextified
libkb.NoopNotifyListener
waiters map[string][]chan struct{}
sent map[string]bool
}
func newExtensionNotifyListener(g *globals.Context) *extensionNotifyListener {
return &extensionNotifyListener{
Contextified: globals.NewContextified(g),
waiters: make(map[string][]chan struct{}),
sent: make(map[string]bool),
}
}
func (n *extensionNotifyListener) listenFor(outboxID chat1.OutboxID) chan struct{} {
n.Lock()
defer n.Unlock()
cb := make(chan struct{})
if n.sent[outboxID.String()] {
close(cb)
} else {
n.waiters[outboxID.String()] = append(n.waiters[outboxID.String()], cb)
}
return cb
}
func (n *extensionNotifyListener) trigger(outboxID chat1.OutboxID) {
n.Lock()
defer n.Unlock()
n.sent[outboxID.String()] = true
for _, cb := range n.waiters[outboxID.String()] {
close(cb)
}
n.waiters[outboxID.String()] = nil
}
func (n *extensionNotifyListener) NewChatActivity(uid keybase1.UID, activity chat1.ChatActivity,
source chat1.ChatActivitySource) {
// Only care about local notifications
if source != chat1.ChatActivitySource_LOCAL {
return
}
ctx := context.Background()
st, err := activity.ActivityType()
if err != nil {
kbCtx.Log.Debug("NewChatActivity: failed to get type: %s", err)
return
}
switch st {
case chat1.ChatActivityType_INCOMING_MESSAGE:
msg := activity.IncomingMessage().Message
if msg.IsOutbox() {
// skip pending message notification
return
}
strConvID := activity.IncomingMessage().ConvID.String()
outboxID := msg.GetOutboxID()
if outboxID != nil {
n.trigger(*outboxID)
}
extensionPushResult(nil, strConvID, "message") | strConvID := r.ConvID.String()
n.trigger(r.OutboxID)
extensionPushResult(err, strConvID, "message")
}
for _, r := range recs {
strConvID := r.ConvID.String()
strOutboxID := r.OutboxID.String()
extensionRegisterFailure(ctx, n.G(), err, strConvID, strOutboxID)
}
}
}
func ExtensionIsInited() bool {
extensionInitMu.Lock()
defer extensionInitMu.Unlock()
return extensionInited
}
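// ExtensionInit brings up a minimal Keybase service for the share extension: it
// configures libkb with the supplied home directories and run mode, opens the local
// databases (falling back to in-memory ones on failure), starts the loopback RPC
// server and chat modules, and installs a listener that reports send results through
// the supplied PushNotifier. It is safe to call more than once; later calls are
// no-ops once initialization has succeeded.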
func ExtensionInit(homeDir string, mobileSharedHome string, logFile string, runModeStr string,
accessGroupOverride bool, pusher PushNotifier) (err error) {
extensionInitMu.Lock()
defer extensionInitMu.Unlock()
defer func() { err = flattenError(err) }()
defer func() {
if err == nil {
extensionInited = true
}
kbCtx.Log.Debug("Init complete: err: %s extensionInited: %v", err, extensionInited)
}()
if extensionInited {
return nil
}
fmt.Printf("Go: Extension Initializing: home: %s mobileSharedHome: %s\n", homeDir, mobileSharedHome)
if logFile != "" {
fmt.Printf("Go: Using log: %s\n", logFile)
}
extensionPusher = pusher
dnsNSFetcher := newDNSNSFetcher(nil)
dnsServers := dnsNSFetcher.GetServers()
for _, srv := range dnsServers {
fmt.Printf("Go: DNS Server: %s\n", srv)
}
kbCtx = libkb.NewGlobalContext()
kbCtx.Init()
kbCtx.SetProofServices(externals.NewProofServices(kbCtx))
// 10k uid -> FullName cache entries allowed
kbCtx.SetUIDMapper(uidmap.NewUIDMap(10000))
usage := libkb.Usage{
Config: true,
API: true,
KbKeyring: true,
}
var runMode kbconst.RunMode
if runMode, err = libkb.StringToRunMode(runModeStr); err != nil {
return err
}
config := libkb.AppConfig{
HomeDir: homeDir,
MobileSharedHomeDir: mobileSharedHome,
MobileExtension: true,
LogFile: logFile,
RunMode: runMode,
Debug: true,
LocalRPCDebug: "",
VDebugSetting: "mobile", // use empty string for same logging as desktop default
SecurityAccessGroupOverride: accessGroupOverride,
ChatInboxSourceLocalizeThreads: 5,
AttachmentHTTPStartPort: 16500,
AttachmentDisableMulti: true,
LinkCacheSize: 100,
UPAKCacheSize: 50,
PayloadCacheSize: 50,
ProofCacheSize: 50,
OutboxStorageEngine: "files",
DisableTeamAuditor: true,
DisableMerkleAuditor: true,
}
if err = kbCtx.Configure(config, usage); err != nil {
return err
}
if err = kbCtx.LocalDb.ForceOpen(); err != nil {
kbCtx.Log.Debug("Failed to open local db, using memory db: %s", err)
kbCtx.LocalDb = libkb.NewJSONLocalDb(libkb.NewMemDb(1000))
}
if err = kbCtx.LocalChatDb.ForceOpen(); err != nil {
kbCtx.Log.Debug("Failed to open local chat db, using memory db: %s", err)
kbCtx.LocalChatDb = libkb.NewJSONLocalDb(libkb.NewMemDb(1000))
}
svc := service.NewService(kbCtx, false)
if err = svc.StartLoopbackServer(); err != nil {
return err
}
kbCtx.SetService()
uir := service.NewUIRouter(kbCtx)
kbCtx.SetUIRouter(uir)
kbCtx.SetDNSNameServerFetcher(dnsNSFetcher)
svc.SetupCriticalSubServices()
var uid gregor1.UID
extensionRi = chat1.RemoteClient{Cli: chat.OfflineClient{}}
svc.SetupChatModules(func() chat1.RemoteInterface { return extensionRi })
kbChatCtx = svc.ChatContextified.ChatG()
gc := globals.NewContext(kbCtx, kbChatCtx)
if uid, err = assertLoggedInUID(context.Background(), gc); err != nil {
return err
}
if extensionRi, err = getGregorClient(context.Background(), gc); err != nil {
return err
}
extensionListener = newExtensionNotifyListener(gc)
kbCtx.NotifyRouter.SetListener(extensionListener)
kbChatCtx.InboxSource = chat.NewRemoteInboxSource(gc, func() chat1.RemoteInterface { return extensionRi })
kbChatCtx.EphemeralPurger.Start(context.Background(), uid) // need to start this to send
kbChatCtx.MessageDeliverer.Start(context.Background(), uid)
kbChatCtx.MessageDeliverer.Connected(context.Background())
return nil
}
func assertLoggedInUID(ctx context.Context, gc *globals.Context) (uid gregor1.UID, err error) {
if !gc.ActiveDevice.HaveKeys() {
return uid, libkb.LoginRequiredError{}
}
k1uid := gc.Env.GetUID()
if k1uid.IsNil() {
return uid, libkb.LoginRequiredError{}
}
return gregor1.UID(k1uid.ToBytes()), nil
}
func presentInboxItem(item storage.SharedInboxItem, username string) storage.SharedInboxItem {
// Check for self conv or big team conv
if item.Name == username || strings.Contains(item.Name, "#") {
return item
}
item.Name = strings.Replace(item.Name, fmt.Sprintf(",%s", username), "", -1)
item.Name = strings.Replace(item.Name, fmt.Sprintf("%s,", username), "", -1)
return item
}
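// ExtensionGetInbox returns the cached shared inbox for the logged-in user as a JSON
// string, with conversation names prettified for display.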
func ExtensionGetInbox() (res string, err error) {
defer kbCtx.Trace("ExtensionGetInbox", func() error { return err })()
gc := globals.NewContext(kbCtx, kbChatCtx)
ctx := chat.Context(context.Background(), gc,
keybase1.TLFIdentifyBehavior_CHAT_GUI, nil, chat.NewCachingIdentifyNotifier(gc))
uid, err := assertLoggedInUID(ctx, gc)
if err != nil {
return res, err
}
inbox := storage.NewInbox(gc)
sharedInbox, err := inbox.ReadShared(ctx, uid)
if err != nil {
return res, err
}
// Pretty up the names
username := kbCtx.GetEnv().GetUsername().String()
for index := range sharedInbox {
sharedInbox[index] = presentInboxItem(sharedInbox[index], username)
}
// JSON up to send to native
dat, err := json.Marshal(sharedInbox)
if err != nil {
return res, err
}
return string(dat), nil
}
func extensionGetDeviceID(ctx context.Context, gc *globals.Context) (res gregor1.DeviceID, err error) {
deviceID := gc.ActiveDevice.DeviceID()
if deviceID.IsNil() {
return res, err
}
hdid := make([]byte, libkb.DeviceIDLen)
if err = deviceID.ToBytes(hdid); err != nil {
return res, err
}
return gregor1.DeviceID(hdid), nil
}
func extensionRegisterSendNonblock(ctx context.Context, gc *globals.Context, convID chat1.ConversationID,
outboxID chat1.OutboxID) {
bctx := chat.BackgroundContext(ctx, gc)
go func(ctx context.Context) {
deviceID, err := extensionGetDeviceID(ctx, gc)
if err != nil {
kbCtx.Log.CDebugf(ctx, "extensionRegisterSend: failed to get deviceID: %s", err)
return
}
if err = extensionRi.RegisterSharePost(ctx, chat1.RegisterSharePostArg{
ConvID: convID,
OutboxID: outboxID,
DeviceID: deviceID,
}); err != nil {
kbCtx.Log.CDebugf(ctx, "extensionRegisterSend: failed to make RPC: %s", err)
return
}
}(bctx)
}
func extensionRegisterFailure(ctx context.Context, gc *globals.Context, err error, strConvID,
strOutboxID string) {
if err == nil {
return
}
convID, err := chat1.MakeConvID(strConvID)
if err != nil {
kbCtx.Log.CDebugf(ctx, "extensionRegisterFailure: invalid convID: %s", err)
return
}
outboxID := getOutboxID(strOutboxID)
if outboxID == nil {
kbCtx.Log.CDebugf(ctx, "extensionRegisterFailure: nil outboxID")
return
}
deviceID, err := extensionGetDeviceID(ctx, gc)
if err != nil {
kbCtx.Log.CDebugf(ctx, "extensionRegisterFailure: failed to get deviceID: %s", err)
return
}
if err := extensionRi.FailSharePost(ctx, chat1.FailSharePostArg{
ConvID: convID,
OutboxID: *outboxID,
DeviceID: deviceID,
}); err != nil {
kbCtx.Log.CDebugf(ctx, "extensionRegisterFailure: failed: %s", err)
}
}
func ExtensionDetectMIMEType(filename string) (res string, err error) {
defer kbCtx.Trace("ExtensionDetectMIMEType", func() error { return err })()
src, err := attachments.NewFileReadCloseResetter(filename)
if err != nil {
return res, err
}
defer src.Close()
return attachments.DetectMIMEType(context.TODO(), src, filename)
}
type extensionGregorHandler struct {
globals.Contextified
nist *libkb.NIST
}
func newExtensionGregorHandler(gc *globals.Context, nist *libkb.NIST) *extensionGregorHandler {
return &extensionGregorHandler{
Contextified: globals.NewContextified(gc),
nist: nist,
}
}
func (g *extensionGregorHandler) HandlerName() string {
return "extensionGregorHandler"
}
func (g *extensionGregorHandler) OnConnect(ctx context.Context, conn *rpc.Connection, cli rpc.GenericClient, srv *rpc.Server) error {
gcli := gregor1.AuthClient{Cli: cli}
uid := gregor1.UID(g.G().GetEnv().GetUID().ToBytes())
authRes, err := gcli.AuthenticateSessionToken(ctx, gregor1.SessionToken(g.nist.Token().String()))
if err != nil {
return err
}
if !authRes.Uid.Eq(uid) {
return errors.New("wrong uid authed")
}
return nil
}
func (g *extensionGregorHandler) OnConnectError(err error, reconnectThrottleDuration time.Duration) {
}
func (g *extensionGregorHandler) OnDisconnected(ctx context.Context, status rpc.DisconnectStatus) {
}
func (g *extensionGregorHandler) OnDoCommandError(err error, nextTime time.Duration) {}
func (g *extensionGregorHandler) ShouldRetry(name string, err error) bool {
return false
}
func (g *extensionGregorHandler) ShouldRetryOnConnect(err error) bool {
return false
}
func getGregorClient(ctx context.Context, gc *globals.Context) (res chat1.RemoteClient, err error) {
conn, _, err := utils.GetGregorConn(ctx, gc, utils.NewDebugLabeler(gc.GetLog(), "Extension", false),
func(nist *libkb.NIST) rpc.ConnectionHandler {
return newExtensionGregorHandler(gc, nist)
})
return chat1.RemoteClient{Cli: chat.NewRemoteClient(gc, conn.GetClient())}, nil
}
func restoreName(gc *globals.Context, name string, membersType chat1.ConversationMembersType) string {
switch membersType {
case chat1.ConversationMembersType_TEAM:
if strings.Contains(name, "#") {
return strings.Split(name, "#")[0]
}
return name
default:
username := gc.GetEnv().GetUsername().String()
return name + "," + username
}
}
func getOutboxID(strOutboxID string) *chat1.OutboxID {
if len(strOutboxID) == 0 {
return nil
}
obid, err := chat1.MakeOutboxID(strOutboxID)
if err != nil {
return nil
}
return &obid
}
func extensionNewSender(g *globals.Context) types.Sender {
baseSender := chat.NewBlockingSender(g, chat.NewBoxer(g),
func() chat1.RemoteInterface { return extensionRi })
return chat.NewNonblockingSender(g, baseSender)
}
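// ExtensionPostText sends a text message to the given conversation, registers the
// send with the server, and waits briefly for a result before returning.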
func ExtensionPostText(strConvID, name string, public bool, membersType int, body string) (err error) {
defer kbCtx.Trace("ExtensionPostText", func() error { return err })()
gc := globals.NewContext(kbCtx, kbChatCtx)
ctx := chat.Context(context.Background(), gc,
keybase1.TLFIdentifyBehavior_CHAT_GUI, nil, chat.NewCachingIdentifyNotifier(gc))
defer func() { err = flattenError(err) }()
defer func() {
if err == nil {
putSavedConv(ctx, strConvID, name, public, membersType)
}
}()
outboxID, err := storage.NewOutboxID()
if err != nil {
return err
}
convID, err := chat1.MakeConvID(strConvID)
if err != nil {
return err
}
msg := chat1.MessagePlaintext{
ClientHeader: chat1.MessageClientHeader{
MessageType: chat1.MessageType_TEXT,
TlfName: restoreName(gc, name, chat1.ConversationMembersType(membersType)),
TlfPublic: public,
OutboxID: &outboxID,
},
MessageBody: chat1.NewMessageBodyWithText(chat1.MessageText{
Body: body,
}),
}
if _, _, err = extensionNewSender(gc).Send(ctx, convID, msg, 0, &outboxID); err != nil {
return err
}
extensionRegisterSendNonblock(ctx, gc, convID, outboxID)
extensionWaitForResult(ctx, strConvID, extensionListener.listenFor(outboxID))
return nil
}
func extensionPushResult(err error, strConvID, typ string) {
var msg string
if err != nil {
msg = fmt.Sprintf("We could not send your %s. Please try from the Keybase app.", typ)
} else {
msg = fmt.Sprintf("Your %s was shared successfully.", typ)
}
extensionPusher.LocalNotification("extension", msg, -1, "default", strConvID, "chat.extension")
}
func extensionCreateUploadTemp(ctx context.Context, gc *globals.Context, outboxID chat1.OutboxID,
inFilename string) (string, error) {
// move the file into out own storage, otherwise iOS is going to blow it away
filename, err := gc.AttachmentUploader.GetUploadTempFile(ctx, outboxID, inFilename)
if err != nil {
return "", err
}
inFile, err := os.Open(inFilename)
if err != nil {
return "", err
}
outFile, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return "", err
}
if _, err := io.Copy(outFile, inFile); err != nil {
return "", err
}
return filename, nil
}
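// ExtensionPostImage posts an image attachment to the given conversation, building
// the preview metadata from the dimensions and preview bytes supplied by the native
// caller.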
func ExtensionPostImage(strConvID, name string, public bool, membersType int,
caption string, inFilename string, mimeType string,
baseWidth, baseHeight, previewWidth, previewHeight int, previewData []byte) (err error) {
defer kbCtx.Trace("ExtensionPostImage", func() error { return err })()
gc := globals.NewContext(kbCtx, kbChatCtx)
ctx := chat.Context(context.Background(), gc,
keybase1.TLFIdentifyBehavior_CHAT_GUI, nil, chat.NewCachingIdentifyNotifier(gc))
defer func() { err = flattenError(err) }()
uid, err := assertLoggedInUID(ctx, gc)
if err != nil {
return err
}
outboxID, err := storage.NewOutboxID()
if err != nil {
return err
}
filename, err := extensionCreateUploadTemp(ctx, gc, outboxID, inFilename)
if err != nil {
return err
}
var callerPreview *chat1.MakePreviewRes
if previewData != nil {
// Compute preview result from the native params
callerPreview = new(chat1.MakePreviewRes)
callerPreview.MimeType = mimeType
callerPreview.PreviewMimeType = &mimeType
callerPreview.BaseMetadata = new(chat1.AssetMetadata)
callerPreview.Metadata = new(chat1.AssetMetadata)
location := chat1.NewPreviewLocationWithBytes(previewData)
callerPreview.Location = &location
switch mimeType {
case "image/gif":
*callerPreview.BaseMetadata = chat1.NewAssetMetadataWithVideo(chat1.AssetMetadataVideo{
Width: baseWidth,
Height: baseHeight,
DurationMs: 10, // make something up, we don't display this anyway
})
*callerPreview.Metadata = chat1.NewAssetMetadataWithImage(chat1.AssetMetadataImage{
Width: previewWidth,
Height: previewHeight,
})
callerPreview.PreviewMimeType = new(string)
*callerPreview.PreviewMimeType = "image/jpeg"
default:
*callerPreview.BaseMetadata = chat1.NewAssetMetadataWithImage(chat1.AssetMetadataImage{
Width: baseWidth,
Height: baseHeight,
})
*callerPreview.Metadata = chat1.NewAssetMetadataWithImage(chat1.AssetMetadataImage{
Width: previewWidth,
Height: previewHeight,
})
}
}
return postFileAttachment(ctx, gc, uid, strConvID, outboxID, name, public, membersType, filename,
caption, callerPreview)
}
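// ExtensionPostVideo posts a video attachment to the given conversation, using the
// caller-supplied duration, dimensions, and preview image.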
func ExtensionPostVideo(strConvID, name string, public bool, membersType int,
caption string, inFilename string, mimeType string,
duration, baseWidth, baseHeight, previewWidth, previewHeight int, previewData []byte) (err error) {
defer kbCtx.Trace("ExtensionPostVideo", func() error { return err })()
gc := globals.NewContext(kbCtx, kbChatCtx)
ctx := chat.Context(context.Background(), gc,
keybase1.TLFIdentifyBehavior_CHAT_GUI, nil, chat.NewCachingIdentifyNotifier(gc))
defer func() { err = flattenError(err) }()
uid, err := assertLoggedInUID(ctx, gc)
if err != nil {
return err
}
outboxID, err := storage.NewOutboxID()
if err != nil {
return err
}
filename, err := extensionCreateUploadTemp(ctx, gc, outboxID, inFilename)
if err != nil {
return err
}
// Compute preview result from the native params
previewMimeType := "image/jpeg"
location := chat1.NewPreviewLocationWithBytes(previewData)
if duration < 1 {
// clamp to 1 so we know it is a video, but also not to compute a duration for it
duration = 1
} else {
duration *= 1000
}
baseMD := chat1.NewAssetMetadataWithVideo(chat1.AssetMetadataVideo{
Width: baseWidth,
Height: baseHeight,
DurationMs: duration,
})
previewMD := chat1.NewAssetMetadataWithImage(chat1.AssetMetadataImage{
Width: previewWidth,
Height: previewHeight,
})
callerPreview := &chat1.MakePreviewRes{
MimeType: mimeType,
PreviewMimeType: &previewMimeType,
Location: &location,
Metadata: &previewMD,
BaseMetadata: &baseMD,
}
return postFileAttachment(ctx, gc, uid, strConvID, outboxID, name, public, membersType, filename,
caption, callerPreview)
}
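// ExtensionPostFile posts a generic file attachment to the given conversation without
// any caller-supplied preview.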
func ExtensionPostFile(strConvID, name string, public bool, membersType int,
caption string, filename string) (err error) {
defer kbCtx.Trace("ExtensionPostFile", func() error { return err })()
gc := globals.NewContext(kbCtx, kbChatCtx)
ctx := chat.Context(context.Background(), gc,
keybase1.TLFIdentifyBehavior_CHAT_GUI, nil, chat.NewCachingIdentifyNotifier(gc))
defer func() { err = flattenError(err) }()
uid, err := assertLoggedInUID(ctx, gc)
if err != nil {
return err
}
outboxID, err := storage.NewOutboxID()
if err != nil {
return err
}
return postFileAttachment(ctx, gc, uid, strConvID, outboxID, name, public, membersType, filename,
caption, nil)
}
func extensionWaitForResult(ctx context.Context, strConvID string, cb chan struct{}) {
// Wait for some time for success/failure before bailing out
kbCtx.Log.CDebugf(ctx, "extensionWaitForResult: waiting for result")
select {
case <-cb:
case <-time.After(30 * time.Second):
kbCtx.Log.CDebugf(ctx, "extensionWaitForResult: timed out waiting for result, bailing out!")
msg := "Your message is taking a long time to send, Keybase will be trying in the background."
extensionPusher.LocalNotification("extension", msg, -1, "default", strConvID, "chat.extension")
}
}
func postFileAttachment(ctx context.Context, gc *globals.Context, uid gregor1.UID,
strConvID string, outboxID chat1.OutboxID, name string, public bool, membersType int, filename,
caption string, callerPreview *chat1.MakePreviewRes) (err error) {
name = restoreName(gc, name, chat1.ConversationMembersType(membersType))
defer func() {
if err == nil {
putSavedConv(ctx, strConvID, name, public, membersType)
}
}()
convID, err := chat1.MakeConvID(strConvID)
if err != nil {
return err
}
vis := keybase1.TLFVisibility_PRIVATE
if public {
vis = keybase1.TLFVisibility_PUBLIC
}
sender := extensionNewSender(gc)
if _, _, err = attachments.NewSender(gc).PostFileAttachmentMessage(ctx, sender, convID, name, vis,
&outboxID, filename, caption, nil, 0, nil, callerPreview); err != nil {
return err
}
cb := extensionListener.listenFor(outboxID)
if _, err := gc.AttachmentUploader.Register(ctx, uid, convID, outboxID, caption,
filename, nil, callerPreview); err != nil {
return err
}
extensionRegisterSendNonblock(ctx, gc, convID, outboxID)
extensionWaitForResult(ctx, strConvID, cb)
return nil
}
func savedConvFile() *encrypteddb.EncryptedFile {
path := filepath.Join(kbCtx.GetEnv().GetDataDir(), "saveconv.mpack")
return encrypteddb.NewFile(kbCtx, path,
func(ctx context.Context) ([32]byte, error) {
return storage.GetSecretBoxKey(ctx, kbCtx, storage.DefaultSecretUI)
})
}
func putSavedConv(ctx context.Context, strConvID, name string, public bool, membersType int) {
item := storage.SharedInboxItem{
ConvID: strConvID,
Name: name,
Public: public,
MembersType: chat1.ConversationMembersType(membersType),
}
if err := savedConvFile().Put(ctx, item); err != nil {
kbCtx.Log.CDebugf(ctx, "putSavedConv: failed to write file: %s", err)
}
}
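// ExtensionGetSavedConv returns the most recently used conversation (saved by
// putSavedConv after a successful send) as JSON, or an empty string if none is
// available.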
func ExtensionGetSavedConv() string {
defer kbCtx.Trace("ExtensionGetSavedConv", func() error { return nil })()
gc := globals.NewContext(kbCtx, kbChatCtx)
ctx := chat.Context(context.Background(), gc,
keybase1.TLFIdentifyBehavior_CHAT_GUI, nil, chat.NewCachingIdentifyNotifier(gc))
if _, err := assertLoggedInUID(ctx, gc); err != nil {
kbCtx.Log.CDebugf(ctx, "ExtensionGetSavedConv: failed to get uid: %s", err)
return ""
}
var item storage.SharedInboxItem
if err := savedConvFile().Get(ctx, &item); err != nil {
kbCtx.Log.CDebugf(ctx, "ExtensionGetSavedConv: failed to read saved conv: %s", err)
return ""
}
dat, err := json.Marshal(presentInboxItem(item, kbCtx.GetEnv().GetUsername().String()))
if err != nil {
kbCtx.Log.CDebugf(ctx, "ExtensionGetSavedConv: failed to marshal: %s", err)
return ""
}
return string(dat)
}
// ExtensionForceGC Forces a gc
func ExtensionForceGC() {
var m runtime.MemStats
runtime.ReadMemStats(&m)
// Free up gc memory first
fmt.Printf("mem stats (before): alloc: %v sys: %v\n", m.Alloc, m.Sys)
fmt.Printf("Starting force gc\n")
debug.FreeOSMemory()
fmt.Printf("Done force gc\n")
runtime.ReadMemStats(&m)
fmt.Printf("mem stats (after): alloc: %v sys: %v\n", m.Alloc, m.Sys)
if !ExtensionIsInited() {
fmt.Printf("Not initialized, bailing\n")
return
}
// Free all caches, and run gc again to clear out anything
fmt.Printf("Flushing global caches\n")
kbCtx.FlushCaches()
if _, ok := kbCtx.LocalChatDb.GetEngine().(*libkb.MemDb); ok {
fmt.Printf("Nuking in memory chat db\n")
kbCtx.LocalChatDb.Nuke()
}
if _, ok := kbCtx.LocalDb.GetEngine().(*libkb.MemDb); ok {
fmt.Printf("Nuking in memory local db\n")
kbCtx.LocalDb.Nuke()
}
debug.FreeOSMemory()
fmt.Printf("Done flushing global caches\n")
runtime.ReadMemStats(&m)
fmt.Printf("mem stats (after flush): alloc: %v sys: %v\n", m.Alloc, m.Sys)
} | case chat1.ChatActivityType_FAILED_MESSAGE:
err := errors.New("message failed")
recs := activity.FailedMessage().OutboxRecords
for _, r := range recs { |
version.go | package mysqldialect
// Version is the current release version.
func Version() string | {
return "1.0.22"
} |
|
mod.rs | // The informal schema of our data in redis:
// send_routes_to set used for CCP routing
// receive_routes_from set used for CCP routing
// next_account_id string unique ID for each new account
// rates:current hash exchange rates
// routes:current hash dynamic routing table
// routes:static hash static routing table
// accounts:<id> hash information for each account
// accounts set
// usernames hash
// btp_outgoing
// For interactive exploration of the store,
// use the redis-cli tool included with your redis install.
// Within redis-cli:
// keys * list all keys of any type in the store
// smembers <key> list the members of a set
// get <key> get the value of a key
// hgetall <key> the flattened list of every key/value entry within a hash
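//
// A quick illustrative session (a sketch, assuming the default empty db prefix; the
// exact keys depend on which accounts exist):
// keys *                   -> accounts, usernames, routes:current, accounts:<id>, ...
// smembers accounts        -> the set of account ids
// hgetall accounts:<id>    -> the flattened fields for one account (balance, prepaid_amount, ...)
// hgetall routes:current   -> ILP address -> account id entries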
mod reconnect;
use reconnect::RedisReconnect;
use super::account::{Account, AccountWithEncryptedTokens};
use super::crypto::{encrypt_token, generate_keys, DecryptionKey, EncryptionKey};
use async_trait::async_trait;
use bytes::{Bytes, BytesMut};
use futures::channel::mpsc::UnboundedSender;
use http::StatusCode;
use interledger_api::{AccountDetails, AccountSettings, EncryptedAccountSettings, NodeStore};
use interledger_btp::BtpStore;
use interledger_ccp::{CcpRoutingAccount, CcpRoutingStore, RoutingRelation};
use interledger_errors::*;
use interledger_http::HttpStore;
use interledger_packet::Address;
use interledger_rates::ExchangeRateStore;
use interledger_router::RouterStore;
use interledger_service::{Account as AccountTrait, AccountStore, AddressStore, Username};
use interledger_service_util::{
BalanceStore, RateLimitError, RateLimitStore, DEFAULT_ROUND_TRIP_TIME,
};
use interledger_settlement::core::{
idempotency::{IdempotentData, IdempotentStore},
scale_with_precision_loss,
types::{Convert, ConvertDetails, LeftoversStore, SettlementStore},
};
use interledger_stream::{PaymentNotification, StreamNotificationsStore};
use num_bigint::BigUint;
use once_cell::sync::Lazy;
use parking_lot::{Mutex, RwLock};
use redis_crate::AsyncCommands;
use redis_crate::{
self, cmd, from_redis_value, Client, ConnectionInfo, ControlFlow, ErrorKind, FromRedisValue,
PubSubCommands, RedisError, RedisWrite, Script, ToRedisArgs, Value,
};
use secrecy::{ExposeSecret, Secret, SecretBytesMut};
use serde::{Deserialize, Serialize};
use std::{borrow::Cow, iter, str, str::FromStr, sync::Arc, time::Duration};
use std::{collections::HashMap, fmt::Display};
use tokio::sync::broadcast;
use tracing::{debug, error, trace, warn};
use url::Url;
use uuid::Uuid;
use zeroize::Zeroize;
const DEFAULT_POLL_INTERVAL: u64 = 30000; // 30 seconds
const ACCOUNT_DETAILS_FIELDS: usize = 21;
const DEFAULT_DB_PREFIX: &str = "";
static PARENT_ILP_KEY: &str = "parent_node_account_address";
static ROUTES_KEY: &str = "routes:current";
static STATIC_ROUTES_KEY: &str = "routes:static";
static DEFAULT_ROUTE_KEY: &str = "routes:default";
static STREAM_NOTIFICATIONS_PREFIX: &str = "stream_notifications:";
static SETTLEMENT_ENGINES_KEY: &str = "settlement_engines";
static USERNAMES_KEY: &str = "usernames";
static ACCOUNTS_KEY: &str = "accounts";
static SEND_ROUTES_KEY: &str = "send_routes_to";
static RECEIVE_ROUTES_FROM_KEY: &str = "receive_routes_from";
static BPT_OUTGOING: &str = "btp_outgoing";
/// Domain separator for leftover amounts
fn uncredited_amount_key(prefix: &str, account_id: impl ToString) -> String {
prefixed_key(
prefix,
&format!("uncredited-amount:{}", account_id.to_string()),
)
.into_owned()
}
/// Domain separator for idempotency keys
fn prefixed_idempotency_key(prefix: &str, idempotency_key: &str) -> String {
prefixed_key(
prefix,
format!("idempotency-key:{}", idempotency_key).as_str(),
)
.into_owned()
}
fn prefixed_key<'a>(prefix: &str, key: &'a str) -> Cow<'a, str> {
if prefix.is_empty() {
Cow::Borrowed(key)
} else {
Cow::Owned(format!("{}:{}", prefix, key))
}
}
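// As a sketch: prefixed_key("", "accounts") yields "accounts" (borrowed), while
// prefixed_key("node_a", "accounts") yields "node_a:accounts" (owned).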
/// Domain separator for accounts
fn accounts_key(prefix: &str, account_id: Uuid) -> String {
prefixed_key(prefix, &format!("accounts:{}", account_id)).into_owned()
}
// TODO: Add descriptive errors inside the lua scripts!
// The following are Lua scripts that are used to atomically execute the given logic
// inside Redis. This allows for more complex logic without needing multiple round
// trips for messages to be sent to and from Redis, as well as locks to ensure no other
// process is accessing Redis at the same time.
// For more information on scripting in Redis, see https://redis.io/commands/eval
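//
// As a sketch of how these scripts are used (mirroring `redis_get_account` below, and
// assuming a db prefix, a connection and an account id are in scope):
//
//     let accounts: Vec<AccountWithEncryptedTokens> = LOAD_ACCOUNTS
//         .arg(&*prefixed_key(db_prefix, ACCOUNTS_KEY))
//         .arg(&*prefixed_key(db_prefix, SETTLEMENT_ENGINES_KEY))
//         .arg(RedisAccountId(id))
//         .invoke_async(&mut connection)
//         .await?;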
/// The node's default ILP Address
static DEFAULT_ILP_ADDRESS: Lazy<Address> = Lazy::new(|| Address::from_str("local.host").unwrap());
/// This lua script fetches an account associated with a username. The client
/// MUST ensure that the returned account is authenticated.
static ACCOUNT_FROM_USERNAME: Lazy<Script> =
Lazy::new(|| Script::new(include_str!("lua/account_from_username.lua")));
/// Lua script which loads a list of accounts
/// If an account does not have a settlement_engine_url set
/// but there is one configured for that account's currency,
/// it will use the globally configured url
static LOAD_ACCOUNTS: Lazy<Script> =
Lazy::new(|| Script::new(include_str!("lua/load_accounts.lua")));
/// Lua script which reduces the provided account's balance before sending a Prepare packet
static PROCESS_PREPARE: Lazy<Script> =
Lazy::new(|| Script::new(include_str!("lua/process_prepare.lua")));
/// Lua script which increases the provided account's balance after receiving a Fulfill packet
static PROCESS_FULFILL: Lazy<Script> =
Lazy::new(|| Script::new(include_str!("lua/process_fulfill.lua")));
/// Lua script which increases the provided account's balance after receiving a Reject packet
static PROCESS_REJECT: Lazy<Script> =
Lazy::new(|| Script::new(include_str!("lua/process_reject.lua")));
static PROCESS_DELAYED_SETTLEMENT: Lazy<Script> =
Lazy::new(|| Script::new(include_str!("lua/process_settle.lua")));
/// Lua script which increases the provided account's balance after a settlement attempt failed
static REFUND_SETTLEMENT: Lazy<Script> =
Lazy::new(|| Script::new(include_str!("lua/refund_settlement.lua")));
/// Lua script which increases the provided account's balance after an incoming settlement succeeded
static PROCESS_INCOMING_SETTLEMENT: Lazy<Script> =
Lazy::new(|| Script::new(include_str!("lua/process_incoming_settlement.lua")));
/// Builder for the Redis Store
pub struct RedisStoreBuilder {
redis_url: ConnectionInfo,
secret: [u8; 32],
poll_interval: u64,
/// Connector's ILP Address. Used to insert `Child` accounts as
node_ilp_address: Address,
db_prefix: String,
}
impl RedisStoreBuilder {
/// Simple Constructor
pub fn new(redis_url: ConnectionInfo, secret: [u8; 32]) -> Self {
RedisStoreBuilder {
redis_url,
secret,
poll_interval: DEFAULT_POLL_INTERVAL,
node_ilp_address: DEFAULT_ILP_ADDRESS.clone(),
db_prefix: DEFAULT_DB_PREFIX.to_string(),
}
}
/// Sets the ILP Address corresponding to the node
pub fn node_ilp_address(&mut self, node_ilp_address: Address) -> &mut Self {
self.node_ilp_address = node_ilp_address;
self
}
/// Sets the poll interval at which the store will update its routes
pub fn poll_interval(&mut self, poll_interval: u64) -> &mut Self {
self.poll_interval = poll_interval;
self
}
/// Sets the redis db prefix that will be used for top level keys for this node
/// It can be used if there is a need for the same redis db to be shared by multiple nodes
pub fn with_db_prefix(&mut self, prefix: &str) -> &mut Self {
self.db_prefix = prefix.to_string();
self
}
/// Connects to the Redis Store
///
/// Specifically
/// 1. Generates encryption and decryption keys
/// 1. Connects to the redis store (ensuring that it reconnects in case of drop)
/// 1. Gets the Node address assigned to us by our parent (if it exists)
/// 1. Starts polling for routing table updates
/// 1. Spawns a thread to notify incoming payments over WebSockets
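    ///
    /// A minimal sketch of builder usage (assuming a `ConnectionInfo` and a 32-byte
    /// secret are already in hand; not compile-tested here):
    ///
    /// ```rust,ignore
    /// let store = RedisStoreBuilder::new(redis_info, secret)
    ///     .poll_interval(60_000)
    ///     .with_db_prefix("node_a")
    ///     .connect()
    ///     .await?;
    /// ```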
pub async fn connect(&mut self) -> Result<RedisStore, ()> {
let redis_info = self.redis_url.clone();
let (encryption_key, decryption_key) = generate_keys(&self.secret[..]);
self.secret.zeroize(); // clear the secret after it has been used for key generation
let poll_interval = self.poll_interval;
let ilp_address = self.node_ilp_address.clone();
let client = Client::open(redis_info.clone())
.map_err(|err| error!("Error creating subscription Redis client: {:?}", err))?;
debug!("Connected subscription client to redis: {:?}", client);
let mut connection = RedisReconnect::connect(redis_info.clone())
.map_err(|_| ())
.await?;
let mut sub_connection = client
.get_connection()
.map_err(|err| error!("Error connecting subscription client to Redis: {:?}", err))?;
// Before initializing the store, check if we have an address
// that was configured due to adding a parent. If no parent was
// found, use the builder's provided address (local.host) or the
// one we decided to override it with
let address: Option<String> = connection
.get(&*prefixed_key(&self.db_prefix, PARENT_ILP_KEY))
.map_err(|err| {
error!(
"Error checking whether we have a parent configured: {:?}",
err
)
})
.await?;
let node_ilp_address = if let Some(address) = address {
Address::from_str(&address).unwrap()
} else {
ilp_address
};
let (all_payment_publisher, _) = broadcast::channel::<PaymentNotification>(256);
let store = RedisStore {
ilp_address: Arc::new(RwLock::new(node_ilp_address)),
connection,
subscriptions: Arc::new(Mutex::new(HashMap::new())),
payment_publisher: all_payment_publisher,
exchange_rates: Arc::new(RwLock::new(HashMap::new())),
routes: Arc::new(RwLock::new(Arc::new(HashMap::new()))),
encryption_key: Arc::new(encryption_key),
decryption_key: Arc::new(decryption_key),
db_prefix: self.db_prefix.clone(),
};
// Poll for routing table updates
// Note: if this behavior changes, make sure to update the Drop implementation
let connection_clone = Arc::downgrade(&store.connection.conn);
let redis_info = store.connection.redis_info.clone();
let routing_table = store.routes.clone();
let db_prefix = self.db_prefix.clone();
let poll_routes = async move {
let mut interval = tokio::time::interval(Duration::from_millis(poll_interval));
// Irrefutable while pattern, can we do something here?
loop {
interval.tick().await;
if let Some(conn) = connection_clone.upgrade() {
let _ = update_routes(
RedisReconnect {
conn,
redis_info: redis_info.clone(),
},
routing_table.clone(),
&db_prefix,
)
.map_err(|err| error!("{}", err))
.await;
} else {
debug!("Not polling routes anymore because connection was closed");
break;
}
}
Ok::<(), ()>(())
};
tokio::spawn(poll_routes);
// Here we spawn a worker thread to listen for incoming messages on Redis pub/sub,
// running a callback for each message received.
// This currently must be a thread rather than a task due to the redis-rs driver
// not yet supporting asynchronous subscriptions (see https://github.com/mitsuhiko/redis-rs/issues/183).
let subscriptions_clone = store.subscriptions.clone();
let payment_publisher = store.payment_publisher.clone();
let db_prefix = prefixed_key(&self.db_prefix, STREAM_NOTIFICATIONS_PREFIX).into_owned();
std::thread::spawn(move || {
#[allow(clippy::cognitive_complexity)]
let sub_status =
sub_connection.psubscribe::<_, _, Vec<String>>(&["*"], move |msg| {
let channel_name = msg.get_channel_name();
if let Some(suffix) = channel_name.strip_prefix(&db_prefix) {
if let Ok(account_id) = Uuid::from_str(&suffix) {
let message: PaymentNotification = match serde_json::from_slice(msg.get_payload_bytes()) {
Ok(s) => s,
Err(e) => {
error!("Failed to get payload from subscription: {}", e);
return ControlFlow::Continue;
}
};
trace!("Subscribed message received for account {}: {:?}", account_id, message);
if payment_publisher.receiver_count() > 0 {
if let Err(err) = payment_publisher.send(message.clone()) {
error!("Failed to send a node-wide payment notification: {:?}", err);
}
}
match subscriptions_clone.lock().get_mut(&account_id) {
Some(senders) => {
senders.retain(|sender| {
if let Err(err) = sender.unbounded_send(message.clone()) {
debug!("Failed to send message: {}", err);
false
} else {
true
}
});
},
None => trace!("Ignoring message for account {} because there were no open subscriptions", account_id),
}
} else {
error!("Invalid Uuid in channel name: {}", channel_name);
}
} else {
warn!("Ignoring unexpected message from Redis subscription for channel: {}", channel_name);
}
ControlFlow::Continue
});
match sub_status {
Err(e) => warn!("Could not issue psubscribe to Redis: {}", e),
Ok(_) => debug!("Successfully subscribed to Redis pubsub"),
}
});
Ok(store)
}
}
/// A Store that uses Redis as its underlying database.
///
/// This store leverages atomic Redis transactions to do operations such as balance updates.
///
/// Currently the RedisStore polls the database for the routing table and rate updates, but
/// future versions of it will use PubSub to subscribe to updates.
#[derive(Clone)]
pub struct RedisStore {
/// The Store's ILP Address
ilp_address: Arc<RwLock<Address>>,
/// A connection which reconnects if dropped by accident
connection: RedisReconnect,
/// WebSocket senders which publish incoming payment updates
subscriptions: Arc<Mutex<HashMap<Uuid, Vec<UnboundedSender<PaymentNotification>>>>>,
/// A subscriber to all payment notifications, exposed via a WebSocket
payment_publisher: broadcast::Sender<PaymentNotification>,
exchange_rates: Arc<RwLock<HashMap<String, f64>>>,
/// The store keeps the routing table in memory so that it can be returned
/// synchronously while the Router is processing packets.
/// The outer `Arc<RwLock>` is used so that we can update the stored routing
/// table after polling the store for updates.
/// The inner `Arc<HashMap>` is used so that the `routing_table` method can
/// return a reference to the routing table without cloning the underlying data.
routes: Arc<RwLock<Arc<HashMap<String, Uuid>>>>,
/// Encryption Key so that the no cleartext data are stored
encryption_key: Arc<Secret<EncryptionKey>>,
/// Decryption Key to provide cleartext data to users
decryption_key: Arc<Secret<DecryptionKey>>,
/// Prefix for all top level keys. This enables multiple nodes to use the same db instance.
db_prefix: String,
}
impl RedisStore {
/// Gets all the account ids from Redis
async fn get_all_accounts_ids(&self) -> Result<Vec<Uuid>, NodeStoreError> {
let mut connection = self.connection.clone();
let account_ids: Vec<RedisAccountId> = connection
.smembers(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY))
.await?;
Ok(account_ids.iter().map(|rid| rid.0).collect())
}
/// Inserts the account corresponding to the provided `AccountWithEncryptedtokens`
/// in Redis. Returns the provided account (tokens remain encrypted)
async fn redis_insert_account(
&self,
encrypted: &AccountWithEncryptedTokens,
) -> Result<(), NodeStoreError> {
let account = &encrypted.account;
let id = accounts_key(&self.db_prefix, account.id);
let mut connection = self.connection.clone();
let routing_table = self.routes.clone();
// Check that there isn't already an account with values that MUST be unique
let mut pipe = redis_crate::pipe();
pipe.exists(&id);
pipe.hexists(
&*prefixed_key(&self.db_prefix, USERNAMES_KEY),
account.username().as_ref(),
);
if account.routing_relation == RoutingRelation::Parent {
pipe.exists(&*prefixed_key(&self.db_prefix, PARENT_ILP_KEY));
}
let results: Vec<bool> = pipe.query_async(&mut connection).await?;
if results.iter().any(|val| *val) {
warn!(
"An account already exists with the same {}. Cannot insert account: {:?}",
account.id, account
);
return Err(NodeStoreError::AccountExists(account.username.to_string()));
}
let mut pipe = redis_crate::pipe();
pipe.atomic();
// Add the account key to the list of accounts
pipe.sadd(
&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY),
RedisAccountId(account.id),
)
.ignore();
// Save map for Username -> Account ID
pipe.hset(
&*prefixed_key(&self.db_prefix, USERNAMES_KEY),
account.username().as_ref(),
RedisAccountId(account.id),
)
.ignore();
// Set balance-related details
pipe.hset_multiple(&id, &[("balance", 0), ("prepaid_amount", 0)])
.ignore();
if account.should_send_routes() {
pipe.sadd(
&*prefixed_key(&self.db_prefix, SEND_ROUTES_KEY),
RedisAccountId(account.id),
)
.ignore();
}
if account.should_receive_routes() {
pipe.sadd(
&*prefixed_key(&self.db_prefix, RECEIVE_ROUTES_FROM_KEY),
RedisAccountId(account.id),
)
.ignore();
}
if account.ilp_over_btp_url.is_some() {
pipe.sadd(
&*prefixed_key(&self.db_prefix, BPT_OUTGOING),
RedisAccountId(account.id),
)
.ignore();
}
// Add route to routing table
pipe.hset(
&*prefixed_key(&self.db_prefix, ROUTES_KEY),
account.ilp_address.as_bytes(),
RedisAccountId(account.id),
)
.ignore();
// Set account details
pipe.cmd("HMSET").arg(&id).arg(encrypted).ignore();
// The parent account settings are done via the API. We just
// had to check for the existence of a parent
pipe.query_async(&mut connection).await?;
update_routes(connection, routing_table, &self.db_prefix).await?;
debug!(
"Inserted account {} (ILP address: {})",
account.id, account.ilp_address
);
Ok(())
}
/// Overwrites the account corresponding to the provided `AccountWithEncryptedtokens`
/// in Redis. Returns the provided account (tokens remain encrypted)
async fn redis_update_account(
&self,
encrypted: &AccountWithEncryptedTokens,
) -> Result<(), NodeStoreError> {
let account = encrypted.account.clone();
let mut connection = self.connection.clone();
let routing_table = self.routes.clone();
// Check to make sure an account with this ID already exists
// TODO this needs to be atomic with the insertions later,
// waiting on #186
// TODO: Do not allow this update to happen if
// AccountDetails.RoutingRelation == Parent and parent is
// already set
let exists: bool = connection
.exists(accounts_key(&self.db_prefix, account.id))
.await?;
if !exists {
warn!(
"No account exists with ID {}, cannot update account {:?}",
account.id, account
);
return Err(NodeStoreError::AccountNotFound(account.id.to_string()));
}
let mut pipe = redis_crate::pipe();
pipe.atomic();
// Add the account key to the list of accounts
pipe.sadd(
&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY),
RedisAccountId(account.id),
)
.ignore();
// Set account details
pipe.cmd("HMSET")
.arg(accounts_key(&self.db_prefix, account.id))
.arg(encrypted)
.ignore();
if account.should_send_routes() {
pipe.sadd(
&*prefixed_key(&self.db_prefix, SEND_ROUTES_KEY),
RedisAccountId(account.id),
)
.ignore();
}
if account.should_receive_routes() {
pipe.sadd(
&*prefixed_key(&self.db_prefix, RECEIVE_ROUTES_FROM_KEY),
RedisAccountId(account.id),
)
.ignore();
}
if account.ilp_over_btp_url.is_some() {
pipe.sadd(
&*prefixed_key(&self.db_prefix, BPT_OUTGOING),
RedisAccountId(account.id),
)
.ignore();
}
// Add route to routing table
pipe.hset(
&*prefixed_key(&self.db_prefix, ROUTES_KEY),
account.ilp_address.to_bytes().to_vec(),
RedisAccountId(account.id),
)
.ignore();
pipe.query_async(&mut connection).await?;
update_routes(connection, routing_table, &self.db_prefix).await?;
debug!(
"Inserted account {} (id: {}, ILP address: {})",
account.username, account.id, account.ilp_address
);
Ok(())
}
/// Modifies the account corresponding to the provided `id` with the provided `settings`
/// in Redis. Returns the modified account (tokens remain encrypted)
async fn redis_modify_account(
&self,
id: Uuid,
settings: EncryptedAccountSettings,
) -> Result<AccountWithEncryptedTokens, NodeStoreError> {
let mut pipe = redis_crate::pipe();
pipe.atomic();
let accounts_key = accounts_key(&self.db_prefix, id);
if let Some(ref endpoint) = settings.ilp_over_btp_url {
pipe.hset(&accounts_key, "ilp_over_btp_url", endpoint);
}
if let Some(ref endpoint) = settings.ilp_over_http_url {
pipe.hset(&accounts_key, "ilp_over_http_url", endpoint);
}
if let Some(ref token) = settings.ilp_over_btp_outgoing_token {
pipe.hset(&accounts_key, "ilp_over_btp_outgoing_token", token.as_ref());
}
if let Some(ref token) = settings.ilp_over_http_outgoing_token {
pipe.hset(
&accounts_key,
"ilp_over_http_outgoing_token",
token.as_ref(),
);
}
if let Some(ref token) = settings.ilp_over_btp_incoming_token {
pipe.hset(&accounts_key, "ilp_over_btp_incoming_token", token.as_ref());
}
if let Some(ref token) = settings.ilp_over_http_incoming_token {
pipe.hset(
&accounts_key,
"ilp_over_http_incoming_token",
token.as_ref(),
);
}
if let Some(settle_threshold) = settings.settle_threshold {
pipe.hset(&accounts_key, "settle_threshold", settle_threshold);
}
if let Some(settle_to) = settings.settle_to {
if settle_to > std::i64::MAX as u64 {
// Redis cannot handle values greater than i64::MAX (other stores maybe can though)
return Err(NodeStoreError::InvalidAccount(
CreateAccountError::ParamTooLarge("settle_to".to_owned()),
));
}
pipe.hset(&accounts_key, "settle_to", settle_to);
}
pipe.query_async(&mut self.connection.clone()).await?;
// return the updated account
self.redis_get_account(id).await
}
/// Gets the account (tokens remain encrypted) corresponding to the provided `id` from Redis.
async fn redis_get_account(
&self,
id: Uuid,
) -> Result<AccountWithEncryptedTokens, NodeStoreError> {
let mut accounts: Vec<AccountWithEncryptedTokens> = LOAD_ACCOUNTS
.arg(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY))
.arg(&*prefixed_key(&self.db_prefix, SETTLEMENT_ENGINES_KEY))
.arg(RedisAccountId(id))
.invoke_async(&mut self.connection.clone())
.await?;
accounts
.pop()
.ok_or_else(|| NodeStoreError::AccountNotFound(id.to_string()))
}
/// Deletes the account corresponding to the provided `id` from Redis.
/// Returns the deleted account (tokens remain encrypted)
async fn redis_delete_account(
&self,
id: Uuid,
) -> Result<AccountWithEncryptedTokens, NodeStoreError> {
let encrypted = self.redis_get_account(id).await?;
let account = &encrypted.account;
let mut pipe = redis_crate::pipe();
pipe.atomic();
pipe.srem(
&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY),
RedisAccountId(account.id),
)
.ignore();
pipe.del(&*accounts_key(&self.db_prefix, account.id))
.ignore();
pipe.hdel(
&*prefixed_key(&self.db_prefix, USERNAMES_KEY),
account.username().as_ref(),
)
.ignore();
if account.should_send_routes() {
pipe.srem(
&*prefixed_key(&self.db_prefix, SEND_ROUTES_KEY),
RedisAccountId(account.id),
)
.ignore();
}
if account.should_receive_routes() {
pipe.srem(
&*prefixed_key(&self.db_prefix, RECEIVE_ROUTES_FROM_KEY),
RedisAccountId(account.id),
)
.ignore();
}
if account.ilp_over_btp_url.is_some() {
pipe.srem(
&*prefixed_key(&self.db_prefix, BPT_OUTGOING),
RedisAccountId(account.id),
)
.ignore();
}
pipe.hdel(
&*prefixed_key(&self.db_prefix, ROUTES_KEY),
account.ilp_address.to_bytes().to_vec(),
)
.ignore();
pipe.del(uncredited_amount_key(&self.db_prefix, id));
let mut connection = self.connection.clone();
pipe.query_async(&mut connection).await?;
update_routes(connection, self.routes.clone(), &self.db_prefix).await?;
debug!("Deleted account {}", account.id);
Ok(encrypted)
}
}
#[async_trait]
impl AccountStore for RedisStore {
type Account = Account;
// TODO cache results to avoid hitting Redis for each packet
async fn get_accounts(
&self,
account_ids: Vec<Uuid>,
) -> Result<Vec<Account>, AccountStoreError> {
let num_accounts = account_ids.len();
let mut script = LOAD_ACCOUNTS.prepare_invoke();
script.arg(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY));
script.arg(&*prefixed_key(&self.db_prefix, SETTLEMENT_ENGINES_KEY));
for id in account_ids.iter() {
script.arg(id.to_string());
}
// Need to clone the connection here to avoid lifetime errors
let accounts: Vec<AccountWithEncryptedTokens> =
script.invoke_async(&mut self.connection.clone()).await?;
// Decrypt the accounts. TODO: This functionality should be
// decoupled from redis so that it gets reused by the other backends
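// If fewer accounts come back than were requested (e.g. an unknown id), this is reported as a WrongLength error below.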
if accounts.len() == num_accounts {
let accounts = accounts
.into_iter()
.map(|account| account.decrypt_tokens(&self.decryption_key.expose_secret().0))
.collect();
Ok(accounts)
} else {
Err(AccountStoreError::WrongLength {
expected: num_accounts,
actual: accounts.len(),
})
}
}
async fn get_account_id_from_username(
&self,
username: &Username,
) -> Result<Uuid, AccountStoreError> {
let username = username.clone();
let id: Option<RedisAccountId> = self
.connection
.clone()
.hget(
&*prefixed_key(&self.db_prefix, USERNAMES_KEY),
username.as_ref(),
)
.await?;
match id {
Some(rid) => Ok(rid.0),
None => {
debug!("Username not found: {}", username);
Err(AccountStoreError::AccountNotFound(username.to_string()))
}
}
}
}
impl StreamNotificationsStore for RedisStore {
type Account = Account;
fn add_payment_notification_subscription(
&self,
id: Uuid,
sender: UnboundedSender<PaymentNotification>,
) {
trace!("Added payment notification listener for {}", id);
self.subscriptions
.lock()
.entry(id)
.or_insert_with(Vec::new)
.push(sender);
}
fn publish_payment_notification(&self, payment: PaymentNotification) {
let username = payment.to_username.clone();
let message = serde_json::to_string(&payment).unwrap();
let mut connection = self.connection.clone();
let self_clone = self.clone();
tokio::spawn(async move {
let account_id = self_clone
.get_account_id_from_username(&username)
.map_err(|_| {
error!(
"Failed to find account ID corresponding to username: {}",
username
)
})
.await?;
debug!(
"Publishing payment notification {} for account {}",
message, account_id
);
// https://github.com/rust-lang/rust/issues/64960#issuecomment-544219926
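// The channel name is built as a separate String (prefixed notification key plus account id), which appears to sidestep the borrow issue linked above.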
let published_args = format!(
"{}{}",
prefixed_key(&self_clone.db_prefix, STREAM_NOTIFICATIONS_PREFIX),
account_id.clone()
);
redis_crate::cmd("PUBLISH")
.arg(published_args)
.arg(message)
.query_async(&mut connection)
.map_err(move |err| error!("Error publish message to Redis: {:?}", err))
.await?;
Ok::<(), ()>(())
});
}
fn all_payment_subscription(&self) -> broadcast::Receiver<PaymentNotification> {
self.payment_publisher.subscribe()
}
}
#[async_trait]
impl BalanceStore for RedisStore {
/// Returns the balance **from the account holder's perspective**, meaning the sum of
/// the Payable Balance and Pending Outgoing minus the Receivable Balance and the Pending Incoming.
async fn get_balance(&self, account_id: Uuid) -> Result<i64, BalanceStoreError> {
let values: Vec<i64> = self
.connection
.clone()
.hget(
accounts_key(&self.db_prefix, account_id),
&["balance", "prepaid_amount"],
)
.await?;
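// Both fields are initialized to 0 when the account is inserted, so the reply is expected to hold (balance, prepaid_amount) in that order.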
let balance = values[0];
let prepaid_amount = values[1];
Ok(balance + prepaid_amount)
}
async fn update_balances_for_prepare(
&self,
from_account_id: Uuid,
incoming_amount: u64,
) -> Result<(), BalanceStoreError> {
// Don't do anything if the amount was 0
if incoming_amount == 0 {
return Ok(());
}
let balance: i64 = PROCESS_PREPARE
.arg(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY))
.arg(RedisAccountId(from_account_id))
.arg(incoming_amount)
.invoke_async(&mut self.connection.clone())
.await?;
trace!(
"Processed prepare with incoming amount: {}. Account {} has balance (including prepaid amount): {} ",
incoming_amount, from_account_id, balance
);
Ok(())
}
async fn update_balances_for_fulfill(
&self,
to_account_id: Uuid,
outgoing_amount: u64,
) -> Result<(i64, u64), BalanceStoreError> {
let (balance, amount_to_settle): (i64, u64) = PROCESS_FULFILL
.arg(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY))
.arg(RedisAccountId(to_account_id))
.arg(outgoing_amount)
.invoke_async(&mut self.connection.clone())
.await?;
trace!(
"Processed fulfill for account {} for outgoing amount {}. Fulfill call result: {} {}",
to_account_id,
outgoing_amount,
balance,
amount_to_settle,
);
Ok((balance, amount_to_settle))
}
async fn update_balances_for_reject(
&self,
from_account_id: Uuid,
incoming_amount: u64,
) -> Result<(), BalanceStoreError> {
if incoming_amount == 0 {
return Ok(());
}
let balance: i64 = PROCESS_REJECT
.arg(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY))
.arg(RedisAccountId(from_account_id))
.arg(incoming_amount)
.invoke_async(&mut self.connection.clone())
.await?;
trace!(
"Processed reject for incoming amount: {}. Account {} has balance (including prepaid amount): {}",
incoming_amount, from_account_id, balance
);
Ok(())
}
async fn update_balances_for_delayed_settlement(
&self,
to_account_id: Uuid,
) -> Result<(i64, u64), BalanceStoreError> {
let (balance, amount_to_settle): (i64, u64) = PROCESS_DELAYED_SETTLEMENT
.arg(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY))
.arg(RedisAccountId(to_account_id))
.invoke_async(&mut self.connection.clone())
.await?;
trace!(
"Processed account {} for delayed settlement, balance: {}, to_settle: {}",
to_account_id,
balance,
amount_to_settle
);
Ok((balance, amount_to_settle))
}
}
impl ExchangeRateStore for RedisStore {
fn get_exchange_rates(&self, asset_codes: &[&str]) -> Result<Vec<f64>, ExchangeRateStoreError> {
let rates: Vec<f64> = asset_codes
.iter()
.filter_map(|code| (*self.exchange_rates.read()).get(*code).cloned())
.collect();
if rates.len() == asset_codes.len() {
Ok(rates)
} else {
// todo add error type
Err(ExchangeRateStoreError::PairNotFound {
from: asset_codes[0].to_string(),
to: asset_codes[1].to_string(),
})
}
}
fn get_all_exchange_rates(&self) -> Result<HashMap<String, f64>, ExchangeRateStoreError> {
Ok((*self.exchange_rates.read()).clone())
}
fn set_exchange_rates(
&self,
rates: HashMap<String, f64>,
) -> Result<(), ExchangeRateStoreError> {
// TODO publish rate updates through a pubsub mechanism to support horizontally scaling nodes
(*self.exchange_rates.write()) = rates;
Ok(())
}
}
#[async_trait]
impl BtpStore for RedisStore {
type Account = Account;
async fn get_account_from_btp_auth(
&self,
username: &Username,
token: &str,
) -> Result<Self::Account, BtpStoreError> {
// TODO make sure it can't do script injection!
// TODO cache the result so we don't hit redis for every packet (is that
// necessary if redis is often used as a cache?)
let account: Option<AccountWithEncryptedTokens> = ACCOUNT_FROM_USERNAME
.arg(&*prefixed_key(&self.db_prefix, USERNAMES_KEY))
.arg(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY))
.arg(username.as_ref())
.invoke_async(&mut self.connection.clone())
.await?;
if let Some(account) = account {
let account = account.decrypt_tokens(&self.decryption_key.expose_secret().0);
if let Some(ref t) = account.ilp_over_btp_incoming_token {
let t = t.expose_secret();
if t.as_ref() == token.as_bytes() {
Ok(account)
} else {
debug!(
"Found account {} but BTP auth token was wrong",
account.username
);
Err(BtpStoreError::Unauthorized(username.to_string()))
}
} else {
debug!(
"Account {} does not have an incoming btp token configured",
account.username
);
Err(BtpStoreError::Unauthorized(username.to_string()))
}
} else {
warn!("No account found with BTP token");
Err(BtpStoreError::AccountNotFound(username.to_string()))
}
}
async fn get_btp_outgoing_accounts(&self) -> Result<Vec<Self::Account>, BtpStoreError> {
let account_ids: Vec<RedisAccountId> = self
.connection
.clone()
.smembers(&*prefixed_key(&self.db_prefix, BPT_OUTGOING))
.await?;
let account_ids: Vec<Uuid> = account_ids.into_iter().map(|id| id.0).collect();
if account_ids.is_empty() {
return Ok(Vec::new());
}
let accounts = self.get_accounts(account_ids).await?;
Ok(accounts)
}
}
#[async_trait]
impl HttpStore for RedisStore {
type Account = Account;
/// Checks if the stored token for the provided username matches the
/// provided token, and if so, returns the account associated with that token
async fn get_account_from_http_auth(
&self,
username: &Username,
token: &str,
) -> Result<Self::Account, HttpStoreError> {
// TODO make sure it can't do script injection!
let account: Option<AccountWithEncryptedTokens> = ACCOUNT_FROM_USERNAME
.arg(&*prefixed_key(&self.db_prefix, USERNAMES_KEY))
.arg(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY))
.arg(username.as_ref())
.invoke_async(&mut self.connection.clone())
.await?;
if let Some(account) = account {
let account = account.decrypt_tokens(&self.decryption_key.expose_secret().0);
if let Some(ref t) = account.ilp_over_http_incoming_token {
let t = t.expose_secret();
if t.as_ref() == token.as_bytes() {
Ok(account)
} else {
Err(HttpStoreError::Unauthorized(username.to_string()))
}
} else {
Err(HttpStoreError::Unauthorized(username.to_string()))
}
} else {
warn!("No account found with given HTTP auth");
Err(HttpStoreError::AccountNotFound(username.to_string()))
}
}
}
impl RouterStore for RedisStore {
fn routing_table(&self) -> Arc<HashMap<String, Uuid>> {
self.routes.read().clone()
}
}
#[async_trait]
impl NodeStore for RedisStore {
type Account = Account;
async fn insert_account(
&self,
account: AccountDetails,
) -> Result<Self::Account, NodeStoreError> {
let id = Uuid::new_v4();
let account = Account::try_from(id, account, self.get_ilp_address())
.map_err(NodeStoreError::InvalidAccount)?;
debug!(
"Generated account id for {}: {}",
account.username, account.id
);
let encrypted = account
.clone()
.encrypt_tokens(&self.encryption_key.expose_secret().0);
self.redis_insert_account(&encrypted).await?;
Ok(account)
}
async fn delete_account(&self, id: Uuid) -> Result<Account, NodeStoreError> {
let account = self.redis_delete_account(id).await?;
Ok(account.decrypt_tokens(&self.decryption_key.expose_secret().0))
}
async fn update_account(
&self,
id: Uuid,
account: AccountDetails,
) -> Result<Self::Account, NodeStoreError> {
let account = Account::try_from(id, account, self.get_ilp_address())
.map_err(NodeStoreError::InvalidAccount)?;
debug!(
"Generated account id for {}: {}",
account.username, account.id
);
let encrypted = account
.clone()
.encrypt_tokens(&self.encryption_key.expose_secret().0);
self.redis_update_account(&encrypted).await?;
Ok(account)
}
async fn modify_account_settings(
&self,
id: Uuid,
settings: AccountSettings,
) -> Result<Self::Account, NodeStoreError> {
let settings = EncryptedAccountSettings {
settle_to: settings.settle_to,
settle_threshold: settings.settle_threshold,
ilp_over_btp_url: settings.ilp_over_btp_url,
ilp_over_http_url: settings.ilp_over_http_url,
ilp_over_btp_incoming_token: settings.ilp_over_btp_incoming_token.map(|token| {
encrypt_token(
&self.encryption_key.expose_secret().0,
token.expose_secret().as_bytes(),
)
.freeze()
}),
ilp_over_http_incoming_token: settings.ilp_over_http_incoming_token.map(|token| {
encrypt_token(
&self.encryption_key.expose_secret().0,
token.expose_secret().as_bytes(),
)
.freeze()
}),
ilp_over_btp_outgoing_token: settings.ilp_over_btp_outgoing_token.map(|token| {
encrypt_token(
&self.encryption_key.expose_secret().0,
token.expose_secret().as_bytes(),
)
.freeze()
}),
ilp_over_http_outgoing_token: settings.ilp_over_http_outgoing_token.map(|token| {
encrypt_token(
&self.encryption_key.expose_secret().0,
token.expose_secret().as_bytes(),
)
.freeze()
}),
};
let account = self.redis_modify_account(id, settings).await?;
Ok(account.decrypt_tokens(&self.decryption_key.expose_secret().0))
}
// TODO limit the number of results and page through them
async fn get_all_accounts(&self) -> Result<Vec<Self::Account>, NodeStoreError> {
let mut connection = self.connection.clone();
let account_ids = self.get_all_accounts_ids().await?;
let mut script = LOAD_ACCOUNTS.prepare_invoke();
script.arg(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY));
script.arg(&*prefixed_key(&self.db_prefix, SETTLEMENT_ENGINES_KEY));
for id in account_ids.iter() {
script.arg(id.to_string());
}
let accounts: Vec<AccountWithEncryptedTokens> =
script.invoke_async(&mut connection).await?;
// TODO this should be refactored so that it gets reused in multiple backends
let accounts: Vec<Account> = accounts
.into_iter()
.map(|account| account.decrypt_tokens(&self.decryption_key.expose_secret().0))
.collect();
Ok(accounts)
}
async fn set_static_routes<R>(&self, routes: R) -> Result<(), NodeStoreError>
where
R: IntoIterator<Item = (String, Uuid)> + Send + 'async_trait,
{
let mut connection = self.connection.clone();
let routes: Vec<(String, RedisAccountId)> = routes
.into_iter()
.map(|(s, id)| (s, RedisAccountId(id)))
.collect();
let accounts = routes.iter().map(|(_prefix, account_id)| account_id);
let mut pipe = redis_crate::pipe();
for account_id in accounts {
pipe.exists(accounts_key(&self.db_prefix, (*account_id).0));
}
let routing_table = self.routes.clone();
let accounts_exist: Vec<bool> = pipe.query_async(&mut connection).await?;
if !accounts_exist.iter().all(|a| *a) {
error!("Error setting static routes because not all of the given accounts exist");
// TODO add proper error variant for "not all accounts were found"
return Err(NodeStoreError::MissingAccounts);
}
let mut pipe = redis_crate::pipe();
pipe.atomic()
.del(&*prefixed_key(&self.db_prefix, STATIC_ROUTES_KEY))
.ignore()
.hset_multiple(&*prefixed_key(&self.db_prefix, STATIC_ROUTES_KEY), &routes)
.ignore();
pipe.query_async(&mut connection).await?;
update_routes(connection, routing_table, &self.db_prefix).await?;
Ok(())
}
async fn set_static_route(
&self,
prefix: String,
account_id: Uuid,
) -> Result<(), NodeStoreError> {
let routing_table = self.routes.clone();
let mut connection = self.connection.clone();
let exists: bool = connection
.exists(accounts_key(&self.db_prefix, account_id))
.await?;
if !exists {
error!(
"Cannot set static route for prefix: {} because account {} does not exist",
prefix, account_id
);
return Err(NodeStoreError::AccountNotFound(account_id.to_string()));
}
connection
.hset(
&*prefixed_key(&self.db_prefix, STATIC_ROUTES_KEY),
prefix,
RedisAccountId(account_id),
)
.await?;
update_routes(connection, routing_table, &self.db_prefix).await?;
Ok(())
}
async fn set_default_route(&self, account_id: Uuid) -> Result<(), NodeStoreError> {
let routing_table = self.routes.clone();
// TODO replace this with a lua script to do both calls at once
let mut connection = self.connection.clone();
let exists: bool = connection
.exists(accounts_key(&self.db_prefix, account_id))
.await?;
if !exists {
error!(
"Cannot set default route because account {} does not exist",
account_id
);
return Err(NodeStoreError::AccountNotFound(account_id.to_string()));
}
connection
.set(
&*prefixed_key(&self.db_prefix, DEFAULT_ROUTE_KEY),
RedisAccountId(account_id),
)
.await?;
debug!("Set default route to account id: {}", account_id);
update_routes(connection, routing_table, &self.db_prefix).await?;
Ok(())
}
async fn set_settlement_engines(
&self,
asset_to_url_map: impl IntoIterator<Item = (String, Url)> + Send + 'async_trait,
) -> Result<(), NodeStoreError> {
let mut connection = self.connection.clone();
let asset_to_url_map: Vec<(String, String)> = asset_to_url_map
.into_iter()
.map(|(asset_code, url)| (asset_code, url.to_string()))
.collect();
debug!("Setting settlement engines to {:?}", asset_to_url_map);
connection
.hset_multiple(
&*prefixed_key(&self.db_prefix, SETTLEMENT_ENGINES_KEY),
&asset_to_url_map,
)
.await?;
Ok(())
}
async fn get_asset_settlement_engine(
&self,
asset_code: &str,
) -> Result<Option<Url>, NodeStoreError> {
let url: Option<String> = self
.connection
.clone()
.hget(
&*prefixed_key(&self.db_prefix, SETTLEMENT_ENGINES_KEY),
asset_code,
)
.await?;
if let Some(url) = url {
match Url::parse(url.as_str()) {
Ok(url) => Ok(Some(url)),
Err(err) => {
error!(
"Settlement engine URL loaded from Redis was not a valid URL: {:?}",
err
);
Err(NodeStoreError::InvalidEngineUrl(err.to_string()))
}
}
} else {
Ok(None)
}
}
}
#[async_trait]
impl AddressStore for RedisStore {
// Updates the ILP address of the store & iterates over all children and
// updates their ILP Address to match the new address.
async fn set_ilp_address(&self, ilp_address: Address) -> Result<(), AddressStoreError> {
debug!("Setting ILP address to: {}", ilp_address);
let routing_table = self.routes.clone();
let mut connection = self.connection.clone();
// Set the ILP address we have in memory
(*self.ilp_address.write()) = ilp_address.clone();
// Save it to Redis
connection
.set(
&*prefixed_key(&self.db_prefix, PARENT_ILP_KEY),
ilp_address.as_bytes(),
)
.await?;
let accounts = self.get_all_accounts().await?;
// TODO: This can be an expensive operation if this function
// gets called often. This currently only gets called when
// inserting a new parent account in the API. It'd be nice
// if we could generate a child's ILP address on the fly,
// instead of having to store the username appended to the
// node's ilp address. Currently this is not possible, as
// account.ilp_address() cannot access any state that exists
// on the store.
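// Note: `segments().rev().next()` yields the *last* segment of the new ILP address, despite the variable name below.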
let first_segment = ilp_address
.segments()
.rev()
.next()
.expect("address did not have a first segment, this should be impossible");
let mut pipe = redis_crate::pipe();
for account in &accounts {
// Update the address and routes of all children and non-routing accounts.
if account.routing_relation() != RoutingRelation::Parent
&& account.routing_relation() != RoutingRelation::Peer
{
// remove the old route
pipe.hdel(
&*prefixed_key(&self.db_prefix, ROUTES_KEY),
account.ilp_address.as_bytes(),
)
.ignore();
// if the node's new ILP address already ends with this
// account's username, the route is already correct so
// there is no need to append the username again.
let new_ilp_address = if first_segment == account.username().to_string() {
ilp_address.clone()
} else {
ilp_address
.with_suffix(account.username().as_bytes())
.unwrap()
};
pipe.hset(
accounts_key(&self.db_prefix, account.id()),
"ilp_address",
new_ilp_address.as_bytes(),
)
.ignore();
pipe.hset(
&*prefixed_key(&self.db_prefix, ROUTES_KEY),
new_ilp_address.as_bytes(),
RedisAccountId(account.id()),
)
.ignore();
}
}
pipe.query_async(&mut connection.clone()).await?;
update_routes(connection, routing_table, &self.db_prefix).await?;
Ok(())
}
async fn clear_ilp_address(&self) -> Result<(), AddressStoreError> {
self.connection
.clone()
.del(&*prefixed_key(&self.db_prefix, PARENT_ILP_KEY))
.map_err(|err| AddressStoreError::Other(Box::new(err)))
.await?;
// overwrite the ilp address with the default value
*(self.ilp_address.write()) = DEFAULT_ILP_ADDRESS.clone();
Ok(())
}
fn get_ilp_address(&self) -> Address {
// the read guard only lives for the duration of this call, so we clone the address instead of returning a reference into the lock
self.ilp_address.read().clone()
}
}
type RoutingTable<A> = HashMap<String, A>;
#[async_trait]
impl CcpRoutingStore for RedisStore {
type Account = Account;
async fn get_accounts_to_send_routes_to(
&self,
ignore_accounts: Vec<Uuid>,
) -> Result<Vec<Account>, CcpRoutingStoreError> {
let account_ids: Vec<RedisAccountId> = self
.connection
.clone()
.smembers(&*prefixed_key(&self.db_prefix, SEND_ROUTES_KEY))
.await?;
let account_ids: Vec<Uuid> = account_ids
.into_iter()
.map(|id| id.0)
.filter(|id| !ignore_accounts.contains(&id))
.collect();
if account_ids.is_empty() {
return Ok(Vec::new());
}
let accounts = self.get_accounts(account_ids).await?;
Ok(accounts)
}
async fn get_accounts_to_receive_routes_from(
&self,
) -> Result<Vec<Account>, CcpRoutingStoreError> {
let account_ids: Vec<RedisAccountId> = self
.connection
.clone()
.smembers(&*prefixed_key(&self.db_prefix, RECEIVE_ROUTES_FROM_KEY))
.await?;
let account_ids: Vec<Uuid> = account_ids.into_iter().map(|id| id.0).collect();
if account_ids.is_empty() {
return Ok(Vec::new());
}
let accounts = self.get_accounts(account_ids).await?;
Ok(accounts)
}
async fn get_local_and_configured_routes(
&self,
) -> Result<(RoutingTable<Account>, RoutingTable<Account>), CcpRoutingStoreError> {
let static_routes: Vec<(String, RedisAccountId)> = self
.connection
.clone()
.hgetall(&*prefixed_key(&self.db_prefix, STATIC_ROUTES_KEY))
.await?;
let accounts = self.get_all_accounts().await?;
let local_table: HashMap<String, Account> = accounts
.iter()
.map(|account| (account.ilp_address.to_string(), account.clone()))
.collect();
let account_map: HashMap<Uuid, &Account> = accounts
.iter()
.map(|account| (account.id, account))
.collect();
let configured_table: HashMap<String, Account> = static_routes
.into_iter()
.filter_map(|(prefix, account_id)| {
if let Some(account) = account_map.get(&account_id.0) {
Some((prefix, (*account).clone()))
} else {
warn!(
"No account for ID: {}, ignoring configured route for prefix: {}",
account_id, prefix
);
None
}
})
.collect();
Ok((local_table, configured_table))
}
async fn set_routes(
&mut self,
routes: impl IntoIterator<Item = (String, Account)> + Send + 'async_trait,
) -> Result<(), CcpRoutingStoreError> {
let routes: Vec<(String, RedisAccountId)> = routes
.into_iter()
.map(|(prefix, account)| (prefix, RedisAccountId(account.id)))
.collect();
let num_routes = routes.len();
let mut connection = self.connection.clone();
// Save routes to Redis
let mut pipe = redis_crate::pipe();
pipe.atomic()
.del(&*prefixed_key(&self.db_prefix, ROUTES_KEY))
.ignore()
.hset_multiple(&*prefixed_key(&self.db_prefix, ROUTES_KEY), &routes)
.ignore();
pipe.query_async(&mut connection).await?;
trace!("Saved {} routes to Redis", num_routes);
update_routes(connection, self.routes.clone(), &self.db_prefix).await?;
Ok(())
}
}
#[async_trait]
impl RateLimitStore for RedisStore {
type Account = Account;
/// Apply rate limits for number of packets per minute and amount of money per minute
///
/// This uses https://github.com/brandur/redis-cell so the redis-cell module MUST be loaded into redis before this is run
async fn apply_rate_limits(
&self,
account: Account,
prepare_amount: u64,
) -> Result<(), RateLimitError> {
if account.amount_per_minute_limit.is_some() || account.packets_per_minute_limit.is_some() {
let mut pipe = redis_crate::pipe();
let packet_limit = account.packets_per_minute_limit.is_some();
let amount_limit = account.amount_per_minute_limit.is_some();
if let Some(limit) = account.packets_per_minute_limit {
let limit = limit - 1;
let packets_limit =
prefixed_key(&self.db_prefix, &format!("limit:packets:{}", account.id))
.into_owned();
pipe.cmd("CL.THROTTLE")
.arg(&packets_limit)
.arg(limit)
.arg(limit)
.arg(60)
.arg(1);
}
if let Some(limit) = account.amount_per_minute_limit {
let limit = limit - 1;
let throughput_limit =
prefixed_key(&self.db_prefix, &format!("limit:throughput:{}", account.id))
.into_owned();
pipe.cmd("CL.THROTTLE")
.arg(&throughput_limit)
// TODO allow separate configuration for burst limit
.arg(limit)
.arg(limit)
.arg(60)
.arg(prepare_amount);
}
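// Per the redis-cell docs, CL.THROTTLE replies with an array whose first element is 0 if the action is allowed and 1 if it is rate limited; the checks below rely on that.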
let results: Vec<Vec<i64>> = pipe
.query_async(&mut self.connection.clone())
.map_err(|err| {
error!("Error applying rate limits: {:?}", err);
RateLimitError::StoreError
})
.await?;
if packet_limit && amount_limit {
if results[0][0] == 1 {
Err(RateLimitError::PacketLimitExceeded)
} else if results[1][0] == 1 {
Err(RateLimitError::ThroughputLimitExceeded)
} else {
Ok(())
}
} else if packet_limit && results[0][0] == 1 {
Err(RateLimitError::PacketLimitExceeded)
} else if amount_limit && results[0][0] == 1 {
Err(RateLimitError::ThroughputLimitExceeded)
} else {
Ok(())
}
} else {
Ok(())
}
}
async fn refund_throughput_limit(
&self,
account: Account,
prepare_amount: u64,
) -> Result<(), RateLimitError> {
if let Some(limit) = account.amount_per_minute_limit {
let limit = limit - 1;
let throughput_limit =
prefixed_key(&self.db_prefix, &format!("limit:throughput:{}", account.id))
.into_owned();
cmd("CL.THROTTLE")
.arg(&throughput_limit)
.arg(limit)
.arg(limit)
.arg(60)
// TODO make sure this doesn't overflow
.arg(0i64 - (prepare_amount as i64))
.query_async(&mut self.connection.clone())
.map_err(|_| RateLimitError::StoreError)
.await?;
}
Ok(())
}
}
#[async_trait]
impl IdempotentStore for RedisStore {
async fn load_idempotent_data(
&self,
idempotency_key: String,
) -> Result<Option<IdempotentData>, IdempotentStoreError> {
let mut connection = self.connection.clone();
let ret: HashMap<String, String> = connection
.hgetall(prefixed_idempotency_key(&self.db_prefix, &idempotency_key))
.await?;
if let (Some(status_code), Some(data), Some(input_hash_slice)) = (
ret.get("status_code"),
ret.get("data"),
ret.get("input_hash"),
) {
trace!("Loaded idempotency key {:?} - {:?}", idempotency_key, ret);
let mut input_hash: [u8; 32] = Default::default();
input_hash.copy_from_slice(input_hash_slice.as_ref());
Ok(Some(IdempotentData::new(
StatusCode::from_str(status_code).unwrap(),
Bytes::from(data.to_owned()),
input_hash,
)))
} else {
Ok(None)
}
}
async fn save_idempotent_data(
&self,
idempotency_key: String,
input_hash: [u8; 32],
status_code: StatusCode,
data: Bytes,
) -> Result<(), IdempotentStoreError> {
let mut pipe = redis_crate::pipe();
let mut connection = self.connection.clone();
pipe.atomic()
.cmd("HMSET") // cannot use hset_multiple since data and status_code have different types
.arg(&prefixed_idempotency_key(&self.db_prefix, &idempotency_key))
.arg("status_code")
.arg(status_code.as_u16())
.arg("data")
.arg(data.as_ref())
.arg("input_hash")
.arg(&input_hash)
.ignore()
.expire(
&prefixed_idempotency_key(&self.db_prefix, &idempotency_key),
86400,
)
.ignore();
pipe.query_async(&mut connection).await?;
trace!(
"Cached {:?}: {:?}, {:?}",
idempotency_key,
status_code,
data,
);
Ok(())
}
}
#[async_trait]
impl SettlementStore for RedisStore {
type Account = Account;
async fn update_balance_for_incoming_settlement(
&self,
account_id: Uuid,
amount: u64,
idempotency_key: Option<String>,
) -> Result<(), SettlementStoreError> {
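// An idempotency key is assumed to always accompany incoming settlements; this unwrap panics if it is None.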
let idempotency_key = idempotency_key.unwrap();
let balance: i64 = PROCESS_INCOMING_SETTLEMENT
.arg(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY))
.arg(RedisAccountId(account_id))
.arg(amount)
.arg(&*prefixed_key(&self.db_prefix, idempotency_key.as_str()))
.invoke_async(&mut self.connection.clone())
.await?;
trace!(
"Processed incoming settlement from account: {} for amount: {}. Balance is now: {}",
account_id,
amount,
balance
);
Ok(())
}
async fn refund_settlement(
&self,
account_id: Uuid,
settle_amount: u64,
) -> Result<(), SettlementStoreError> {
trace!(
"Refunding settlement for account: {} of amount: {}",
account_id,
settle_amount
);
let balance: i64 = REFUND_SETTLEMENT
.arg(&*prefixed_key(&self.db_prefix, ACCOUNTS_KEY))
.arg(RedisAccountId(account_id))
.arg(settle_amount)
.invoke_async(&mut self.connection.clone())
.await?;
trace!(
"Refunded settlement for account: {} of amount: {}. Balance is now: {}",
account_id,
settle_amount,
balance
);
Ok(())
}
}
// TODO: AmountWithScale is also implemented in interledger-settlement. It'd be nice
// to deduplicate this by extracting it to a separate crate, which would make
// logical sense.
#[derive(Debug, Clone)]
struct AmountWithScale {
num: BigUint,
scale: u8,
}
impl ToRedisArgs for AmountWithScale {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
let mut rv = Vec::new(); | self.num.to_string().write_redis_args(&mut rv);
self.scale.to_string().write_redis_args(&mut rv);
ToRedisArgs::make_arg_vec(&rv, out);
}
}
impl AmountWithScale {
fn parse_multi_values(items: &[Value]) -> Option<Self> {
// We have to iterate over all values because in this case we're making
// an lrange call. This returns all the tuple elements in 1 array, and
// it cannot differentiate between 1 AmountWithScale value or multiple
// ones. This looks like a limitation of redis.rs
let len = items.len();
let mut iter = items.iter();
let mut max_scale = 0;
let mut amounts = Vec::new();
// if redis.rs could parse this properly, we could remove this loop,
// take 2 elements from the items iterator and return. Then we'd perform
// the summation and scaling in the consumer of the returned vector.
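// Entries are stored as consecutive (amount, scale) string pairs, which is why we step through the list two items at a time.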
for _ in (0..len).step_by(2) {
let num: String = match iter.next().map(FromRedisValue::from_redis_value) {
Some(Ok(n)) => n,
_ => return None,
};
let num = match BigUint::from_str(&num) {
Ok(a) => a,
Err(_) => return None,
};
let scale: u8 = match iter.next().map(FromRedisValue::from_redis_value) {
Some(Ok(c)) => c,
_ => return None,
};
if scale > max_scale {
max_scale = scale;
}
amounts.push((num, scale));
}
// We must scale them to the largest scale, and then add them together
let mut sum = BigUint::from(0u32);
for amount in &amounts {
sum += amount
.0
.normalize_scale(ConvertDetails {
from: amount.1,
to: max_scale,
})
.unwrap();
}
Some(AmountWithScale {
num: sum,
scale: max_scale,
})
}
}
impl FromRedisValue for AmountWithScale {
fn from_redis_value(v: &Value) -> Result<Self, RedisError> {
if let Value::Bulk(ref items) = *v {
if let Some(result) = Self::parse_multi_values(items) {
return Ok(result);
}
}
Err(RedisError::from((
ErrorKind::TypeError,
"Cannot parse amount with scale",
)))
}
}
#[async_trait]
impl LeftoversStore for RedisStore {
type AccountId = Uuid;
type AssetType = BigUint;
async fn get_uncredited_settlement_amount(
&self,
account_id: Uuid,
) -> Result<(Self::AssetType, u8), LeftoversStoreError> {
let mut pipe = redis_crate::pipe();
pipe.atomic();
// get the amounts and instantly delete them
pipe.lrange(
uncredited_amount_key(&self.db_prefix, account_id.to_string()),
0,
-1,
);
pipe.del(uncredited_amount_key(
&self.db_prefix,
account_id.to_string(),
))
.ignore();
let amounts: Vec<AmountWithScale> = pipe.query_async(&mut self.connection.clone()).await?;
// this call will only return 1 element
let amount = amounts[0].to_owned();
Ok((amount.num, amount.scale))
}
async fn save_uncredited_settlement_amount(
&self,
account_id: Uuid,
uncredited_settlement_amount: (Self::AssetType, u8),
) -> Result<(), LeftoversStoreError> {
trace!(
"Saving uncredited_settlement_amount {:?} {:?}",
account_id,
uncredited_settlement_amount
);
// We store these amounts as lists of strings
// because we cannot do BigNumber arithmetic in the store
// When loading the amounts, we convert them to the appropriate data
// type and sum them up.
let mut connection = self.connection.clone();
connection
.rpush(
uncredited_amount_key(&self.db_prefix, account_id),
AmountWithScale {
num: uncredited_settlement_amount.0,
scale: uncredited_settlement_amount.1,
},
)
.await?;
Ok(())
}
async fn load_uncredited_settlement_amount(
&self,
account_id: Uuid,
local_scale: u8,
) -> Result<Self::AssetType, LeftoversStoreError> {
trace!("Loading uncredited_settlement_amount {:?}", account_id);
let amount = self.get_uncredited_settlement_amount(account_id).await?;
// scale the amount from the max scale to the local scale, and then
// save any potential leftovers to the store
let (scaled_amount, precision_loss) =
scale_with_precision_loss(amount.0, local_scale, amount.1);
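// Whatever is lost by scaling down is pushed back into the store so it can be credited once enough accumulates.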
if precision_loss > BigUint::from(0u32) {
self.connection
.clone()
.rpush(
uncredited_amount_key(&self.db_prefix, account_id),
AmountWithScale {
num: precision_loss,
scale: std::cmp::max(local_scale, amount.1),
},
)
.await?;
}
Ok(scaled_amount)
}
async fn clear_uncredited_settlement_amount(
&self,
account_id: Uuid,
) -> Result<(), LeftoversStoreError> {
trace!("Clearing uncredited_settlement_amount {:?}", account_id);
self.connection
.clone()
.del(uncredited_amount_key(&self.db_prefix, account_id))
.await?;
Ok(())
}
}
type RouteVec = Vec<(String, RedisAccountId)>;
use futures::future::TryFutureExt;
// TODO replace this with pubsub when async pubsub is added upstream: https://github.com/mitsuhiko/redis-rs/issues/183
async fn update_routes(
mut connection: RedisReconnect,
routing_table: Arc<RwLock<Arc<HashMap<String, Uuid>>>>,
db_prefix: &str,
) -> Result<(), RedisError> {
let mut pipe = redis_crate::pipe();
pipe.hgetall(&*prefixed_key(db_prefix, ROUTES_KEY))
.hgetall(&*prefixed_key(db_prefix, STATIC_ROUTES_KEY))
.get(&*prefixed_key(db_prefix, DEFAULT_ROUTE_KEY));
let (routes, static_routes, default_route): (RouteVec, RouteVec, Option<RedisAccountId>) =
pipe.query_async(&mut connection).await?;
trace!(
"Loaded routes from redis. Static routes: {:?}, default route: {:?}, other routes: {:?}",
static_routes,
default_route,
routes
);
// If there is a default route set in the db,
// set the entry for "" in the routing table to route to that account
let default_route_iter = iter::once(default_route)
.filter_map(|r| r)
.map(|rid| (String::new(), rid.0));
let routes = routes
.into_iter()
.map(|(s, rid)| (s, rid.0))
// Include the default route if there is one
.chain(default_route_iter)
// Having the static_routes inserted after ensures that they will overwrite
// any routes with the same prefix from the first set
.chain(static_routes.into_iter().map(|(s, rid)| (s, rid.0)))
.collect();
// TODO we may not want to print this because the routing table will be very big
// if the node has a lot of local accounts
trace!("Routing table is: {:?}", routes);
*routing_table.write() = Arc::new(routes);
Ok(())
}
// Uuid does not implement ToRedisArgs and FromRedisValue.
// Rust does not allow implementing foreign traits on foreign data types.
// As a result, we wrap Uuid in a local data type, and implement the necessary
// traits for that.
#[derive(Eq, PartialEq, Hash, Debug, Default, Serialize, Deserialize, Copy, Clone)]
struct RedisAccountId(Uuid);
impl FromStr for RedisAccountId {
type Err = uuid::Error;
fn from_str(src: &str) -> Result<Self, Self::Err> {
let id = Uuid::from_str(&src)?;
Ok(RedisAccountId(id))
}
}
impl Display for RedisAccountId {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
f.write_str(&self.0.to_hyphenated().to_string())
}
}
impl ToRedisArgs for RedisAccountId {
fn write_redis_args<W: RedisWrite + ?Sized>(&self, out: &mut W) {
out.write_arg(self.0.to_hyphenated().to_string().as_bytes().as_ref());
}
}
impl FromRedisValue for RedisAccountId {
fn from_redis_value(v: &Value) -> Result<Self, RedisError> {
let account_id = String::from_redis_value(v)?;
let id = Uuid::from_str(&account_id)
.map_err(|_| RedisError::from((ErrorKind::TypeError, "Invalid account id string")))?;
Ok(RedisAccountId(id))
}
}
impl ToRedisArgs for &AccountWithEncryptedTokens {
fn write_redis_args<W: RedisWrite + ?Sized>(&self, out: &mut W) {
let mut rv = Vec::with_capacity(ACCOUNT_DETAILS_FIELDS * 2);
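// Arguments are written as alternating field-name / value pairs so the whole struct can be passed straight to HMSET.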
let account = &self.account;
"id".write_redis_args(&mut rv);
RedisAccountId(account.id).write_redis_args(&mut rv);
"username".write_redis_args(&mut rv);
account
.username
.as_bytes()
.to_vec()
.write_redis_args(&mut rv);
if !account.ilp_address.is_empty() {
"ilp_address".write_redis_args(&mut rv);
rv.push(account.ilp_address.to_bytes().to_vec());
}
if !account.asset_code.is_empty() {
"asset_code".write_redis_args(&mut rv);
account.asset_code.write_redis_args(&mut rv);
}
"asset_scale".write_redis_args(&mut rv);
account.asset_scale.write_redis_args(&mut rv);
"max_packet_amount".write_redis_args(&mut rv);
account.max_packet_amount.write_redis_args(&mut rv);
"routing_relation".write_redis_args(&mut rv);
account
.routing_relation
.to_string()
.write_redis_args(&mut rv);
"round_trip_time".write_redis_args(&mut rv);
account.round_trip_time.write_redis_args(&mut rv);
// Write optional fields
if let Some(ilp_over_http_url) = account.ilp_over_http_url.as_ref() {
"ilp_over_http_url".write_redis_args(&mut rv);
ilp_over_http_url.as_str().write_redis_args(&mut rv);
}
if let Some(ilp_over_http_incoming_token) = account.ilp_over_http_incoming_token.as_ref() {
"ilp_over_http_incoming_token".write_redis_args(&mut rv);
ilp_over_http_incoming_token
.expose_secret()
.as_ref()
.write_redis_args(&mut rv);
}
if let Some(ilp_over_http_outgoing_token) = account.ilp_over_http_outgoing_token.as_ref() {
"ilp_over_http_outgoing_token".write_redis_args(&mut rv);
ilp_over_http_outgoing_token
.expose_secret()
.as_ref()
.write_redis_args(&mut rv);
}
if let Some(ilp_over_btp_url) = account.ilp_over_btp_url.as_ref() {
"ilp_over_btp_url".write_redis_args(&mut rv);
ilp_over_btp_url.as_str().write_redis_args(&mut rv);
}
if let Some(ilp_over_btp_incoming_token) = account.ilp_over_btp_incoming_token.as_ref() {
"ilp_over_btp_incoming_token".write_redis_args(&mut rv);
ilp_over_btp_incoming_token
.expose_secret()
.as_ref()
.write_redis_args(&mut rv);
}
if let Some(ilp_over_btp_outgoing_token) = account.ilp_over_btp_outgoing_token.as_ref() {
"ilp_over_btp_outgoing_token".write_redis_args(&mut rv);
ilp_over_btp_outgoing_token
.expose_secret()
.as_ref()
.write_redis_args(&mut rv);
}
if let Some(settle_threshold) = account.settle_threshold {
"settle_threshold".write_redis_args(&mut rv);
settle_threshold.write_redis_args(&mut rv);
}
if let Some(settle_to) = account.settle_to {
"settle_to".write_redis_args(&mut rv);
settle_to.write_redis_args(&mut rv);
}
if let Some(limit) = account.packets_per_minute_limit {
"packets_per_minute_limit".write_redis_args(&mut rv);
limit.write_redis_args(&mut rv);
}
if let Some(limit) = account.amount_per_minute_limit {
"amount_per_minute_limit".write_redis_args(&mut rv);
limit.write_redis_args(&mut rv);
}
if let Some(min_balance) = account.min_balance {
"min_balance".write_redis_args(&mut rv);
min_balance.write_redis_args(&mut rv);
}
if let Some(settlement_engine_url) = &account.settlement_engine_url {
"settlement_engine_url".write_redis_args(&mut rv);
settlement_engine_url.as_str().write_redis_args(&mut rv);
}
debug_assert!(rv.len() <= ACCOUNT_DETAILS_FIELDS * 2);
debug_assert!((rv.len() % 2) == 0);
ToRedisArgs::make_arg_vec(&rv, out);
}
}
impl FromRedisValue for AccountWithEncryptedTokens {
fn from_redis_value(v: &Value) -> Result<Self, RedisError> {
let hash: HashMap<String, Value> = HashMap::from_redis_value(v)?;
let ilp_address: String = get_value("ilp_address", &hash)?;
let ilp_address = Address::from_str(&ilp_address)
.map_err(|_| RedisError::from((ErrorKind::TypeError, "Invalid ILP address")))?;
let username: String = get_value("username", &hash)?;
let username = Username::from_str(&username)
.map_err(|_| RedisError::from((ErrorKind::TypeError, "Invalid username")))?;
let routing_relation: Option<String> = get_value_option("routing_relation", &hash)?;
let routing_relation = if let Some(relation) = routing_relation {
RoutingRelation::from_str(relation.as_str())
.map_err(|_| RedisError::from((ErrorKind::TypeError, "Invalid Routing Relation")))?
} else {
RoutingRelation::NonRoutingAccount
};
let round_trip_time: Option<u32> = get_value_option("round_trip_time", &hash)?;
let round_trip_time: u32 = round_trip_time.unwrap_or(DEFAULT_ROUND_TRIP_TIME);
let rid: RedisAccountId = get_value("id", &hash)?;
Ok(AccountWithEncryptedTokens {
account: Account {
id: rid.0,
username,
ilp_address,
asset_code: get_value("asset_code", &hash)?,
asset_scale: get_value("asset_scale", &hash)?,
ilp_over_http_url: get_url_option("ilp_over_http_url", &hash)?,
ilp_over_http_incoming_token: get_bytes_option(
"ilp_over_http_incoming_token",
&hash,
)?
.map(SecretBytesMut::from),
ilp_over_http_outgoing_token: get_bytes_option(
"ilp_over_http_outgoing_token",
&hash,
)?
.map(SecretBytesMut::from),
ilp_over_btp_url: get_url_option("ilp_over_btp_url", &hash)?,
ilp_over_btp_incoming_token: get_bytes_option(
"ilp_over_btp_incoming_token",
&hash,
)?
.map(SecretBytesMut::from),
ilp_over_btp_outgoing_token: get_bytes_option(
"ilp_over_btp_outgoing_token",
&hash,
)?
.map(SecretBytesMut::from),
max_packet_amount: get_value("max_packet_amount", &hash)?,
min_balance: get_value_option("min_balance", &hash)?,
settle_threshold: get_value_option("settle_threshold", &hash)?,
settle_to: get_value_option("settle_to", &hash)?,
routing_relation,
round_trip_time,
packets_per_minute_limit: get_value_option("packets_per_minute_limit", &hash)?,
amount_per_minute_limit: get_value_option("amount_per_minute_limit", &hash)?,
settlement_engine_url: get_url_option("settlement_engine_url", &hash)?,
},
})
}
}
fn get_value<V>(key: &str, map: &HashMap<String, Value>) -> Result<V, RedisError>
where
V: FromRedisValue,
{
if let Some(ref value) = map.get(key) {
from_redis_value(value)
} else {
Err(RedisError::from((
ErrorKind::TypeError,
"Account is missing field",
key.to_string(),
)))
}
}
fn get_value_option<V>(key: &str, map: &HashMap<String, Value>) -> Result<Option<V>, RedisError>
where
V: FromRedisValue,
{
if let Some(ref value) = map.get(key) {
from_redis_value(value).map(Some)
} else {
Ok(None)
}
}
fn get_bytes_option(
key: &str,
map: &HashMap<String, Value>,
) -> Result<Option<BytesMut>, RedisError> {
if let Some(ref value) = map.get(key) {
let vec: Vec<u8> = from_redis_value(value)?;
Ok(Some(BytesMut::from(vec.as_slice())))
} else {
Ok(None)
}
}
fn get_url_option(key: &str, map: &HashMap<String, Value>) -> Result<Option<Url>, RedisError> {
if let Some(ref value) = map.get(key) {
let value: String = from_redis_value(value)?;
if let Ok(url) = Url::parse(&value) {
Ok(Some(url))
} else {
Err(RedisError::from((ErrorKind::TypeError, "Invalid URL")))
}
} else {
Ok(None)
}
}
#[cfg(test)]
mod tests {
use super::*;
use redis_crate::IntoConnectionInfo;
#[tokio::test]
async fn connect_fails_if_db_unavailable() {
let result = RedisStoreBuilder::new(
"redis://127.0.0.1:0".into_connection_info().unwrap() as ConnectionInfo,
[0; 32],
)
.connect()
.await;
assert!(result.is_err());
}
} | |
assert_test.go | package awstesting_test
import (
"encoding/xml"
"testing"
"github.com/aws/aws-sdk-go/awstesting"
)
func TestAssertJSON(t *testing.T) {
cases := []struct {
e, a string
asserts bool
}{
{
e: `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`,
a: `{"RecursiveStruct":{"RecursiveMap":{"bar":{"NoRecurse":"bar"},"foo":{"NoRecurse":"foo"}}}}`,
asserts: true,
},
}
for i, c := range cases {
mockT := &testing.T{}
if awstesting.AssertJSON(mockT, c.e, c.a) != c.asserts {
t.Error("Assert JSON result was not expected.", i)
}
}
}
func TestAssertXML(t *testing.T) {
cases := []struct {
e, a string
asserts bool
container struct {
XMLName xml.Name `xml:"OperationRequest"`
NS string `xml:"xmlns,attr"`
RecursiveStruct struct {
RecursiveMap struct {
Entries []struct {
XMLName xml.Name `xml:"entries"`
Key string `xml:"key"`
Value struct {
XMLName xml.Name `xml:"value"`
NoRecurse string
}
}
}
}
}
}{
{
e: `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveMap xmlns="https://foo/"><entry xmlns="https://foo/"><key xmlns="https://foo/">foo</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></value></entry><entry xmlns="https://foo/"><key xmlns="https://foo/">bar</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">bar</NoRecurse></value></entry></RecursiveMap></RecursiveStruct></OperationRequest>`,
a: `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveMap xmlns="https://foo/"><entry xmlns="https://foo/"><key xmlns="https://foo/">bar</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">bar</NoRecurse></value></entry><entry xmlns="https://foo/"><key xmlns="https://foo/">foo</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></value></entry></RecursiveMap></RecursiveStruct></OperationRequest>`,
asserts: true,
},
}
for i, c := range cases {
// mockT := &testing.T{}
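// Note: the real *testing.T is passed below (not the mock), so a mismatch fails this test directly.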
if awstesting.AssertXML(t, c.e, c.a, c.container) != c.asserts |
}
}
func TestAssertQuery(t *testing.T) {
cases := []struct {
e, a string
asserts bool
}{
{
e: `Action=OperationName&Version=2014-01-01&Foo=val1&Bar=val2`,
a: `Action=OperationName&Version=2014-01-01&Foo=val2&Bar=val3`,
asserts: false,
},
{
e: `Action=OperationName&Version=2014-01-01&Foo=val1&Bar=val2`,
a: `Action=OperationName&Version=2014-01-01&Foo=val1&Bar=val2`,
asserts: true,
},
}
for i, c := range cases {
mockT := &testing.T{}
if awstesting.AssertQuery(mockT, c.e, c.a) != c.asserts {
t.Error("Assert Query result was not expected.", i)
}
}
}
| {
t.Error("Assert XML result was not expected.", i)
} |
cli-test.cocoapods.spec.ts | import { AcceptanceTests } from './cli-test.acceptance.test';
export const CocoapodsTests: AcceptanceTests = {
language: 'Cocoapods',
tests: {
'`test cocoapods-app (autodetect)`': (params, utils) => async (t) => {
utils.chdirWorkspaces();
| t.equal(req.method, 'POST', 'makes POST request');
t.equal(
req.headers['x-snyk-cli-version'],
params.versionNumber,
'sends version number',
);
t.match(req.url, '/test-dep-graph', 'posts to correct url');
const depGraph = req.body.depGraph;
t.equal(depGraph.pkgManager.name, 'cocoapods');
t.same(
depGraph.pkgs.map((p) => p.id).sort(),
['[email protected]', '[email protected]'].sort(),
'depGraph looks fine',
);
},
},
}; | await params.cli.test('cocoapods-app');
const req = params.server.popRequest(); |
client_api_test.go | /*
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certifier_test
import (
"context"
"crypto/x509"
"fmt"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"magma/orc8r/cloud/go/services/certifier"
servicers "magma/orc8r/cloud/go/services/certifier/servicers/protected"
"magma/orc8r/cloud/go/services/certifier/test_init"
"magma/orc8r/lib/go/protos"
security_cert "magma/orc8r/lib/go/security/cert"
certifier_test_utils "magma/orc8r/lib/go/security/csr"
)
func TestCertifier(t *testing.T) | {
test_init.StartTestService(t)
// create and sign csr
csrMsg, err := certifier_test_utils.CreateCSR(time.Hour*24*365, "cn", "cn")
assert.NoError(t, err)
certMsg, err := certifier.SignCSR(context.Background(), csrMsg)
assert.NoError(t, err, "Failed to sign CSR")
firstCertDer := certMsg.CertDer
// get sn from cert
cert, err := x509.ParseCertificates(certMsg.CertDer)
assert.NoError(t, err, "Failed to parse cert")
firstCertSN := cert[0].SerialNumber
snMsg := &protos.Certificate_SN{
Sn: security_cert.SerialToString(firstCertSN),
}
// test get identity
certInfoMsg, err := certifier.GetIdentity(context.Background(), snMsg)
assert.NoError(t, err, "Error getting identity")
fmt.Printf("%+v\n", certInfoMsg)
assert.True(t, proto.Equal(certInfoMsg.Id, csrMsg.Id))
// test revoke cert
err = certifier.RevokeCertificate(context.Background(), snMsg)
assert.NoError(t, err, "Failed to revoke cert")
_, err = certifier.GetIdentity(context.Background(), snMsg)
assert.Error(t, err, "Error: no error getting revoked identity")
// test collect garbage
servicers.CollectGarbageAfter = time.Duration(0)
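// A zero grace period plus the zero-validity certificate created below means CollectGarbage should remove it immediately.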
csrMsg, err = certifier_test_utils.CreateCSR(time.Duration(0), "cn", "cn")
assert.NoError(t, err)
certMsg, err = certifier.SignCSR(context.Background(), csrMsg)
assert.NoError(t, err, "Failed to sign CSR")
cert, err = x509.ParseCertificates(certMsg.CertDer)
assert.NoError(t, err, "Failed to parse cert")
snMsg = &protos.Certificate_SN{
Sn: security_cert.SerialToString(cert[0].SerialNumber),
}
err = certifier.CollectGarbage(context.Background())
assert.NoError(t, err, "Failed to collect garbage")
_, err = certifier.GetIdentity(context.Background(), snMsg)
assert.Equal(t, grpc.Code(err), codes.NotFound)
oper := protos.NewOperatorIdentity("testOperator")
assert.NoError(t,
certifier.AddCertificate(context.Background(), oper, firstCertDer),
"Failed to Add Existing Cert")
certInfoMsg, err = certifier.GetCertificateIdentity(context.Background(), security_cert.SerialToString(firstCertSN))
assert.NoError(t, err, "Error getting added cert identity")
if err == nil {
assert.Equal(t, oper.HashString(), certInfoMsg.Id.HashString())
}
sns, err := certifier.ListCertificates(context.Background())
assert.NoError(t, err, "Error Listing Certificates")
assert.Equal(t, 1, len(sns))
csrMsg, err = certifier_test_utils.CreateCSR(time.Hour*2, "cn1", "cn1")
assert.NoError(t, err)
_, err = certifier.SignCSR(context.Background(), csrMsg)
assert.NoError(t, err, "Failed to sign CSR")
sns, err = certifier.ListCertificates(context.Background())
assert.NoError(t, err, "Error Listing Certificates")
assert.Equal(t, 2, len(sns))
operSNs, err := certifier.FindCertificates(context.Background(), oper)
assert.NoError(t, err, "Error Finding Operator Certificates")
assert.Equal(t, 1, len(operSNs))
if len(operSNs) > 0 {
assert.Equal(t, security_cert.SerialToString(firstCertSN), operSNs[0])
}
} |
|
trace.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use borrow_check::nll::region_infer::values::{self, PointIndex, RegionValueElements};
use borrow_check::nll::type_check::liveness::liveness_map::{LiveVar, NllLivenessMap};
use borrow_check::nll::type_check::liveness::local_use_map::LocalUseMap;
use borrow_check::nll::type_check::AtLocation;
use borrow_check::nll::type_check::TypeChecker;
use dataflow::move_paths::indexes::MovePathIndex;
use dataflow::move_paths::MoveData;
use dataflow::{FlowAtLocation, FlowsAtLocation, MaybeInitializedPlaces};
use rustc::infer::canonical::QueryRegionConstraint;
use rustc::mir::{BasicBlock, Local, Location, Mir};
use rustc::traits::query::dropck_outlives::DropckOutlivesResult;
use rustc::traits::query::type_op::outlives::DropckOutlives;
use rustc::traits::query::type_op::TypeOp;
use rustc::ty::{Ty, TypeFoldable};
use rustc_data_structures::bitvec::BitArray;
use rustc_data_structures::fx::FxHashMap;
use std::rc::Rc;
use util::liveness::LiveVariableMap;
/// This is the heart of the liveness computation. For each variable X
/// that requires a liveness computation, it walks over all the uses
/// of X and does a reverse depth-first search ("trace") through the
/// MIR. This search stops when we find a definition of that variable.
/// The points visited in this search are the USE-LIVE set for the variable;
/// each of those points is added to all the regions that appear in the variable's
/// type.
///
/// We then also walk through each *drop* of those variables and do
/// another search, stopping when we reach a use or definition. This
/// is the DROP-LIVE set of points. Each of the points in the
/// DROP-LIVE set is added to the liveness sets for regions found in the
/// `dropck_outlives` result of the variable's type (in particular,
/// this respects `#[may_dangle]` annotations).
pub(super) fn trace(
typeck: &mut TypeChecker<'_, 'gcx, 'tcx>,
mir: &Mir<'tcx>,
elements: &Rc<RegionValueElements>,
flow_inits: &mut FlowAtLocation<MaybeInitializedPlaces<'_, 'gcx, 'tcx>>,
move_data: &MoveData<'tcx>,
liveness_map: &NllLivenessMap,
) {
debug!("trace()");
if liveness_map.is_empty() {
return;
}
let local_use_map = &LocalUseMap::build(liveness_map, elements, mir);
let cx = LivenessContext {
typeck,
mir,
flow_inits,
elements,
local_use_map,
move_data,
liveness_map,
drop_data: FxHashMap::default(),
};
LivenessResults::new(cx).compute_for_all_locals();
}
/// Contextual state for the type-liveness generator.
struct LivenessContext<'me, 'typeck, 'flow, 'gcx, 'tcx>
where
'typeck: 'me,
'flow: 'me,
'tcx: 'typeck + 'flow,
'gcx: 'tcx,
{
/// Current type-checker, giving us our inference context etc.
typeck: &'me mut TypeChecker<'typeck, 'gcx, 'tcx>,
/// Defines the `PointIndex` mapping
elements: &'me RegionValueElements,
/// MIR we are analyzing.
mir: &'me Mir<'tcx>,
/// Mapping to/from the various indices used for initialization tracking.
move_data: &'me MoveData<'tcx>,
/// Cache for the results of `dropck_outlives` query.
drop_data: FxHashMap<Ty<'tcx>, DropData<'tcx>>,
/// Results of dataflow tracking which variables (and paths) have been
/// initialized.
flow_inits: &'me mut FlowAtLocation<MaybeInitializedPlaces<'flow, 'gcx, 'tcx>>,
/// Index indicating where each variable is assigned, used, or
/// dropped.
local_use_map: &'me LocalUseMap<'me>,
/// Map tracking which variables need liveness computation.
liveness_map: &'me NllLivenessMap,
}
struct DropData<'tcx> {
dropck_result: DropckOutlivesResult<'tcx>,
region_constraint_data: Option<Rc<Vec<QueryRegionConstraint<'tcx>>>>,
}
struct LivenessResults<'me, 'typeck, 'flow, 'gcx, 'tcx>
where
'typeck: 'me,
'flow: 'me,
'tcx: 'typeck + 'flow,
'gcx: 'tcx,
{
cx: LivenessContext<'me, 'typeck, 'flow, 'gcx, 'tcx>,
/// Set of points that define the current local.
defs: BitArray<PointIndex>,
/// Points where the current variable is "use live" -- meaning
/// that there is a future "full use" that may use its value.
use_live_at: BitArray<PointIndex>,
/// Points where the current variable is "drop live" -- meaning
/// that there is no future "full use" that may use its value, but
/// there is a future drop.
drop_live_at: BitArray<PointIndex>,
/// Locations where drops may occur.
drop_locations: Vec<Location>,
/// Stack used when doing (reverse) DFS.
stack: Vec<PointIndex>,
}
impl LivenessResults<'me, 'typeck, 'flow, 'gcx, 'tcx> {
fn new(cx: LivenessContext<'me, 'typeck, 'flow, 'gcx, 'tcx>) -> Self {
let num_points = cx.elements.num_points();
LivenessResults {
cx,
defs: BitArray::new(num_points),
use_live_at: BitArray::new(num_points),
drop_live_at: BitArray::new(num_points),
drop_locations: vec![],
stack: vec![],
}
}
fn compute_for_all_locals(&mut self) {
for live_local in self.cx.liveness_map.to_local.indices() {
let local = self.cx.liveness_map.from_live_var(live_local);
debug!("local={:?} live_local={:?}", local, live_local);
self.reset_local_state();
self.add_defs_for(live_local);
self.compute_use_live_points_for(live_local);
self.compute_drop_live_points_for(live_local);
let local_ty = self.cx.mir.local_decls[local].ty;
if !self.use_live_at.is_empty() {
self.cx.add_use_live_facts_for(local_ty, &self.use_live_at);
}
if !self.drop_live_at.is_empty() |
}
}
/// Clear the value of fields that are "per local variable".
fn reset_local_state(&mut self) {
self.defs.clear();
self.use_live_at.clear();
self.drop_live_at.clear();
self.drop_locations.clear();
assert!(self.stack.is_empty());
}
/// Adds the definitions of `local` into `self.defs`.
fn add_defs_for(&mut self, live_local: LiveVar) {
for def in self.cx.local_use_map.defs(live_local) {
debug!("- defined at {:?}", def);
self.defs.insert(def);
}
}
/// Compute all points where local is "use live" -- meaning its
/// current value may be used later (except by a drop). This is
/// done by walking backwards from each use of `live_local` until we
/// find a `def` of local.
///
/// Requires `add_defs_for(live_local)` to have been executed.
fn compute_use_live_points_for(&mut self, live_local: LiveVar) {
debug!("compute_use_live_points_for(live_local={:?})", live_local);
self.stack.extend(self.cx.local_use_map.uses(live_local));
while let Some(p) = self.stack.pop() {
if self.defs.contains(p) {
continue;
}
if self.use_live_at.insert(p) {
self.cx
.elements
.push_predecessors(self.cx.mir, p, &mut self.stack)
}
}
}
/// Compute all points where local is "drop live" -- meaning its
/// current value may be dropped later (but not used). This is
/// done by iterating over the drops of `local` where `local` (or
/// some subpart of `local`) is initialized. For each such drop,
/// we walk backwards until we find a point where `local` is
/// either defined or use-live.
///
/// Requires `compute_use_live_points_for` and `add_defs_for` to
/// have been executed.
fn compute_drop_live_points_for(&mut self, live_local: LiveVar) {
debug!("compute_drop_live_points_for(live_local={:?})", live_local);
let local = self.cx.liveness_map.from_live_var(live_local);
let mpi = self.cx.move_data.rev_lookup.find_local(local);
debug!("compute_drop_live_points_for: mpi = {:?}", mpi);
// Find the drops where `local` is initialized.
for drop_point in self.cx.local_use_map.drops(live_local) {
let location = self.cx.elements.to_location(drop_point);
debug_assert_eq!(self.cx.mir.terminator_loc(location.block), location,);
if self.cx.initialized_at_terminator(location.block, mpi) {
if self.drop_live_at.insert(drop_point) {
self.drop_locations.push(location);
self.stack.push(drop_point);
}
}
}
debug!(
"compute_drop_live_points_for: drop_locations={:?}",
self.drop_locations
);
// Reverse DFS. But for drops, we do it a bit differently.
// The stack only ever stores *terminators of blocks*. Within
// a block, we walk back the statements in an inner loop.
'next_block: while let Some(term_point) = self.stack.pop() {
self.compute_drop_live_points_for_block(mpi, term_point);
}
}
/// Executes one iteration of the drop-live analysis loop.
///
/// The parameter `mpi` is the `MovePathIndex` of the local variable
/// we are currently analyzing.
///
/// The point `term_point` represents some terminator in the MIR,
/// where the local `mpi` is drop-live on entry to that terminator.
///
/// This method adds all drop-live points within the block and --
/// where applicable -- pushes the terminators of preceding blocks
/// onto `self.stack`.
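///
/// Schematically, the walk visits the statements of the block in reverse
/// order, stopping early at a definition or use-live point, and then -- if
/// the local is still (partially) initialized at the exit of a predecessor
/// block -- pushes that predecessor's terminator onto the stack.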
fn compute_drop_live_points_for_block(&mut self, mpi: MovePathIndex, term_point: PointIndex) {
debug!(
"compute_drop_live_points_for_block(mpi={:?}, term_point={:?})",
self.cx.move_data.move_paths[mpi].place,
self.cx.elements.to_location(term_point),
);
// We are only invoked with terminators where `mpi` is
// drop-live on entry.
debug_assert!(self.drop_live_at.contains(term_point));
// Scan backwards through the statements in the block; one of
// them may be a definition of the local or a use-live point.
let term_location = self.cx.elements.to_location(term_point);
debug_assert_eq!(
self.cx.mir.terminator_loc(term_location.block),
term_location,
);
let block = term_location.block;
let entry_point = self.cx.elements.entry_point(term_location.block);
for p in (entry_point..term_point).rev() {
debug!(
"compute_drop_live_points_for_block: p = {:?}",
self.cx.elements.to_location(p),
);
if self.defs.contains(p) {
debug!("compute_drop_live_points_for_block: def site");
return;
}
if self.use_live_at.contains(p) {
debug!("compute_drop_live_points_for_block: use-live at {:?}", p);
return;
}
if !self.drop_live_at.insert(p) {
debug!("compute_drop_live_points_for_block: already drop-live");
return;
}
}
for &pred_block in self.cx.mir.predecessors_for(block).iter() {
debug!(
"compute_drop_live_points_for_block: pred_block = {:?}",
pred_block,
);
// Check whether the variable is (at least partially)
// initialized at the exit of this predecessor. If so, we
// want to enqueue it on our list. If not, go check the
// next block.
//
// Note that we only need to check whether `live_local`
// became de-initialized at basic block boundaries. If it
// were to become de-initialized within the block, that
// would have been a "use-live" transition in the earlier
// loop, and we'd have returned already.
//
// NB. It's possible that the pred-block ends in a call
// which stores to the variable; in that case, the
// variable may be uninitialized "at exit" because this
// call only considers the *unconditional effects* of the
// terminator. *But*, in that case, the terminator is also
// a *definition* of the variable, in which case we want
// to stop the search anyhow. (But see Note 1 below.)
if !self.cx.initialized_at_exit(pred_block, mpi) {
debug!("compute_drop_live_points_for_block: not initialized");
continue;
}
let pred_term_loc = self.cx.mir.terminator_loc(pred_block);
let pred_term_point = self.cx.elements.point_from_location(pred_term_loc);
// If the terminator of this predecessor either *assigns*
// our value or is a "normal use", then stop.
if self.defs.contains(pred_term_point) {
debug!(
"compute_drop_live_points_for_block: defined at {:?}",
pred_term_loc
);
continue;
}
if self.use_live_at.contains(pred_term_point) {
debug!(
"compute_drop_live_points_for_block: use-live at {:?}",
pred_term_loc
);
continue;
}
// Otherwise, we are drop-live on entry to the terminator,
// so walk it.
if self.drop_live_at.insert(pred_term_point) {
debug!("compute_drop_live_points_for_block: pushed to stack");
self.stack.push(pred_term_point);
}
}
// Note 1. There is a weird scenario that you might imagine
// being problematic here, but which actually cannot happen.
// The problem would be if we had a variable that *is* initialized
// (but dead) on entry to the terminator, and where the current value
// will be dropped in the case of unwind. In that case, we ought to
// consider `X` to be drop-live in between the last use and call.
// Here is the example:
//
// ```
// BB0 {
// X = ...
// use(X); // last use
// ... // <-- X ought to be drop-live here
// X = call() goto BB1 unwind BB2
// }
//
// BB1 {
// DROP(X)
// }
//
// BB2 {
// DROP(X)
// }
// ```
//
// However, the current code would, when walking back from BB2,
// simply stop and never explore BB0. This seems bad! But it turns
// out this code is flawed anyway -- note that the existing value of
// `X` would leak in the case where unwinding did *not* occur.
//
// What we *actually* generate is a store to a temporary
// for the call (`TMP = call()...`) and then a
// `DropAndReplace` to swap that with `X`
// (`DropAndReplace` has very particular semantics).
}
}
impl LivenessContext<'_, '_, '_, '_, 'tcx> {
/// True if the local variable (or some part of it) is initialized in
/// the terminator of `block`. We need to check this to determine if a
/// DROP of some local variable will have an effect -- note that
/// drops, as they may unwind, are always terminators.
fn initialized_at_terminator(&mut self, block: BasicBlock, mpi: MovePathIndex) -> bool {
// Compute the set of initialized paths at terminator of block
// by resetting to the start of the block and then applying
// the effects of all statements. This is the only way to get
// "just ahead" of a terminator.
self.flow_inits.reset_to_entry_of(block);
for statement_index in 0..self.mir[block].statements.len() {
let location = Location {
block,
statement_index,
};
self.flow_inits.reconstruct_statement_effect(location);
self.flow_inits.apply_local_effect(location);
}
self.flow_inits.has_any_child_of(mpi).is_some()
}
/// True if the path `mpi` (or some part of it) is initialized at
/// the exit of `block`.
///
/// **Warning:** Does not account for the result of `Call`
/// instructions.
fn initialized_at_exit(&mut self, block: BasicBlock, mpi: MovePathIndex) -> bool {
self.flow_inits.reset_to_exit_of(block);
self.flow_inits.has_any_child_of(mpi).is_some()
}
/// Store the result that all regions in `value` are live for the
/// points `live_at`.
fn add_use_live_facts_for(
&mut self,
value: impl TypeFoldable<'tcx>,
live_at: &BitArray<PointIndex>,
) {
debug!("add_use_live_facts_for(value={:?})", value);
Self::make_all_regions_live(self.elements, &mut self.typeck, value, live_at)
}
/// Some variable with type `live_ty` is "drop live" at `location`
/// -- i.e., it may be dropped later. This means that *some* of
/// the regions in its type must be live at `location`. The
/// precise set will depend on the dropck constraints, and in
/// particular this takes `#[may_dangle]` into account.
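///
/// For example, if the dropped type's `Drop` impl may read data borrowed
/// for some region `'a`, then `'a` must be live at the drop; if that
/// parameter is instead marked `#[may_dangle]`, the corresponding regions
/// are not forced to be live.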
fn add_drop_live_facts_for(
&mut self,
dropped_local: Local,
dropped_ty: Ty<'tcx>,
drop_locations: &[Location],
live_at: &BitArray<PointIndex>,
) {
debug!(
"add_drop_live_constraint(\
dropped_local={:?}, \
dropped_ty={:?}, \
drop_locations={:?}, \
live_at={:?})",
dropped_local,
dropped_ty,
drop_locations,
values::location_set_str(self.elements, live_at.iter()),
);
let drop_data = self.drop_data.entry(dropped_ty).or_insert_with({
let typeck = &mut self.typeck;
move || Self::compute_drop_data(typeck, dropped_ty)
});
if let Some(data) = &drop_data.region_constraint_data {
for &drop_location in drop_locations {
self.typeck
.push_region_constraints(drop_location.boring(), data);
}
}
drop_data.dropck_result.report_overflows(
self.typeck.infcx.tcx,
self.mir.source_info(*drop_locations.first().unwrap()).span,
dropped_ty,
);
// All things in the `outlives` array may be touched by
// the destructor and must be live at this point.
for &kind in &drop_data.dropck_result.kinds {
Self::make_all_regions_live(self.elements, &mut self.typeck, kind, live_at);
}
}
fn make_all_regions_live(
elements: &RegionValueElements,
typeck: &mut TypeChecker<'_, '_, 'tcx>,
value: impl TypeFoldable<'tcx>,
live_at: &BitArray<PointIndex>,
) {
debug!("make_all_regions_live(value={:?})", value);
debug!(
"make_all_regions_live: live_at={}",
values::location_set_str(elements, live_at.iter()),
);
let tcx = typeck.tcx();
tcx.for_each_free_region(&value, |live_region| {
let borrowck_context = typeck.borrowck_context.as_mut().unwrap();
let live_region_vid = borrowck_context
.universal_regions
.to_region_vid(live_region);
borrowck_context
.constraints
.liveness_constraints
.add_elements(live_region_vid, live_at);
if let Some(_) = borrowck_context.all_facts {
bug!("polonius liveness facts not implemented yet")
}
});
}
fn compute_drop_data(
typeck: &mut TypeChecker<'_, 'gcx, 'tcx>,
dropped_ty: Ty<'tcx>,
) -> DropData<'tcx> {
debug!("compute_drop_data(dropped_ty={:?})", dropped_ty,);
let param_env = typeck.param_env;
let (dropck_result, region_constraint_data) = param_env
.and(DropckOutlives::new(dropped_ty))
.fully_perform(typeck.infcx)
.unwrap();
DropData {
dropck_result,
region_constraint_data,
}
}
}
| {
self.cx.add_drop_live_facts_for(
local,
local_ty,
&self.drop_locations,
&self.drop_live_at,
);
} |
version.go | package services
import (
"encoding/json"
"errors"
"github.com/jfrog/jfrog-client-go/auth"
"github.com/jfrog/jfrog-client-go/http/jfroghttpclient"
"github.com/jfrog/jfrog-client-go/utils"
"github.com/jfrog/jfrog-client-go/utils/errorutils"
"net/http"
"strings"
)
type VersionService struct {
client *jfroghttpclient.JfrogHttpClient
DistDetails auth.ServiceDetails
}
func | (client *jfroghttpclient.JfrogHttpClient) *VersionService {
return &VersionService{client: client}
}
func (vs *VersionService) GetDistDetails() auth.ServiceDetails {
return vs.DistDetails
}
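// GetDistributionVersion queries the Distribution "api/v1/system/info"
// endpoint, which returns a JSON document containing (at least) a "version"
// field, e.g. {"version": "2.5.0"}, and returns that value with surrounding
// whitespace trimmed.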
func (vs *VersionService) GetDistributionVersion() (string, error) {
httpDetails := vs.DistDetails.CreateHttpClientDetails()
resp, body, _, err := vs.client.SendGet(vs.DistDetails.GetUrl()+"api/v1/system/info", true, &httpDetails)
if err != nil {
return "", err
}
if resp.StatusCode != http.StatusOK {
return "", errorutils.CheckError(errors.New("Distribution response: " + resp.Status + "\n" + utils.IndentJson(body)))
}
var version distributionVersion
err = json.Unmarshal(body, &version)
if err != nil {
return "", errorutils.CheckError(err)
}
return strings.TrimSpace(version.Version), nil
}
type distributionVersion struct {
Version string `json:"version,omitempty"`
}
| NewVersionService |
create_account_parameters.go | // Code generated by go-swagger; DO NOT EDIT.
package operations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
models "github.com/tnwhitwell/open-api/go/models"
)
// NewCreateAccountParams creates a new CreateAccountParams object
// with the default values initialized.
func NewCreateAccountParams() *CreateAccountParams {
var ()
return &CreateAccountParams{
timeout: cr.DefaultTimeout,
}
}
// NewCreateAccountParamsWithTimeout creates a new CreateAccountParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewCreateAccountParamsWithTimeout(timeout time.Duration) *CreateAccountParams {
var () |
timeout: timeout,
}
}
// NewCreateAccountParamsWithContext creates a new CreateAccountParams object
// with the default values initialized, and the ability to set a context for a request
func NewCreateAccountParamsWithContext(ctx context.Context) *CreateAccountParams {
var ()
return &CreateAccountParams{
Context: ctx,
}
}
// NewCreateAccountParamsWithHTTPClient creates a new CreateAccountParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewCreateAccountParamsWithHTTPClient(client *http.Client) *CreateAccountParams {
var ()
return &CreateAccountParams{
HTTPClient: client,
}
}
/*CreateAccountParams contains all the parameters to send to the API endpoint
for the create account operation typically these are written to a http.Request
*/
type CreateAccountParams struct {
/*AccountSetup*/
AccountSetup *models.AccountSetup
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the create account params
func (o *CreateAccountParams) WithTimeout(timeout time.Duration) *CreateAccountParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the create account params
func (o *CreateAccountParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the create account params
func (o *CreateAccountParams) WithContext(ctx context.Context) *CreateAccountParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the create account params
func (o *CreateAccountParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the create account params
func (o *CreateAccountParams) WithHTTPClient(client *http.Client) *CreateAccountParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the create account params
func (o *CreateAccountParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithAccountSetup adds the accountSetup to the create account params
func (o *CreateAccountParams) WithAccountSetup(accountSetup *models.AccountSetup) *CreateAccountParams {
o.SetAccountSetup(accountSetup)
return o
}
// SetAccountSetup adds the accountSetup to the create account params
func (o *CreateAccountParams) SetAccountSetup(accountSetup *models.AccountSetup) {
o.AccountSetup = accountSetup
}
// WriteToRequest writes these params to a swagger request
func (o *CreateAccountParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.AccountSetup != nil {
if err := r.SetBodyParam(o.AccountSetup); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
} | return &CreateAccountParams{ |
runningsumof1darray_test.go | package runningsumof1darray
import (
"testing"
"github.com/stretchr/testify/assert"
)
func | (t *testing.T) {
tt := []struct {
input []int
output []int
}{
// #1
{
input: []int{1, 2, 3, 4},
output: []int{1, 3, 6, 10},
},
// #2
{
input: []int{1, 1, 1, 1, 1},
output: []int{1, 2, 3, 4, 5},
},
// #3
{
input: []int{3, 1, 2, 10, 1},
output: []int{3, 4, 6, 16, 17},
},
}
for _, tc := range tt {
assert.Equal(t, tc.output, runningSum(tc.input))
}
}
| TestRunningSum |
case-validator.test.ts | import { getCaseValidator } from '#/utils/case-validator';
describe('CaseValidator.getRecommendedName', () => {
const subject = (rule: string, name: string) => {
return getCaseValidator(rule).getRecommendedName(name);
};
describe('when the rule is camelCase', () => {
const rule = 'camelCase';
describe('when the name is camelCase', () => {
const name = 'camelCase'; | });
describe('when the name is PascalCase', () => {
const name = 'PascalCase';
it('returns camelized name', () => {
expect(subject(rule, name)).toEqual('pascalCase');
});
});
describe('when the name is chaos (cannot build recommendation).', () => {
const name = '00001_chaos-Name';
it('throws error', () => {
expect(() => subject(rule, name)).toThrow('Failed to build recommendation.');
});
});
});
describe('when the rule is not in presets', () => {
const rule = 'TEST_.*';
const name = 'name';
it('throws error', () => {
expect(() => subject(rule, name)).toThrow('Not implemented');
});
});
});
describe('CaseValidator.validate', () => {
const subject = (rule: string, ignorePatterns: string[], name: string) => {
return getCaseValidator(rule, ignorePatterns).validate(name);
};
describe('when the rule is camelCase, with some ignorePatterns', () => {
const rule = 'camelCase';
const ignorePatterns = ['Ignored_.*'];
describe('when the name is camelCase', () => {
const name = 'camelCase';
it('returns true', () => {
expect(subject(rule, ignorePatterns, name)).toBeTruthy();
});
});
describe('when the name is not camelCase, but matches the ignorePatterns', () => {
const name = 'Ignored_pattern';
it('returns true', () => {
expect(subject(rule, ignorePatterns, name)).toBeTruthy();
});
});
describe('when the name is not camelCase, and does not match the ignorePatterns', () => {
const name = 'PascalCase';
it('returns false', () => {
expect(subject(rule, ignorePatterns, name)).toBeFalsy();
});
});
});
describe('when the rule is not in presets, with some ignorePatterns', () => {
const rule = 'TEST_.*';
const ignorePatterns = ['Ignored_.*'];
describe('when the name matches rule', () => {
const name = 'TEST_';
it('returns true', () => {
expect(subject(rule, ignorePatterns, name)).toBeTruthy();
});
});
});
}); |
it('returns same name', () => {
expect(subject(rule, name)).toEqual(name);
}); |
mod.rs | //! Lexical region resolution.
use crate::infer::region_constraints::Constraint;
use crate::infer::region_constraints::GenericKind;
use crate::infer::region_constraints::RegionConstraintData;
use crate::infer::region_constraints::VarInfos;
use crate::infer::region_constraints::VerifyBound;
use crate::infer::RegionRelations;
use crate::infer::RegionVariableOrigin;
use crate::infer::SubregionOrigin;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::graph::implementation::{
Direction, Graph, NodeIndex, INCOMING, OUTGOING,
};
use rustc_data_structures::intern::Interned;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{ReEarlyBound, ReEmpty, ReErased, ReFree, ReStatic};
use rustc_middle::ty::{ReLateBound, RePlaceholder, ReVar};
use rustc_middle::ty::{Region, RegionVid};
use rustc_span::Span;
use std::fmt;
/// This function performs lexical region resolution given a complete
/// set of constraints and variable origins. It performs a fixed-point
/// iteration to find region values which satisfy all constraints,
/// assuming such values can be found. It returns the final values of
/// all the variables as well as a set of errors that must be reported.
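///
/// Concretely, each variable starts at the empty region and is grown by
/// the `expansion` phase until all `a <= b` constraints between variables
/// are satisfied; constraints that bound a variable from above are then
/// checked by `collect_errors` and `collect_var_errors`.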
#[instrument(level = "debug", skip(region_rels, var_infos, data))]
pub(crate) fn resolve<'tcx>(
region_rels: &RegionRelations<'_, 'tcx>,
var_infos: VarInfos,
data: RegionConstraintData<'tcx>,
) -> (LexicalRegionResolutions<'tcx>, Vec<RegionResolutionError<'tcx>>) {
let mut errors = vec![];
let mut resolver = LexicalResolver { region_rels, var_infos, data };
let values = resolver.infer_variable_values(&mut errors);
(values, errors)
}
/// Contains the result of lexical region resolution. Offers methods
/// to look up the final value of a region variable.
#[derive(Clone)]
pub struct LexicalRegionResolutions<'tcx> {
pub(crate) values: IndexVec<RegionVid, VarValue<'tcx>>,
pub(crate) error_region: ty::Region<'tcx>,
}
#[derive(Copy, Clone, Debug)]
pub(crate) enum VarValue<'tcx> {
Value(Region<'tcx>),
ErrorValue,
}
#[derive(Clone, Debug)]
pub enum RegionResolutionError<'tcx> {
/// `ConcreteFailure(o, a, b)`:
///
/// `o` requires that `a <= b`, but this does not hold
ConcreteFailure(SubregionOrigin<'tcx>, Region<'tcx>, Region<'tcx>),
/// `GenericBoundFailure(p, s, a)`:
///
/// The parameter/associated-type `p` must be known to outlive the lifetime
/// `a` (but none of the known bounds are sufficient).
GenericBoundFailure(SubregionOrigin<'tcx>, GenericKind<'tcx>, Region<'tcx>),
/// `SubSupConflict(v, v_origin, sub_origin, sub_r, sup_origin, sup_r)`:
///
/// Could not infer a value for `v` (which has origin `v_origin`)
/// because `sub_r <= v` (due to `sub_origin`) but `v <= sup_r` (due to `sup_origin`) and
/// `sub_r <= sup_r` does not hold.
SubSupConflict(
RegionVid,
RegionVariableOrigin,
SubregionOrigin<'tcx>,
Region<'tcx>,
SubregionOrigin<'tcx>,
Region<'tcx>,
Vec<Span>, // All the influences on a given value that didn't meet its constraints.
),
/// Indicates a `'b: 'a` constraint where `'a` is in a universe that
/// cannot name the placeholder `'b`.
UpperBoundUniverseConflict(
RegionVid,
RegionVariableOrigin,
ty::UniverseIndex, // the universe index of the region variable
SubregionOrigin<'tcx>, // cause of the constraint
Region<'tcx>, // the placeholder `'b`
),
}
struct RegionAndOrigin<'tcx> {
region: Region<'tcx>,
origin: SubregionOrigin<'tcx>,
}
type RegionGraph<'tcx> = Graph<(), Constraint<'tcx>>;
struct LexicalResolver<'cx, 'tcx> {
region_rels: &'cx RegionRelations<'cx, 'tcx>,
var_infos: VarInfos,
data: RegionConstraintData<'tcx>,
}
impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.region_rels.tcx
}
fn | (
&mut self,
errors: &mut Vec<RegionResolutionError<'tcx>>,
) -> LexicalRegionResolutions<'tcx> {
let mut var_data = self.construct_var_data(self.tcx());
// Dorky hack to cause `dump_constraints` to only get called
// if debug mode is enabled:
debug!(
"----() End constraint listing (context={:?}) {:?}---",
self.region_rels.context,
self.dump_constraints(self.region_rels)
);
let graph = self.construct_graph();
self.expand_givens(&graph);
self.expansion(&mut var_data);
self.collect_errors(&mut var_data, errors);
self.collect_var_errors(&var_data, &graph, errors);
var_data
}
fn num_vars(&self) -> usize {
self.var_infos.len()
}
/// Initially, the value for all variables is set to `'empty`, the
/// empty region. The `expansion` phase will grow this larger.
fn construct_var_data(&self, tcx: TyCtxt<'tcx>) -> LexicalRegionResolutions<'tcx> {
LexicalRegionResolutions {
error_region: tcx.lifetimes.re_static,
values: IndexVec::from_fn_n(
|vid| {
let vid_universe = self.var_infos[vid].universe;
let re_empty = tcx.mk_region(ty::ReEmpty(vid_universe));
VarValue::Value(re_empty)
},
self.num_vars(),
),
}
}
fn dump_constraints(&self, free_regions: &RegionRelations<'_, 'tcx>) {
debug!("----() Start constraint listing (context={:?}) ()----", free_regions.context);
for (idx, (constraint, _)) in self.data.constraints.iter().enumerate() {
debug!("Constraint {} => {:?}", idx, constraint);
}
}
fn expand_givens(&mut self, graph: &RegionGraph<'_>) {
// Givens are a kind of horrible hack to account for
// constraints like 'c <= '0 that are known to hold due to
// closure signatures (see the comment above on the `givens`
// field). They should go away. But until they do, the role
// of this fn is to account for the transitive nature:
//
// Given 'c <= '0
// and '0 <= '1
// then 'c <= '1
let seeds: Vec<_> = self.data.givens.iter().cloned().collect();
for (r, vid) in seeds {
// While all things transitively reachable in the graph
// from the variable (`'0` in the example above).
let seed_index = NodeIndex(vid.index() as usize);
for succ_index in graph.depth_traverse(seed_index, OUTGOING) {
let succ_index = succ_index.0;
// The first N nodes correspond to the region
// variables. Other nodes correspond to constant
// regions.
if succ_index < self.num_vars() {
let succ_vid = RegionVid::new(succ_index);
// Add `'c <= '1`.
self.data.givens.insert((r, succ_vid));
}
}
}
}
fn expansion(&self, var_values: &mut LexicalRegionResolutions<'tcx>) {
let mut constraints = IndexVec::from_elem_n(Vec::new(), var_values.values.len());
let mut changes = Vec::new();
for constraint in self.data.constraints.keys() {
let (a_vid, a_region, b_vid, b_data) = match *constraint {
Constraint::RegSubVar(a_region, b_vid) => {
let b_data = var_values.value_mut(b_vid);
(None, a_region, b_vid, b_data)
}
Constraint::VarSubVar(a_vid, b_vid) => match *var_values.value(a_vid) {
VarValue::ErrorValue => continue,
VarValue::Value(a_region) => {
let b_data = var_values.value_mut(b_vid);
(Some(a_vid), a_region, b_vid, b_data)
}
},
Constraint::RegSubReg(..) | Constraint::VarSubReg(..) => {
// These constraints are checked after expansion
// is done, in `collect_errors`.
continue;
}
};
if self.expand_node(a_region, b_vid, b_data) {
changes.push(b_vid);
}
if let Some(a_vid) = a_vid {
match b_data {
VarValue::Value(Region(Interned(ReStatic, _))) | VarValue::ErrorValue => (),
_ => {
constraints[a_vid].push((a_vid, b_vid));
constraints[b_vid].push((a_vid, b_vid));
}
}
}
}
while let Some(vid) = changes.pop() {
constraints[vid].retain(|&(a_vid, b_vid)| {
let VarValue::Value(a_region) = *var_values.value(a_vid) else {
return false;
};
let b_data = var_values.value_mut(b_vid);
if self.expand_node(a_region, b_vid, b_data) {
changes.push(b_vid);
}
!matches!(
b_data,
VarValue::Value(Region(Interned(ReStatic, _))) | VarValue::ErrorValue
)
});
}
}
fn expand_node(
&self,
a_region: Region<'tcx>,
b_vid: RegionVid,
b_data: &mut VarValue<'tcx>,
) -> bool {
debug!("expand_node({:?}, {:?} == {:?})", a_region, b_vid, b_data);
match *a_region {
// Check if this relationship is implied by a given.
ty::ReEarlyBound(_) | ty::ReFree(_) => {
if self.data.givens.contains(&(a_region, b_vid)) {
debug!("given");
return false;
}
}
_ => {}
}
match *b_data {
VarValue::Value(cur_region) => {
// This is a specialized version of the `lub_concrete_regions`
// check below for a common case, here purely as an
// optimization.
let b_universe = self.var_infos[b_vid].universe;
if let ReEmpty(a_universe) = *a_region && a_universe == b_universe {
return false;
}
let mut lub = self.lub_concrete_regions(a_region, cur_region);
if lub == cur_region {
return false;
}
// Watch out for `'b: !1` relationships, where the
// universe of `'b` can't name the placeholder `!1`. In
// that case, we have to grow `'b` to be `'static` for the
// relationship to hold. This is obviously a kind of sub-optimal
// choice -- in the future, when we incorporate a knowledge
// of the parameter environment, we might be able to find a
// tighter bound than `'static`.
//
// (This might e.g. arise from being asked to prove `for<'a> { 'b: 'a }`.)
if let ty::RePlaceholder(p) = *lub && b_universe.cannot_name(p.universe) {
lub = self.tcx().lifetimes.re_static;
}
debug!("Expanding value of {:?} from {:?} to {:?}", b_vid, cur_region, lub);
*b_data = VarValue::Value(lub);
true
}
VarValue::ErrorValue => false,
}
}
/// True if `a <= b`, but not defined over inference variables.
#[instrument(level = "trace", skip(self))]
fn sub_concrete_regions(&self, a: Region<'tcx>, b: Region<'tcx>) -> bool {
let tcx = self.tcx();
let sub_free_regions = |r1, r2| self.region_rels.free_regions.sub_free_regions(tcx, r1, r2);
// Check for the case where we know that `'b: 'static` -- in that case,
// `a <= b` for all `a`.
let b_free_or_static = self.region_rels.free_regions.is_free_or_static(b);
if b_free_or_static && sub_free_regions(tcx.lifetimes.re_static, b) {
return true;
}
// If both `a` and `b` are free, consult the declared
// relationships. Note that this can be more precise than the
// `lub` relationship defined below, since sometimes the "lub"
// is actually the `postdom_upper_bound` (see
// `TransitiveRelation` for more details).
let a_free_or_static = self.region_rels.free_regions.is_free_or_static(a);
if a_free_or_static && b_free_or_static {
return sub_free_regions(a, b);
}
// For other cases, leverage the LUB code to find the LUB and
// check if it is equal to `b`.
self.lub_concrete_regions(a, b) == b
}
/// Returns the least-upper-bound of `a` and `b`; i.e., the
/// smallest region `c` such that `a <= c` and `b <= c`.
///
/// Neither `a` nor `b` may be an inference variable (hence the
/// term "concrete regions").
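///
/// For instance, `LUB('static, r)` is `'static` for any `r`, an empty
/// region is absorbed by a free or early-bound region, and the LUB of two
/// free regions is taken from the declared outlives relationships (see the
/// match arms below).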
#[instrument(level = "trace", skip(self))]
fn lub_concrete_regions(&self, a: Region<'tcx>, b: Region<'tcx>) -> Region<'tcx> {
let r = match (*a, *b) {
(ReLateBound(..), _) | (_, ReLateBound(..)) | (ReErased, _) | (_, ReErased) => {
bug!("cannot relate region: LUB({:?}, {:?})", a, b);
}
(ReVar(v_id), _) | (_, ReVar(v_id)) => {
span_bug!(
self.var_infos[v_id].origin.span(),
"lub_concrete_regions invoked with non-concrete \
regions: {:?}, {:?}",
a,
b
);
}
(ReStatic, _) | (_, ReStatic) => {
// nothing lives longer than `'static`
self.tcx().lifetimes.re_static
}
(ReEmpty(_), ReEarlyBound(_) | ReFree(_)) => {
// All empty regions are less than early-bound, free,
// and scope regions.
b
}
(ReEarlyBound(_) | ReFree(_), ReEmpty(_)) => {
// All empty regions are less than early-bound, free,
// and scope regions.
a
}
(ReEmpty(a_ui), ReEmpty(b_ui)) => {
// Empty regions are ordered according to the universe
// they are associated with.
let ui = a_ui.min(b_ui);
self.tcx().mk_region(ReEmpty(ui))
}
(ReEmpty(empty_ui), RePlaceholder(placeholder))
| (RePlaceholder(placeholder), ReEmpty(empty_ui)) => {
// If this empty region is from a universe that can
// name the placeholder, then the placeholder is
// larger; otherwise, the only ancestor is `'static`.
if empty_ui.can_name(placeholder.universe) {
self.tcx().mk_region(RePlaceholder(placeholder))
} else {
self.tcx().lifetimes.re_static
}
}
(ReEarlyBound(_) | ReFree(_), ReEarlyBound(_) | ReFree(_)) => {
self.region_rels.lub_free_regions(a, b)
}
// For these types, we cannot define any additional
// relationship:
(RePlaceholder(..), _) | (_, RePlaceholder(..)) => {
if a == b {
a
} else {
self.tcx().lifetimes.re_static
}
}
};
debug!("lub_concrete_regions({:?}, {:?}) = {:?}", a, b, r);
r
}
/// After expansion is complete, go and check upper bounds (i.e.,
/// cases where the region cannot grow larger than a fixed point)
/// and check that they are satisfied.
#[instrument(skip(self, var_data, errors))]
fn collect_errors(
&self,
var_data: &mut LexicalRegionResolutions<'tcx>,
errors: &mut Vec<RegionResolutionError<'tcx>>,
) {
for (constraint, origin) in &self.data.constraints {
debug!(?constraint, ?origin);
match *constraint {
Constraint::RegSubVar(..) | Constraint::VarSubVar(..) => {
// Expansion will ensure that these constraints hold. Ignore.
}
Constraint::RegSubReg(sub, sup) => {
if self.sub_concrete_regions(sub, sup) {
continue;
}
debug!(
"region error at {:?}: \
cannot verify that {:?} <= {:?}",
origin, sub, sup
);
errors.push(RegionResolutionError::ConcreteFailure(
(*origin).clone(),
sub,
sup,
));
}
Constraint::VarSubReg(a_vid, b_region) => {
let a_data = var_data.value_mut(a_vid);
debug!("contraction: {:?} == {:?}, {:?}", a_vid, a_data, b_region);
let VarValue::Value(a_region) = *a_data else {
continue;
};
// Do not report these errors immediately:
// instead, set the variable value to error and
// collect them later.
if !self.sub_concrete_regions(a_region, b_region) {
debug!(
"region error at {:?}: \
cannot verify that {:?}={:?} <= {:?}",
origin, a_vid, a_region, b_region
);
*a_data = VarValue::ErrorValue;
}
}
}
}
for verify in &self.data.verifys {
debug!("collect_errors: verify={:?}", verify);
let sub = var_data.normalize(self.tcx(), verify.region);
let verify_kind_ty = verify.kind.to_ty(self.tcx());
let verify_kind_ty = var_data.normalize(self.tcx(), verify_kind_ty);
if self.bound_is_met(&verify.bound, var_data, verify_kind_ty, sub) {
continue;
}
debug!(
"collect_errors: region error at {:?}: \
cannot verify that {:?} <= {:?}",
verify.origin, verify.region, verify.bound
);
errors.push(RegionResolutionError::GenericBoundFailure(
verify.origin.clone(),
verify.kind,
sub,
));
}
}
/// Go over the variables that were declared to be error variables
/// and create a `RegionResolutionError` for each of them.
fn collect_var_errors(
&self,
var_data: &LexicalRegionResolutions<'tcx>,
graph: &RegionGraph<'tcx>,
errors: &mut Vec<RegionResolutionError<'tcx>>,
) {
debug!("collect_var_errors, var_data = {:#?}", var_data.values);
// This is the best way that I have found to suppress
// duplicate and related errors. Basically we keep a set of
// flags for every node. Whenever an error occurs, we will
// walk some portion of the graph looking to find pairs of
// conflicting regions to report to the user. As we walk, we
// trip the flags from false to true, and if we find that
// we've already reported an error involving any particular
// node we just stop and don't report the current error. The
// idea is to report errors that derive from independent
// regions of the graph, but not those that derive from
// overlapping locations.
let mut dup_vec = IndexVec::from_elem_n(None, self.num_vars());
for (node_vid, value) in var_data.values.iter_enumerated() {
match *value {
VarValue::Value(_) => { /* Inference successful */ }
VarValue::ErrorValue => {
// Inference impossible: this value contains
// inconsistent constraints.
//
// I think that in this case we should report an
// error now -- unlike the case above, we can't
// wait to see whether the user needs the result
// of this variable. The reason is that the mere
// existence of this variable implies that the
// region graph is inconsistent, whether or not it
// is used.
//
// For example, we may have created a region
// variable that is the GLB of two other regions
// which do not have a GLB. Even if that variable
// is not used, it implies that those two regions
// *should* have a GLB.
//
// At least I think this is true. It may be that
// the mere existence of a conflict in a region
// variable that is not used is not a problem, so
// if this rule starts to create problems we'll
// have to revisit this portion of the code and
// think hard about it. =) -- nikomatsakis
// Obtain the spans for all the places that can
// influence the constraints on this value for
// richer diagnostics in `static_impl_trait`.
let influences: Vec<Span> = self
.data
.constraints
.iter()
.filter_map(|(constraint, origin)| match (constraint, origin) {
(
Constraint::VarSubVar(_, sup),
SubregionOrigin::DataBorrowed(_, sp),
) if sup == &node_vid => Some(*sp),
_ => None,
})
.collect();
self.collect_error_for_expanding_node(
graph,
&mut dup_vec,
node_vid,
errors,
influences,
);
}
}
}
}
fn construct_graph(&self) -> RegionGraph<'tcx> {
let num_vars = self.num_vars();
let mut graph = Graph::new();
for _ in 0..num_vars {
graph.add_node(());
}
// Issue #30438: two distinct dummy nodes, one for incoming
// edges (dummy_source) and another for outgoing edges
// (dummy_sink). In `dummy -> a -> b -> dummy`, using one
// dummy node leads one to think (erroneously) there exists a
// path from `b` to `a`. Two dummy nodes sidesteps the issue.
let dummy_source = graph.add_node(());
let dummy_sink = graph.add_node(());
for constraint in self.data.constraints.keys() {
match *constraint {
Constraint::VarSubVar(a_id, b_id) => {
graph.add_edge(
NodeIndex(a_id.index() as usize),
NodeIndex(b_id.index() as usize),
*constraint,
);
}
Constraint::RegSubVar(_, b_id) => {
graph.add_edge(dummy_source, NodeIndex(b_id.index() as usize), *constraint);
}
Constraint::VarSubReg(a_id, _) => {
graph.add_edge(NodeIndex(a_id.index() as usize), dummy_sink, *constraint);
}
Constraint::RegSubReg(..) => {
// this would be an edge from `dummy_source` to
// `dummy_sink`; just ignore it.
}
}
}
graph
}
fn collect_error_for_expanding_node(
&self,
graph: &RegionGraph<'tcx>,
dup_vec: &mut IndexVec<RegionVid, Option<RegionVid>>,
node_idx: RegionVid,
errors: &mut Vec<RegionResolutionError<'tcx>>,
influences: Vec<Span>,
) {
// Errors in expanding nodes result from a lower-bound that is
// not contained by an upper-bound.
let (mut lower_bounds, lower_vid_bounds, lower_dup) =
self.collect_bounding_regions(graph, node_idx, INCOMING, Some(dup_vec));
let (mut upper_bounds, _, upper_dup) =
self.collect_bounding_regions(graph, node_idx, OUTGOING, Some(dup_vec));
if lower_dup || upper_dup {
return;
}
// We place free regions first because we special-case
// SubSupConflict(ReFree, ReFree) when reporting errors, and so
// the user is more likely to get a specific suggestion.
fn region_order_key(x: &RegionAndOrigin<'_>) -> u8 {
match *x.region {
ReEarlyBound(_) => 0,
ReFree(_) => 1,
_ => 2,
}
}
lower_bounds.sort_by_key(region_order_key);
upper_bounds.sort_by_key(region_order_key);
let node_universe = self.var_infos[node_idx].universe;
for lower_bound in &lower_bounds {
let effective_lower_bound = if let ty::RePlaceholder(p) = *lower_bound.region {
if node_universe.cannot_name(p.universe) {
self.tcx().lifetimes.re_static
} else {
lower_bound.region
}
} else {
lower_bound.region
};
for upper_bound in &upper_bounds {
if !self.sub_concrete_regions(effective_lower_bound, upper_bound.region) {
let origin = self.var_infos[node_idx].origin;
debug!(
"region inference error at {:?} for {:?}: SubSupConflict sub: {:?} \
sup: {:?}",
origin, node_idx, lower_bound.region, upper_bound.region
);
errors.push(RegionResolutionError::SubSupConflict(
node_idx,
origin,
lower_bound.origin.clone(),
lower_bound.region,
upper_bound.origin.clone(),
upper_bound.region,
influences,
));
return;
}
}
}
// If we have a scenario like `exists<'a> { forall<'b> { 'b:
// 'a } }`, we wind up without any lower-bound -- all we have
// are placeholders as upper bounds, but the universe of the
// variable `'a`, or some variable that `'a` has to outlive, doesn't
// permit those placeholders.
let min_universe = lower_vid_bounds
.into_iter()
.map(|vid| self.var_infos[vid].universe)
.min()
.expect("lower_vid_bounds should at least include `node_idx`");
for upper_bound in &upper_bounds {
if let ty::RePlaceholder(p) = *upper_bound.region {
if min_universe.cannot_name(p.universe) {
let origin = self.var_infos[node_idx].origin;
errors.push(RegionResolutionError::UpperBoundUniverseConflict(
node_idx,
origin,
min_universe,
upper_bound.origin.clone(),
upper_bound.region,
));
return;
}
}
}
// Errors in earlier passes can yield error variables without
// resolution errors here; delay ICE in favor of those errors.
self.tcx().sess.delay_span_bug(
self.var_infos[node_idx].origin.span(),
&format!(
"collect_error_for_expanding_node() could not find \
error for var {:?} in universe {:?}, lower_bounds={:#?}, \
upper_bounds={:#?}",
node_idx, node_universe, lower_bounds, upper_bounds
),
);
}
/// Collects all regions that "bound" the variable `orig_node_idx` in the
/// given direction.
///
/// If `dup_vec` is `Some` it's used to track duplicates between successive
/// calls of this function.
///
/// The return tuple fields are:
/// - a list of all concrete regions bounding the given region.
/// - the set of all region variables bounding the given region.
/// - a `bool` that's true if the returned region variables overlap with
/// those returned by a previous call for another region.
fn collect_bounding_regions(
&self,
graph: &RegionGraph<'tcx>,
orig_node_idx: RegionVid,
dir: Direction,
mut dup_vec: Option<&mut IndexVec<RegionVid, Option<RegionVid>>>,
) -> (Vec<RegionAndOrigin<'tcx>>, FxHashSet<RegionVid>, bool) {
struct WalkState<'tcx> {
set: FxHashSet<RegionVid>,
stack: Vec<RegionVid>,
result: Vec<RegionAndOrigin<'tcx>>,
dup_found: bool,
}
let mut state = WalkState {
set: Default::default(),
stack: vec![orig_node_idx],
result: Vec::new(),
dup_found: false,
};
state.set.insert(orig_node_idx);
// to start off the process, walk the source node in the
// direction specified
process_edges(&self.data, &mut state, graph, orig_node_idx, dir);
while let Some(node_idx) = state.stack.pop() {
// check whether we've visited this node on some previous walk
if let Some(dup_vec) = &mut dup_vec {
if dup_vec[node_idx].is_none() {
dup_vec[node_idx] = Some(orig_node_idx);
} else if dup_vec[node_idx] != Some(orig_node_idx) {
state.dup_found = true;
}
debug!(
"collect_concrete_regions(orig_node_idx={:?}, node_idx={:?})",
orig_node_idx, node_idx
);
}
process_edges(&self.data, &mut state, graph, node_idx, dir);
}
let WalkState { result, dup_found, set, .. } = state;
return (result, set, dup_found);
fn process_edges<'tcx>(
this: &RegionConstraintData<'tcx>,
state: &mut WalkState<'tcx>,
graph: &RegionGraph<'tcx>,
source_vid: RegionVid,
dir: Direction,
) {
debug!("process_edges(source_vid={:?}, dir={:?})", source_vid, dir);
let source_node_index = NodeIndex(source_vid.index() as usize);
for (_, edge) in graph.adjacent_edges(source_node_index, dir) {
match edge.data {
Constraint::VarSubVar(from_vid, to_vid) => {
let opp_vid = if from_vid == source_vid { to_vid } else { from_vid };
if state.set.insert(opp_vid) {
state.stack.push(opp_vid);
}
}
Constraint::RegSubVar(region, _) | Constraint::VarSubReg(_, region) => {
state.result.push(RegionAndOrigin {
region,
origin: this.constraints.get(&edge.data).unwrap().clone(),
});
}
Constraint::RegSubReg(..) => panic!(
"cannot reach reg-sub-reg edge in region inference \
post-processing"
),
}
}
}
}
fn bound_is_met(
&self,
bound: &VerifyBound<'tcx>,
var_values: &LexicalRegionResolutions<'tcx>,
generic_ty: Ty<'tcx>,
min: ty::Region<'tcx>,
) -> bool {
match bound {
VerifyBound::IfEq(k, b) => {
(var_values.normalize(self.region_rels.tcx, *k) == generic_ty)
&& self.bound_is_met(b, var_values, generic_ty, min)
}
VerifyBound::OutlivedBy(r) => {
self.sub_concrete_regions(min, var_values.normalize(self.tcx(), *r))
}
VerifyBound::IsEmpty => {
matches!(*min, ty::ReEmpty(_))
}
VerifyBound::AnyBound(bs) => {
bs.iter().any(|b| self.bound_is_met(b, var_values, generic_ty, min))
}
VerifyBound::AllBounds(bs) => {
bs.iter().all(|b| self.bound_is_met(b, var_values, generic_ty, min))
}
}
}
}
impl<'tcx> fmt::Debug for RegionAndOrigin<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "RegionAndOrigin({:?},{:?})", self.region, self.origin)
}
}
impl<'tcx> LexicalRegionResolutions<'tcx> {
fn normalize<T>(&self, tcx: TyCtxt<'tcx>, value: T) -> T
where
T: TypeFoldable<'tcx>,
{
tcx.fold_regions(value, &mut false, |r, _db| match *r {
ty::ReVar(rid) => self.resolve_var(rid),
_ => r,
})
}
fn value(&self, rid: RegionVid) -> &VarValue<'tcx> {
&self.values[rid]
}
fn value_mut(&mut self, rid: RegionVid) -> &mut VarValue<'tcx> {
&mut self.values[rid]
}
pub fn resolve_var(&self, rid: RegionVid) -> ty::Region<'tcx> {
let result = match self.values[rid] {
VarValue::Value(r) => r,
VarValue::ErrorValue => self.error_region,
};
debug!("resolve_var({:?}) = {:?}", rid, result);
result
}
}
| infer_variable_values |
test-glfw-callbacks.go | package main
import (
// "fmt"
"github.com/go-gl/gl/v3.3-core/gl"
"github.com/go-gl/glfw/v3.2/glfw"
"github.com/go-gl/mathgl/mgl32"
"math"
// "github.com/mki1967/go-mki3d/mki3d"
"github.com/mki1967/go-mki3d/glmki3d"
)
// Function to be used as resize callback
func SizeCallback(w *glfw.Window, width int, height int) {
gl.Viewport(0, 0, int32(width), int32(height)) // inform GL about resize
DataShaderPtr.UniPtr.ProjectionUni = glmki3d.ProjectionMatrix(DataShaderPtr.Mki3dPtr.Projection, width, height) // recompute projection matrix
}
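// KeyCallback dispatches keyboard input: arrow keys rotate the view, with
// Ctrl they rotate the model, with Shift (and Ctrl+Shift) they translate,
// F/B move forward and backward, L re-aims the light along the current view
// direction, and H shows the help text.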
func KeyCallback(w *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) {
if action == glfw.Release |
const angle = 1 * math.Pi / 180
const step = 0.5
switch {
/* rotate model */
case key == glfw.KeyRight && mods == glfw.ModControl:
DataShaderPtr.UniPtr.ModelUni = mgl32.HomogRotate3DY(angle).Mul4(DataShaderPtr.UniPtr.ModelUni)
case key == glfw.KeyLeft && mods == glfw.ModControl:
DataShaderPtr.UniPtr.ModelUni = mgl32.HomogRotate3DY(-angle).Mul4(DataShaderPtr.UniPtr.ModelUni)
case key == glfw.KeyUp && mods == glfw.ModControl:
DataShaderPtr.UniPtr.ModelUni = mgl32.HomogRotate3DX(-angle).Mul4(DataShaderPtr.UniPtr.ModelUni)
case key == glfw.KeyDown && mods == glfw.ModControl:
DataShaderPtr.UniPtr.ModelUni = mgl32.HomogRotate3DX(angle).Mul4(DataShaderPtr.UniPtr.ModelUni)
/* rotate view */
case key == glfw.KeyRight && mods == 0:
DataShaderPtr.UniPtr.ViewUni = mgl32.HomogRotate3DY(-angle).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyLeft && mods == 0:
DataShaderPtr.UniPtr.ViewUni = mgl32.HomogRotate3DY(angle).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyUp && mods == 0:
DataShaderPtr.UniPtr.ViewUni = mgl32.HomogRotate3DX(angle).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyDown && mods == 0:
DataShaderPtr.UniPtr.ViewUni = mgl32.HomogRotate3DX(-angle).Mul4(DataShaderPtr.UniPtr.ViewUni)
/* move model*/
case key == glfw.KeyRight && mods == glfw.ModControl|glfw.ModShift:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(step, 0, 0).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyLeft && mods == glfw.ModControl|glfw.ModShift:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(-step, 0, 0).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyUp && mods == glfw.ModControl|glfw.ModShift:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(0, step, 0).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyDown && mods == glfw.ModControl|glfw.ModShift:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(0, -step, 0).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyF && mods == glfw.ModControl|glfw.ModShift:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(0, 0, step).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyB && mods == glfw.ModControl|glfw.ModShift:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(0, 0, -step).Mul4(DataShaderPtr.UniPtr.ViewUni)
/* move view*/
case key == glfw.KeyRight && mods == glfw.ModShift:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(-step, 0, 0).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyLeft && mods == glfw.ModShift:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(step, 0, 0).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyUp && mods == glfw.ModShift:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(0, -step, 0).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyDown && mods == glfw.ModShift:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(0, step, 0).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyF && mods == glfw.ModShift:
fallthrough
case key == glfw.KeyF && mods == 0:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(0, 0, -step).Mul4(DataShaderPtr.UniPtr.ViewUni)
case key == glfw.KeyB && mods == glfw.ModShift:
fallthrough
case key == glfw.KeyB && mods == 0:
DataShaderPtr.UniPtr.ViewUni = mgl32.Translate3D(0, 0, step).Mul4(DataShaderPtr.UniPtr.ViewUni)
/* light */
case key == glfw.KeyL && mods == 0:
DataShaderPtr.UniPtr.LightUni = DataShaderPtr.UniPtr.ViewUni.Mat3().Inv().Mul3x1(mgl32.Vec3{0, 0, 1}).Normalize()
/* help */
case key == glfw.KeyH && mods == 0:
message(helpText)
/*
doInMainThread = func() {
message( helpText )
}
*/
}
}
| {
return
} |
.cmake-format.py | # --------------------------
# General Formatting Options
# --------------------------
# How wide to allow formatted cmake files
line_width = 120
# How many spaces to tab for indent
tab_size = 2
# If an argument group contains more than this many sub-groups (parg or kwarg
# groups), then force it to a vertical layout.
max_subgroups_hwrap = 4
# If a positional argument group contains more than this many arguments, then
# force it to a vertical layout.
max_pargs_hwrap = 6
# If true, separate flow control names from their parentheses with a space
separate_ctrl_name_with_space = False
# If true, separate function names from parentheses with a space
separate_fn_name_with_space = False
# If a statement is wrapped to more than one line, then dangle the closing
# parenthesis on its own line.
dangle_parens = False
# If the trailing parenthesis must be 'dangled' on its own line, then align it
# to this reference: `prefix`: the start of the statement, `prefix-indent`: the
# start of the statement, plus one indentation level, `child`: align to the
# column of the arguments
dangle_align = 'prefix'
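# For example, with `dangle_parens = True` a wrapped statement is rendered
# roughly as
#
#   set(VERY_LONG_VARIABLE_NAME
#       value_one
#       value_two
#   )
#
# where `dangle_align = 'prefix'` puts the closing parenthesis in the column
# of the statement itself rather than in the argument column ('child').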
min_prefix_chars = 4
# If the statement spelling length (including space and parenthesis) is larger
# than the tab width by more than this amount, then force reject un-nested
# layouts.
max_prefix_chars = 10
# If a candidate layout is wrapped horizontally but it exceeds this many lines,
# then reject the layout.
max_lines_hwrap = 80
# What style line endings to use in the output.
line_ending = 'unix'
|
# Format command names consistently as 'lower' or 'upper' case
command_case = 'lower'
# Format keywords consistently as 'lower' or 'upper' case
keyword_case = 'upper'
# Specify structure for custom cmake functions
additional_commands = {
"pkg_find": {
"kwargs": {
"PKG": "*"
}
}
}
# A list of command names which should always be wrapped
always_wrap = []
# If true, the argument lists which are known to be sortable will be sorted
# lexicographically
enable_sort = True
# If true, the parsers may infer whether or not an argument list is sortable
# (without annotation).
autosort = False
# If a comment line starts with at least this many consecutive hash characters,
# then don't lstrip() them off. This allows for lazy hash rulers where the first
# hash char is not separated by space
hashruler_min_length = 10
# A dictionary containing any per-command configuration overrides. Currently
# only `command_case` is supported.
per_command = {
"OptimizeForArchitecture": {
"command_case": "unchanged"
},
"OFA_AutodetectArm": {
"command_case": "unchanged"
},
"OFA_Autodetectx86": {
"command_case": "unchanged"
},
"OFA_AutodetectHostArchitecture": {
"command_case": "unchanged"
},
"OFA_HandleX86Options": {
"command_case": "unchanged"
},
"OFA_HandleArmOptions": {
"command_case": "unchanged"
},
"AddCompilerFlag": {
"command_case": "unchanged"
},
}
# A dictionary mapping layout nodes to a list of wrap decisions. See the
# documentation for more information.
layout_passes = {}
# --------------------------
# Comment Formatting Options
# --------------------------
# What character to use for bulleted lists
bullet_char = '*'
# What character to use as punctuation after numerals in an enumerated list
enum_char = '.'
# enable comment markup parsing and reflow
enable_markup = True
# If comment markup is enabled, don't reflow the first comment block in each
# listfile. Use this to preserve formatting of your copyright/license
# statements.
first_comment_is_literal = True
# If comment markup is enabled, don't reflow any comment block which matches
# this (regex) pattern. Default is `None` (disabled).
literal_comment_pattern = "#\\[\\["
# Regular expression to match preformat fences in comments
# default=r'^\s*([`~]{3}[`~]*)(.*)$'
fence_pattern = '^\\s*([`~]{3}[`~]*)(.*)$'
# Regular expression to match rulers in comments
# default=r'^\s*[^\w\s]{3}.*[^\w\s]{3}$'
ruler_pattern = '^\\s*[^\\w\\s]{3}.*[^\\w\\s]{3}$'
# If true, then insert a space between the first hash char and remaining hash
# chars in a hash ruler, and normalize its length to fill the column
canonicalize_hashrulers = True
# ---------------------------------
# Miscellaneous Options
# ---------------------------------
# If true, emit the unicode byte-order mark (BOM) at the start of the file
emit_byteorder_mark = False
# Specify the encoding of the input file. Defaults to utf-8.
input_encoding = 'utf-8'
# Specify the encoding of the output file. Defaults to utf-8. Note that cmake
# only claims to support utf-8 so be careful when using anything else
output_encoding = 'utf-8' | |
vi_performance_manager.py | #--
# Copyright (c) 2012, Sebastian Tello, Alejandro Lozanoff
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#--
from pysphere.resources import VimService_services as VI
from pysphere.vi_property import VIProperty
from pysphere.resources.vi_exception import VIException, VIApiException, \
UnsupportedPerfIntervalError, FaultTypes
import datetime
class EntityStatistics:
def __init__(self, mor, counter_key, counter_name, counter_desc, group_name,
group_desc, unit_name, unit_desc, instance_name, value,
time_stamp):
self.mor = mor
self.counter_key = counter_key
self.counter = counter_name
self.description = counter_desc
self.group = group_name
self.group_description = group_desc
self.unit = unit_name
self.unit_description = unit_desc
self.instance = instance_name
self.value = value
self.time = time_stamp
def __str__(self):
return "MOR: %s\nCounter: %s (%s)\nGroup: %s\nDescription: %s\n" \
"Instance: %s\nValue: %s\nUnit: %s\nTime: %s" % (
self.mor, self.counter, self.counter_key,
self.group_description, self.description,
self.instance, self.value,
self.unit_description, self.time)
def __repr__(self):
return"<%(mor)s:%(counter)s(%(counter_key)s):%(description)s" \
":%(instance)s:%(value)s:%(unit)s:%(time)s>" % self.__dict__
class Intervals:
CURRENT = None
PAST_DAY = 1
PAST_WEEK = 2
PAST_MONTH = 3
PAST_YEAR = 4
class PerformanceManager:
| INTERVALS = Intervals
def __init__(self, server, mor):
self._server = server
self._mor = mor
self._properties = VIProperty(server, mor)
try:
self._supported_intervals = dict([(i.key, i.samplingPeriod)
for i in self._properties.historicalInterval if i.enabled])
except:
            # no historical intervals supported
self._supported_intervals = {}
def _get_counter_info(self, counter_id, counter_obj):
"""Return name, description, group, and unit info of a give counter_id.
counter_id [int]: id of the counter.
counter_obj [list]: An array consisting of performance
counter information for the specified counterIds."""
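        # Illustrative sketch of the return value: for the "cpu.usage" counter
        # this returns something like
        # ('usage', 'Usage', 'cpu', 'CPU', 'percent', '%') (assumed labels).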
for c in counter_obj:
if c.Key == counter_id:
return (c.NameInfo.Key, c.NameInfo.Label, c.GroupInfo.Key,
c.GroupInfo.Label, c.UnitInfo.Key, c.UnitInfo.Label)
return None, None, None, None, None, None
def _get_metric_id(self, metrics, counter_obj, counter_ids):
""" Get the metric ID from a metric name.
metrics [list]: An array of performance metrics with a
performance counter ID and an instance name.
counter_obj [list]: An array consisting of performance
counter information for the specified counterIds.
"""
metric_list = []
for metric in metrics:
if metric.CounterId in counter_ids:
if metric not in metric_list:
metric_list.append(metric)
return metric_list
def get_entity_counters(self, entity, interval=None):
"""Returns a dictionary of available counters. The dictionary key
is the counter name, and value is the corresponding counter id
interval: None (default) for current real-time statistics, or the
interval id for historical statistics see IDs available in
PerformanceManager.INTERVALS"""
sampling_period = self._check_and_get_interval_by_id(entity, interval)
metrics = self.query_available_perf_metric(entity,
interval_id=sampling_period)
if not metrics:
return {}
counter_obj = self.query_perf_counter([metric.CounterId
for metric in metrics])
return dict([("%s.%s" % (c.GroupInfo.Key, c.NameInfo.Key), c.Key)
for c in counter_obj])
def get_entity_statistic(self, entity, counters, interval=None,
composite=False):
""" Get the give statistics from a given managed object
entity [mor]: ManagedObject Reference of the managed object from were
statistics are to be retrieved.
counter_id [list of integers or strings]: Counter names or ids
to retrieve stats for.
interval: None (default) for current real-time statistics, or the
interval id for historical statistics see IDs available in
PerformanceManager.INTERVALS
composite [bool] (default False) If true, uses QueryPerfComposite
instead of QueryPerf.
"""
sampling_period = self._check_and_get_interval_by_id(entity, interval)
if not isinstance(counters, list):
counters = [counters]
if any([isinstance(i, basestring) for i in counters]):
avail_counters = self.get_entity_counters(entity, interval)
new_list = []
for c in counters:
if isinstance(c, int):
new_list.append(c)
else:
counter_id = avail_counters.get(c)
if counter_id:
new_list.append(counter_id)
counters = new_list
metrics = self.query_available_perf_metric(entity,
interval_id=sampling_period)
counter_obj = self.query_perf_counter(counters)
metric = self._get_metric_id(metrics, counter_obj, counters)
if not metric:
return []
query = self.query_perf(entity, metric_id=metric, max_sample=1,
interval_id=sampling_period, composite=composite)
statistics = []
if not query:
return statistics
stats = []
if composite:
if hasattr(query, "Entity"):
stats.extend(query.Entity.Value)
if hasattr(query, "ChildEntity"):
for item in query.ChildEntity:
stats.extend(item.Value)
else:
if hasattr(query[0], "Value"):
stats = query[0].Value
for stat in stats:
cname, cdesc, gname, gdesc, uname, udesc = self._get_counter_info(
stat.Id.CounterId,counter_obj)
instance_name = str(stat.Id.Instance)
stat_value = str(stat.Value[0])
date_now = datetime.datetime.utcnow()
statistics.append(EntityStatistics(entity, stat.Id.CounterId, cname,
cdesc, gname, gdesc, uname,
udesc, instance_name, stat_value,
date_now))
return statistics
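    # Usage sketch (illustrative, not part of the original module), assuming
    # `perf_mgr` is a PerformanceManager instance and `vm_mor` is the MOR of a
    # powered-on virtual machine:
    #
    #   counters = perf_mgr.get_entity_counters(vm_mor)
    #   stats = perf_mgr.get_entity_statistic(vm_mor, ['cpu.usage', 'mem.usage'])
    #   for s in stats:
    #       print s    # EntityStatistics; see __str__ above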
def _check_and_get_interval_by_id(self, entity, interval):
"""Given an interval ID (or None for refresh rate) verifies if
the entity or the system supports that interval. Returns the sampling
period if so, or raises an Exception if not supported"""
summary = self.query_perf_provider_summary(entity)
if not interval: #must support current (real time) statistics
if not summary.CurrentSupported:
#Here the following exception should be raised, however,
                #some objects such as datastores can retrieve metrics even though
                #current metrics are not supported, and refreshRate isn't set.
                #So if this is the case, I'll just return None.
#For more details see:
#http://communities.vmware.com/message/1623870#1623870
return None
raise UnsupportedPerfIntervalError(
"Current statistics not supported for this "
"entity. Try using an historical interval "
"id instead.", FaultTypes.NOT_SUPPORTED)
return summary.RefreshRate
else:
if not summary.SummarySupported:
raise UnsupportedPerfIntervalError(
"Summary statistics not supported for this "
"entity. Try using current interlval instead "
"(interval=None).", FaultTypes.NOT_SUPPORTED)
if interval not in self._supported_intervals:
raise UnsupportedPerfIntervalError(
"The Interval ID provided is not supported "
"on this server.", FaultTypes.NOT_SUPPORTED)
return self._supported_intervals.get(interval)
def query_available_perf_metric(self, entity, begin_time=None,
end_time=None,
interval_id=None):
"""Retrieves available performance metrics for the specified
ManagedObject between the optional beginTime and endTime. These are the
performance statistics that are available for the given time interval.
entity [mor]: The ManagedObject for which available performance metrics
are queried.
begin_time [time tuple]: The time from which available performance
metrics are gathered. Corresponds to server time. When the beginTime
is omitted, the returned metrics start from the first available
metric in the system.
end_time [time tuple]: The time up to which available performance
metrics are gathered. Corresponds to server time. When the endTime
is omitted, the returned result includes up to the most recent
metric value.
interval_id [int]: Specify a particular interval that the query is
interested in. Acceptable intervals are the refreshRate returned in
QueryProviderSummary, which is used to retrieve available metrics
for real-time performance statistics, or one of the historical
intervals, which are used to retrieve available metrics for
historical performance statistics. If interval is not specified,
system returns available metrics for historical statistics.
"""
if begin_time:
begin_time[6] = 0
if end_time:
end_time[6] = 0
try:
request = VI.QueryAvailablePerfMetricRequestMsg()
mor_pm = request.new__this(self._mor)
mor_pm.set_attribute_type(self._mor.get_attribute_type())
request.set_element__this(mor_pm)
mor_entity = request.new_entity(entity)
mor_entity.set_attribute_type(entity.get_attribute_type())
request.set_element_entity(mor_entity)
if begin_time:
request.set_element_beginTime(begin_time)
if end_time:
request.set_element_endTime(end_time)
if interval_id:
request.set_element_intervalId(interval_id)
do_perf_metric_id = self._server._proxy.QueryAvailablePerfMetric(
request)._returnval
return do_perf_metric_id
except (VI.ZSI.FaultException), e:
raise VIApiException(e)
def query_perf_provider_summary(self, entity):
"""Returns a ProviderSummary object for a ManagedObject for which
performance statistics can be queried. Also indicates whether current or
summary statistics are supported. If the input managed entity is not a
performance provider, an InvalidArgument exception is thrown.
entity [mor]: The ManagedObject for which available performance metrics
are queried.
"""
if not entity:
raise VIException("No Entity specified.",FaultTypes.PARAMETER_ERROR)
try:
request = VI.QueryPerfProviderSummaryRequestMsg()
mor_qpps = request.new__this(self._mor)
mor_qpps.set_attribute_type(self._mor.get_attribute_type())
request.set_element__this(mor_qpps)
qpps_entity = request.new_entity(entity)
qpps_entity.set_attribute_type(entity.get_attribute_type())
request.set_element_entity(qpps_entity)
qpps = self._server._proxy.QueryPerfProviderSummary(
request)._returnval
return qpps
except (VI.ZSI.FaultException), e:
raise VIApiException(e)
def query_perf_counter(self, counter_id):
"""Retrieves counter information for the list of counter IDs passed in.
counter_id [list]: list of integers containing the counter IDs.
"""
if counter_id:
if not isinstance(counter_id, list):
raise VIException("counter_id must be a list",
FaultTypes.PARAMETER_ERROR)
else:
raise VIException("No counter_id specified.",
FaultTypes.PARAMETER_ERROR)
try:
request = VI.QueryPerfCounterRequestMsg()
mor_qpc = request.new__this(self._mor)
mor_qpc.set_attribute_type(self._mor.get_attribute_type())
request.set_element__this(mor_qpc)
request.set_element_counterId(counter_id)
qpc = self._server._proxy.QueryPerfCounter(request)._returnval
return qpc
except (VI.ZSI.FaultException), e:
raise VIApiException(e)
def query_perf(self, entity, format='normal', interval_id=None,
max_sample=None, metric_id=None, start_time=None,
composite=False):
"""Returns performance statistics for the entity. The client can limit
the returned information by specifying a list of metrics and a suggested
sample interval ID. Server accepts either the refreshRate or one of the
historical intervals as input interval.
entity [mor]: The ManagedObject managed object whose performance
statistics are being queried.
format [string]: The format to be used while returning the statistics.
interval_id [int]: The interval (samplingPeriod) in seconds for which
performance statistics are queried. There is a set of intervals for
            historical statistics. Refer to HistoricalInterval for more
information about these intervals. To retrieve the greatest
available level of detail, the provider's refreshRate may be used
for this property.
max_sample [int]: The maximum number of samples to be returned from
server. The number of samples returned are more recent samples in
the time range specified. For example, if the user specifies a
maxSample of 1, but not a given time range, the most recent sample
collected is returned. This parameter can be used only when querying
for real-time statistics by setting the intervalId parameter to the
provider's refreshRate.
This argument is ignored for historical statistics.
metric_id: [PerfMetricId]: The performance metrics to be retrieved.
start_time [timetuple]: The time from which statistics are to be
retrieved. Corresponds to server time. When startTime is omitted,
the returned metrics start from the first available metric in the
system. When a startTime is specified, the returned samples do not
include the sample at startTime.
composite: [bool]: If true requests QueryPerfComposite method instead of
            QueryPerf.
"""
if interval_id:
if not isinstance(interval_id, int) or interval_id < 0:
raise VIException("interval_id must be a positive integer",
FaultTypes.PARAMETER_ERROR)
if max_sample:
if not isinstance(max_sample, int) or max_sample < 0:
raise VIException("max_sample must be a positive integer",
FaultTypes.PARAMETER_ERROR)
if metric_id:
if not isinstance(metric_id, list):
raise VIException("metric_id must be a list of integers",
FaultTypes.PARAMETER_ERROR)
try:
if composite:
request = VI.QueryPerfCompositeRequestMsg()
else:
request = VI.QueryPerfRequestMsg()
mor_qp = request.new__this(self._mor)
mor_qp.set_attribute_type(self._mor.get_attribute_type())
request.set_element__this(mor_qp)
query_spec = request.new_querySpec()
spec_entity = query_spec.new_entity(entity)
spec_entity.set_attribute_type(entity.get_attribute_type())
query_spec.set_element_entity(spec_entity)
if format != "normal":
if format == "csv":
query_spec.set_element_format(format)
else:
raise VIException("accepted formats are 'normal' and 'csv'",
FaultTypes.PARAMETER_ERROR)
if interval_id:
query_spec.set_element_intervalId(interval_id)
if max_sample:
query_spec.set_element_maxSample(max_sample)
if metric_id:
query_spec.set_element_metricId(metric_id)
if start_time:
query_spec.set_element_startTime(start_time)
if composite:
request.set_element_querySpec(query_spec)
query_perf = self._server._proxy.QueryPerfComposite(
request)._returnval
else:
request.set_element_querySpec([query_spec])
query_perf = self._server._proxy.QueryPerf(request)._returnval
return query_perf
except (VI.ZSI.FaultException), e:
raise VIApiException(e) |
|
export_entries_by_col.py | import hail as hl
from hail.typecheck import typecheck
@typecheck(mt=hl.MatrixTable, path=str, batch_size=int, bgzip=bool, header_json_in_file=bool, use_string_key_as_file_name=bool)
def export_entries_by_col(mt: hl.MatrixTable,
path: str,
batch_size: int = 256,
bgzip: bool = True,
header_json_in_file: bool = True,
use_string_key_as_file_name: bool = False):
| """Export entries of the `mt` by column as separate text files.
Examples
--------
>>> range_mt = hl.utils.range_matrix_table(10, 10)
>>> range_mt = range_mt.annotate_entries(x = hl.rand_unif(0, 1))
>>> hl.experimental.export_entries_by_col(range_mt, 'output/cols_files')
Notes
-----
This function writes a directory with one file per column in `mt`. The
files contain one tab-separated field (with header) for each row field
and entry field in `mt`. The column fields of `mt` are written as JSON
in the first line of each file, prefixed with a ``#``.
The above will produce a directory at ``output/cols_files`` with the
following files:
.. code-block:: text
$ ls -l output/cols_files
total 80
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 index.tsv
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-00.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-01.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-02.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-03.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-04.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-05.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-06.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-07.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-08.tsv.bgz
-rw-r--r-- 1 hail-dev wheel 712 Jan 25 17:19 part-09.tsv.bgz
$ zcat output/cols_files/part-00.tsv.bgz
#{"col_idx":0}
row_idx x
0 6.2501e-02
1 7.0083e-01
2 3.6452e-01
3 4.4170e-01
4 7.9177e-02
5 6.2392e-01
6 5.9920e-01
7 9.7540e-01
8 8.4848e-01
9 3.7423e-01
Due to overhead and file system limits related to having large numbers
of open files, this function will iteratively export groups of columns.
The `batch_size` parameter can control the size of these groups.
Parameters
----------
mt : :class:`.MatrixTable`
    path : :obj:`str`
        Path (directory) to write to.
batch_size : :obj:`int`
Number of columns to write per iteration.
bgzip : :obj:`bool`
BGZip output files.
header_json_in_file : :obj:`bool`
Include JSON header in each component file (if False, only written to index.tsv)
"""
if use_string_key_as_file_name and not (len(mt.col_key) == 1 and mt.col_key[0].dtype == hl.tstr):
raise ValueError(f'parameter "use_string_key_as_file_name" requires a single string column key, found {list(mt.col_key.dtype.values())}')
hl.utils.java.Env.backend().execute(
hl.ir.MatrixToValueApply(mt._mir,
{'name': 'MatrixExportEntriesByCol',
'parallelism': batch_size,
'path': path,
'bgzip': bgzip,
'headerJsonInFile': header_json_in_file,
'useStringKeyAsFileName': use_string_key_as_file_name})
) |
|
status.rs | // Heavily borrowed from the http crate, would re-export since it is already a dependency
// but we have our own range of constants.
use std::convert::TryFrom;
use std::error::Error;
use std::fmt;
use std::num::NonZeroU16;
use std::str::FromStr;
/// A possible error value when converting a `StatusCode` from a `u16` or `&str`
///
/// This error indicates that the supplied input was not a valid number, was less
/// than 100, or was greater than 999.
pub struct InvalidStatusCode {}
impl fmt::Debug for InvalidStatusCode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("InvalidStatusCode").finish()
}
}
impl fmt::Display for InvalidStatusCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("invalid status code")
}
}
impl Error for InvalidStatusCode {}
impl InvalidStatusCode {
fn new() -> InvalidStatusCode {
InvalidStatusCode {}
}
}
/// A NATS status code.
///
/// Constants are provided for known status codes.
///
/// Status code values in the range 100-999 (inclusive) are supported by this
/// type. Values in the range 100-599 are semantically classified by the most
/// significant digit. See [`StatusCode::is_success`], etc.
///
/// # Examples
///
/// ```
/// use async_nats::StatusCode;
///
/// assert_eq!(StatusCode::OK.as_u16(), 200);
/// assert!(StatusCode::OK.is_success());
/// ```
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct StatusCode(NonZeroU16);
impl StatusCode {
/// Converts a u16 to a status code.
///
/// The function validates the correctness of the supplied u16. It must be
/// greater or equal to 100 and less than 1000.
///
/// # Example
///
/// ```
/// use async_nats::status::StatusCode;
///
/// let ok = StatusCode::from_u16(200).unwrap();
/// assert_eq!(ok, StatusCode::OK);
///
/// let err = StatusCode::from_u16(99);
/// assert!(err.is_err());
///
/// let err = StatusCode::from_u16(1000);
/// assert!(err.is_err());
/// ```
#[inline]
pub fn from_u16(src: u16) -> Result<StatusCode, InvalidStatusCode> {
if !(100..1000).contains(&src) {
return Err(InvalidStatusCode::new());
}
NonZeroU16::new(src)
.map(StatusCode)
.ok_or_else(InvalidStatusCode::new)
}
/// Converts a &[u8] to a status code
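    ///
    /// # Example
    ///
    /// (Sketch added for illustration; mirrors the `from_u16` example above.)
    ///
    /// ```
    /// use async_nats::status::StatusCode;
    ///
    /// let ok = StatusCode::from_bytes(b"200").unwrap();
    /// assert_eq!(ok, StatusCode::OK);
    ///
    /// let err = StatusCode::from_bytes(b"99");
    /// assert!(err.is_err());
    /// ```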
pub fn from_bytes(src: &[u8]) -> Result<StatusCode, InvalidStatusCode> {
if src.len() != 3 {
return Err(InvalidStatusCode::new());
}
let a = src[0].wrapping_sub(b'0') as u16;
let b = src[1].wrapping_sub(b'0') as u16;
let c = src[2].wrapping_sub(b'0') as u16;
if a == 0 || a > 9 || b > 9 || c > 9 {
return Err(InvalidStatusCode::new());
}
let status = (a * 100) + (b * 10) + c;
NonZeroU16::new(status)
.map(StatusCode)
.ok_or_else(InvalidStatusCode::new)
}
/// Returns the `u16` corresponding to this `StatusCode`.
///
/// # Example
///
/// ```
/// let status = async_nats::StatusCode::OK;
/// assert_eq!(status.as_u16(), 200);
/// ```
#[inline]
pub fn as_u16(&self) -> u16 {
(*self).into()
}
/// Check if status is within 100-199.
#[inline]
pub fn is_informational(&self) -> bool {
(100..200).contains(&self.0.get())
}
/// Check if status is within 200-299.
#[inline]
pub fn | (&self) -> bool {
(200..300).contains(&self.0.get())
}
/// Check if status is within 300-399.
#[inline]
pub fn is_redirection(&self) -> bool {
(300..400).contains(&self.0.get())
}
/// Check if status is within 400-499.
#[inline]
pub fn is_client_error(&self) -> bool {
(400..500).contains(&self.0.get())
}
/// Check if status is within 500-599.
#[inline]
pub fn is_server_error(&self) -> bool {
(500..600).contains(&self.0.get())
}
}
impl fmt::Debug for StatusCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.0, f)
}
}
/// Formats the status code.
///
/// # Example
///
/// ```
/// # use async_nats::StatusCode;
/// assert_eq!(format!("{}", StatusCode::OK), "200");
/// ```
impl fmt::Display for StatusCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// TODO(caspervonb) display a canonical statically known reason / human readable description of the status
write!(f, "{}", u16::from(*self))
}
}
impl Default for StatusCode {
#[inline]
fn default() -> StatusCode {
StatusCode::OK
}
}
impl PartialEq<u16> for StatusCode {
#[inline]
fn eq(&self, other: &u16) -> bool {
self.as_u16() == *other
}
}
impl PartialEq<StatusCode> for u16 {
#[inline]
fn eq(&self, other: &StatusCode) -> bool {
*self == other.as_u16()
}
}
impl From<StatusCode> for u16 {
#[inline]
fn from(status: StatusCode) -> u16 {
status.0.get()
}
}
impl FromStr for StatusCode {
type Err = InvalidStatusCode;
fn from_str(s: &str) -> Result<StatusCode, InvalidStatusCode> {
StatusCode::from_bytes(s.as_ref())
}
}
impl<'a> From<&'a StatusCode> for StatusCode {
#[inline]
fn from(t: &'a StatusCode) -> Self {
*t
}
}
impl<'a> TryFrom<&'a [u8]> for StatusCode {
type Error = InvalidStatusCode;
#[inline]
fn try_from(t: &'a [u8]) -> Result<Self, Self::Error> {
StatusCode::from_bytes(t)
}
}
impl<'a> TryFrom<&'a str> for StatusCode {
type Error = InvalidStatusCode;
#[inline]
fn try_from(t: &'a str) -> Result<Self, Self::Error> {
t.parse()
}
}
impl TryFrom<u16> for StatusCode {
type Error = InvalidStatusCode;
#[inline]
fn try_from(t: u16) -> Result<Self, Self::Error> {
StatusCode::from_u16(t)
}
}
impl StatusCode {
    pub const IDLE_HEARTBEAT: StatusCode = StatusCode(unsafe { NonZeroU16::new_unchecked(100) });
pub const OK: StatusCode = StatusCode(unsafe { NonZeroU16::new_unchecked(200) });
pub const NOT_FOUND: StatusCode = StatusCode(unsafe { NonZeroU16::new_unchecked(404) });
pub const TIMEOUT: StatusCode = StatusCode(unsafe { NonZeroU16::new_unchecked(408) });
pub const NO_RESPONDERS: StatusCode = StatusCode(unsafe { NonZeroU16::new_unchecked(503) });
}
| is_success |
manifold-data-deprovision-button.tsx | import { h, Component, Prop, Element, Watch, Event, EventEmitter } from '@stencil/core';
import { GraphqlFetch } from '../../utils/graphqlFetch';
import { connection } from '../../global/app';
import logger, { loadMark } from '../../utils/logger';
import {
DeleteResourceMutation,
ResourceIdQuery,
ResourceIdQueryVariables,
DeleteResourceMutationVariables,
} from '../../types/graphql';
import resourceIdQuery from '../queries/resource-id.graphql';
import deleteMutation from './delete.graphql';
interface SuccessMessage {
message: string;
resourceLabel: string;
resourceId: string;
ownerId?: string;
}
interface ErrorMessage {
message: string;
ownerId?: string;
resourceId?: string;
resourceLabel: string;
}
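// Usage sketch (illustrative markup, not taken from this file). Stencil maps
// the camelCase props below to dash-case attributes, and the component is
// typically rendered inside a `<manifold-connection>` that supplies
// `graphqlFetch`:
//
//   <manifold-data-deprovision-button resource-label="my-resource">
//     Deprovision
//   </manifold-data-deprovision-button>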
@Component({ tag: 'manifold-data-deprovision-button' })
export class ManifoldDataDeprovisionButton {
@Element() el: HTMLElement;
/** _(hidden)_ Passed by `<manifold-connection>` */
@Prop() graphqlFetch?: GraphqlFetch = connection.graphqlFetch;
@Prop() disabled?: boolean;
@Prop() ownerId?: string;
/** resource ID */
@Prop({ mutable: true }) resourceId?: string = '';
/** The label of the resource to deprovision */
@Prop() resourceLabel?: string;
@Prop() loading?: boolean = false;
@Event({ eventName: 'manifold-deprovisionButton-click', bubbles: true }) click: EventEmitter;
@Event({ eventName: 'manifold-deprovisionButton-error', bubbles: true }) error: EventEmitter;
@Event({ eventName: 'manifold-deprovisionButton-success', bubbles: true }) success: EventEmitter;
@Watch('resourceLabel') labelChange(newLabel: string) {
    // fetch a new ID whenever the label changes
this.fetchResourceId(newLabel);
}
@loadMark()
componentWillLoad() {
// fetch resource ID
if (this.resourceLabel && !this.resourceId) {
this.fetchResourceId(this.resourceLabel);
}
}
async deprovision() {
if (!this.graphqlFetch || this.loading) {
return;
}
if (!this.resourceId) {
console.error('Property “resourceId” is missing');
return;
}
this.click.emit({
resourceId: this.resourceId,
resourceLabel: this.resourceLabel || '',
ownerId: this.ownerId,
});
// Note(drew): because we send resourceId here, we DO NOT need owner
const variables: DeleteResourceMutationVariables = { resourceId: this.resourceId };
const { data, errors } = await this.graphqlFetch<DeleteResourceMutation>({
query: deleteMutation,
variables,
element: this.el,
});
| // success
const success: SuccessMessage = {
message: `${data.deleteResource.data.label} successfully deleted`,
ownerId: this.ownerId,
resourceId: data.deleteResource.data.id,
resourceLabel: data.deleteResource.data.label,
};
this.success.emit(success);
}
if (errors) {
errors.forEach(({ message }) => {
const error: ErrorMessage = {
message,
ownerId: this.ownerId,
resourceLabel: this.resourceLabel || '',
resourceId: this.resourceId,
};
this.error.emit(error);
});
}
}
async fetchResourceId(resourceLabel: string) {
if (!this.graphqlFetch) {
return;
}
const variables: ResourceIdQueryVariables = { resourceLabel, owner: this.ownerId };
const { data } = await this.graphqlFetch<ResourceIdQuery>({
query: resourceIdQuery,
variables,
element: this.el,
});
if (data && data.resource) {
this.resourceId = data.resource.id;
}
}
@logger()
render() {
return (
<button
type="submit"
onClick={() => this.deprovision()}
disabled={this.disabled || !this.resourceId}
>
<slot />
</button>
);
}
} | if (data && data.deleteResource) { |
__init__.py | """
Text parsers to find URLs in content.
Every URL item should contain:
- url
- location(`filepath:row:column`)
"""
from abc import abstractmethod
from typing import List | def __init__(self, url: str, path: str, row: int, column: int):
"""init link object
:param str url: link's href
:param str path: where found this link, file path
:param int row: where found this link, line number
:param int column: where found this link, chars after line beginning
"""
self.__url = url
self.__path = path
self.__row = row
self.__column = column
@property
def url(self) -> str:
return self.__url
@property
def path(self) -> str:
return self.__path
@property
def row(self) -> int:
return self.__row
@property
def column(self) -> int:
return self.__column
@property
def location(self) -> str:
return f"{self.path}:{self.row}:{self.column}"
@path.setter
def path(self, other: str):
self.__path = other
class Parser:
@abstractmethod
def parse(self, text: str) -> List[Link]:
pass
@abstractmethod
def parse_file(self, path: str) -> List[Link]:
pass |
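# Minimal concrete-parser sketch (illustrative, not part of this module):
# a regex-based implementation of the Parser interface above.
#
#   import re
#
#   class RegexParser(Parser):
#       URL_RE = re.compile(r"https?://[^\s)\"'>]+")
#
#       def parse(self, text: str) -> List[Link]:
#           links = []
#           for row, line in enumerate(text.splitlines(), start=1):
#               for m in self.URL_RE.finditer(line):
#                   links.append(Link(m.group(0), "<string>", row, m.start()))
#           return links
#
#       def parse_file(self, path: str) -> List[Link]:
#           with open(path, encoding="utf-8") as f:
#               links = self.parse(f.read())
#           for link in links:
#               link.path = path
#           return links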
class Link: |
properties.py | # orm/properties.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""MapperProperty implementations.
This is a private module which defines the behavior of individual ORM-
mapped attributes.
"""
from sqlalchemy import sql, util, log, exc as sa_exc
from sqlalchemy.sql.util import ClauseAdapter, criterion_as_pairs, \
join_condition, _shallow_annotate
from sqlalchemy.sql import operators, expression
from sqlalchemy.orm import attributes, dependency, mapper, \
object_mapper, strategies, configure_mappers
from sqlalchemy.orm.util import CascadeOptions, _class_to_mapper, \
_orm_annotate, _orm_deannotate
from sqlalchemy.orm.interfaces import MANYTOMANY, MANYTOONE, \
MapperProperty, ONETOMANY, PropComparator, StrategizedProperty
mapperlib = util.importlater("sqlalchemy.orm", "mapperlib")
NoneType = type(None)
__all__ = ('ColumnProperty', 'CompositeProperty', 'SynonymProperty',
'ComparableProperty', 'RelationshipProperty', 'RelationProperty')
from descriptor_props import CompositeProperty, SynonymProperty, \
ComparableProperty,ConcreteInheritedProperty
class ColumnProperty(StrategizedProperty):
"""Describes an object attribute that corresponds to a table column.
Public constructor is the :func:`.orm.column_property` function.
"""
def __init__(self, *columns, **kwargs):
"""Construct a ColumnProperty.
Note the public constructor is the :func:`.orm.column_property` function.
:param \*columns: The list of `columns` describes a single
object property. If there are multiple tables joined
together for the mapper, this list represents the equivalent
column as it appears across each table.
:param group:
:param deferred:
:param comparator_factory:
:param descriptor:
:param expire_on_flush:
:param extension:
"""
self._orig_columns = [expression._labeled(c) for c in columns]
self.columns = [expression._labeled(_orm_deannotate(c))
for c in columns]
self.group = kwargs.pop('group', None)
self.deferred = kwargs.pop('deferred', False)
self.instrument = kwargs.pop('_instrument', True)
self.comparator_factory = kwargs.pop('comparator_factory',
self.__class__.Comparator)
self.descriptor = kwargs.pop('descriptor', None)
self.extension = kwargs.pop('extension', None)
self.active_history = kwargs.pop('active_history', False)
self.expire_on_flush = kwargs.pop('expire_on_flush', True)
if 'doc' in kwargs:
self.doc = kwargs.pop('doc')
else:
for col in reversed(self.columns):
doc = getattr(col, 'doc', None)
if doc is not None:
self.doc = doc
break
else:
self.doc = None
if kwargs:
raise TypeError(
"%s received unexpected keyword argument(s): %s" % (
self.__class__.__name__,
', '.join(sorted(kwargs.keys()))))
util.set_creation_order(self)
if not self.instrument:
self.strategy_class = strategies.UninstrumentedColumnLoader
elif self.deferred:
self.strategy_class = strategies.DeferredColumnLoader
else:
self.strategy_class = strategies.ColumnLoader
def instrument_class(self, mapper):
if not self.instrument:
return
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc
)
def do_init(self):
super(ColumnProperty, self).do_init()
if len(self.columns) > 1 and \
set(self.parent.primary_key).issuperset(self.columns):
util.warn(
("On mapper %s, primary key column '%s' is being combined "
"with distinct primary key column '%s' in attribute '%s'. "
"Use explicit properties to give each column its own mapped "
"attribute name.") % (self.parent, self.columns[1],
self.columns[0], self.key))
def copy(self):
return ColumnProperty(
deferred=self.deferred,
group=self.group,
active_history=self.active_history,
*self.columns)
def _getcommitted(self, state, dict_, column,
passive=attributes.PASSIVE_OFF):
return state.get_impl(self.key).\
get_committed_value(state, dict_, passive=passive)
def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive):
if not self.instrument:
return
elif self.key in source_dict:
value = source_dict[self.key]
if not load:
dest_dict[self.key] = value
else:
impl = dest_state.get_impl(self.key)
impl.set(dest_state, dest_dict, value, None)
elif dest_state.has_identity and self.key not in dest_dict:
dest_state.expire_attributes(dest_dict, [self.key])
class Comparator(PropComparator):
@util.memoized_instancemethod
def __clause_element__(self):
if self.adapter:
return self.adapter(self.prop.columns[0])
else:
return self.prop.columns[0]._annotate({
"parententity": self.mapper,
"parentmapper":self.mapper})
def operate(self, op, *other, **kwargs):
return op(self.__clause_element__(), *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
col = self.__clause_element__()
return op(col._bind_param(op, other), col, **kwargs)
# TODO: legacy..do we need this ? (0.5)
ColumnComparator = Comparator
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
log.class_logger(ColumnProperty)
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`.orm.relationship` function.
Of note here is the :class:`.RelationshipProperty.Comparator`
class, which implements comparison operations for scalar-
and collection-referencing mapped attributes.
"""
strategy_wildcard_key = 'relationship:*'
def __init__(self, argument,
secondary=None, primaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
backref=None,
back_populates=None,
post_update=False,
cascade=False, extension=None,
viewonly=False, lazy=True,
collection_class=None, passive_deletes=False,
passive_updates=True, remote_side=None,
enable_typechecks=True, join_depth=None,
comparator_factory=None,
single_parent=False, innerjoin=False,
doc=None,
active_history=False,
cascade_backrefs=True,
load_on_pending=False,
strategy_class=None, _local_remote_pairs=None,
query_class=None):
self.uselist = uselist
self.argument = argument
self.secondary = secondary
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.post_update = post_update
self.direction = None
self.viewonly = viewonly
self.lazy = lazy
self.single_parent = single_parent
self._user_defined_foreign_keys = foreign_keys
self.collection_class = collection_class
self.passive_deletes = passive_deletes
self.cascade_backrefs = cascade_backrefs
self.passive_updates = passive_updates
self.remote_side = remote_side
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.doc = doc
self.active_history = active_history
self.join_depth = join_depth
self.local_remote_pairs = _local_remote_pairs
self.extension = extension
self.load_on_pending = load_on_pending
self.comparator_factory = comparator_factory or \
RelationshipProperty.Comparator
self.comparator = self.comparator_factory(self, None)
util.set_creation_order(self)
if strategy_class:
self.strategy_class = strategy_class
        elif self.lazy == 'dynamic':
from sqlalchemy.orm import dynamic
self.strategy_class = dynamic.DynaLoader
else:
self.strategy_class = strategies.factory(self.lazy)
self._reverse_property = set()
if cascade is not False:
self.cascade = CascadeOptions(cascade)
else:
self.cascade = CascadeOptions("save-update, merge")
if self.passive_deletes == 'all' and \
("delete" in self.cascade or
"delete-orphan" in self.cascade):
raise sa_exc.ArgumentError(
"Can't set passive_deletes='all' in conjunction "
"with 'delete' or 'delete-orphan' cascade")
self.order_by = order_by
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive")
self.backref = None
else:
self.backref = backref
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(PropComparator):
"""Produce comparison operations for :func:`~.orm.relationship`-based
attributes."""
def __init__(self, prop, mapper, of_type=None, adapter=None):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self.mapper = mapper
self.adapter = adapter
if of_type:
self._of_type = _class_to_mapper(of_type)
def adapted(self, adapter):
"""Return a copy of this PropComparator which will use the
given adaption function on the local side of generated
expressions.
"""
return self.__class__(self.property, self.mapper,
getattr(self, '_of_type', None),
adapter)
@property
def parententity(self):
return self.property.parent
def __clause_element__(self):
elem = self.property.parent._with_polymorphic_selectable
if self.adapter:
return self.adapter(elem)
else:
return elem
def of_type(self, cls):
"""Produce a construct that represents a particular 'subtype' of
attribute for the parent class.
Currently this is usable in conjunction with :meth:`.Query.join`
and :meth:`.Query.outerjoin`.
"""
return RelationshipProperty.Comparator(
self.property,
self.mapper,
cls, adapter=self.adapter)
def in_(self, other):
"""Produce an IN clause - this is not implemented
for :func:`~.orm.relationship`-based attributes at this time.
"""
raise NotImplementedError('in_() not yet supported for '
'relationships. For a simple many-to-one, use '
'in_() against the set of foreign key values.')
__hash__ = None
def __eq__(self, other):
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
"""
if isinstance(other, (NoneType, expression._Null)):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(self.property._optimized_compare(
None, adapt_source=self.adapter))
elif self.property.uselist:
raise sa_exc.InvalidRequestError("Can't compare a colle"
"ction to an object or collection; use "
"contains() to test for membership.")
else:
return _orm_annotate(self.property._optimized_compare(other,
adapt_source=self.adapter))
def _criterion_exists(self, criterion=None, **kwargs):
if getattr(self, '_of_type', None):
target_mapper = self._of_type
to_selectable = target_mapper._with_polymorphic_selectable
if self.property._is_self_referential:
to_selectable = to_selectable.alias()
single_crit = target_mapper._single_table_criterion
if single_crit is not None:
if criterion is not None:
criterion = single_crit & criterion
else:
criterion = single_crit
else:
to_selectable = None
if self.adapter:
source_selectable = self.__clause_element__()
else:
source_selectable = None
pj, sj, source, dest, secondary, target_adapter = \
self.property._create_joins(dest_polymorphic=True,
dest_selectable=to_selectable,
source_selectable=source_selectable)
for k in kwargs:
crit = getattr(self.property.mapper.class_, k) == kwargs[k]
if criterion is None:
criterion = crit
else:
criterion = criterion & crit
# annotate the *local* side of the join condition, in the case
# of pj + sj this is the full primaryjoin, in the case of just
            # pj it's the local side of the primaryjoin.
if sj is not None:
j = _orm_annotate(pj) & sj
else:
j = _orm_annotate(pj, exclude=self.property.remote_side)
if criterion is not None and target_adapter:
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
criterion = criterion._annotate({'no_replacement_traverse': True})
crit = j & criterion
return sql.exists([1], crit, from_obj=dest).\
correlate(source._annotate({'_orm_adapt':True}))
def any(self, criterion=None, **kwargs):
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id)
:meth:`~.RelationshipProperty.Comparator.any` is only
valid for collections, i.e. a :func:`.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.RelationshipProperty.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'any()' not implemented for scalar "
"attributes. Use has()."
)
return self._criterion_exists(criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Produce an expression that tests a scalar reference against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.id==my_table.related_id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.has` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.has` is only
valid for scalar references, i.e. a :func:`.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.RelationshipProperty.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
"'has()' not implemented for collections. "
"Use any().")
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
only valid for a collection, i.e. a
:func:`~.orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.RelationshipProperty.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.RelationshipProperty.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.RelationshipProperty.Comparator.contains`
will **not** work with many-to-many collections when
used in queries that move beyond simple AND
conjunctions, such as multiple
:meth:`~.RelationshipProperty.Comparator.contains`
expressions joined by OR. In such cases subqueries or
explicit "outer joins" will need to be used instead.
See :meth:`~.RelationshipProperty.Comparator.any` for
a less-performant alternative using EXISTS, or refer
to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins`
for more details on constructing outer joins.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'contains' not implemented for scalar "
"attributes. Use ==")
clause = self.property._optimized_compare(other,
adapt_source=self.adapter)
if self.property.secondaryjoin is not None:
clause.negation_clause = \
self.__negated_contains_or_equals(other)
return clause
def __negated_contains_or_equals(self, other):
if self.property.direction == MANYTOONE:
state = attributes.instance_state(other)
def state_bindparam(x, state, col):
o = state.obj() # strong ref
return sql.bindparam(x, unique=True, callable_=lambda : \
self.property.mapper._get_committed_attr_by_column(o,
col))
def adapt(col):
if self.adapter:
return self.adapter(col)
else:
return col
if self.property._use_get:
return sql.and_(*[
sql.or_(
adapt(x) != state_bindparam(adapt(x), state, y),
adapt(x) == None)
for (x, y) in self.property.local_remote_pairs])
criterion = sql.and_(*[x==y for (x, y) in
zip(
self.property.mapper.primary_key,
self.property.\
mapper.\
primary_key_from_instance(other))
])
return ~self._criterion_exists(criterion)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`~.expression.not_`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` in
conjunction with :func:`~.expression.not_` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
"""
if isinstance(other, (NoneType, expression._Null)):
if self.property.direction == MANYTOONE:
return sql.or_(*[x != None for x in
self.property._calculated_foreign_keys])
else:
return self._criterion_exists()
elif self.property.uselist:
raise sa_exc.InvalidRequestError("Can't compare a collection"
" to an object or collection; use "
"contains() to test for membership.")
else:
return self.__negated_contains_or_equals(other)
@util.memoized_property
def property(self):
if mapperlib.module._new_mappers:
configure_mappers()
return self.prop
def compare(self, op, value,
value_is_parent=False,
alias_secondary=True):
if op == operators.eq:
if value is None:
if self.uselist:
return ~sql.exists([1], self.primaryjoin)
else:
return self._optimized_compare(None,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return self._optimized_compare(value,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return op(self.comparator, value)
def _optimized_compare(self, value, value_is_parent=False,
adapt_source=None,
alias_secondary=True):
if value is not None:
value = attributes.instance_state(value)
return self._get_strategy(strategies.LazyLoader).lazy_clause(value,
reverse_direction=not value_is_parent,
alias_secondary=alias_secondary,
adapt_source=adapt_source)
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
def merge(self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load, _recursive):
if load:
for r in self._reverse_property:
if (source_state, r) in _recursive:
return
if not "merge" in self.cascade:
return
if self.key not in source_dict:
return
if self.uselist:
instances = source_state.get_impl(self.key).\
get(source_state, source_dict)
if hasattr(instances, '_sa_adapter'):
# convert collections to adapters to get a true iterator
instances = instances._sa_adapter
if load:
# for a full merge, pre-load the destination collection,
# so that individual _merge of each item pulls from identity
# map for those already present.
                # also assumes CollectionAttributeImpl behavior of loading
# "old" list in any case
dest_state.get_impl(self.key).get(dest_state, dest_dict)
dest_list = []
for current in instances:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(current_state, current_dict,
load=load, _recursive=_recursive)
if obj is not None:
dest_list.append(obj)
if not load:
coll = attributes.init_state_collection(dest_state,
dest_dict, self.key)
for c in dest_list:
coll.append_without_event(c)
else:
dest_state.get_impl(self.key)._set_iterable(dest_state,
dest_dict, dest_list)
else:
current = source_dict[self.key]
if current is not None:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(current_state, current_dict,
load=load, _recursive=_recursive)
else:
obj = None
if not load:
dest_dict[self.key] = obj
else:
dest_state.get_impl(self.key).set(dest_state,
dest_dict, obj, None)
def cascade_iterator(self, type_, state, dict_, visited_states, halt_on=None):
#assert type_ in self.cascade
# only actively lazy load on the 'delete' cascade
if type_ != 'delete' or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
if type_ == 'save-update':
tuples = state.manager[self.key].impl.\
get_all_pending(state, dict_)
else:
tuples = state.value_as_iterable(dict_, self.key,
passive=passive)
skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \
not in self.cascade
for instance_state, c in tuples:
if instance_state in visited_states:
continue
if c is None:
# would like to emit a warning here, but
# would not be consistent with collection.append(None)
# current behavior of silently skipping.
# see [ticket:2229]
continue
instance_dict = attributes.instance_dict(c)
if halt_on and halt_on(instance_state):
continue
if skip_pending and not instance_state.key:
continue
instance_mapper = instance_state.manager.mapper
if not instance_mapper.isa(self.mapper.class_manager.mapper):
raise AssertionError("Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'" % (
self.key,
self.parent.class_,
c.__class__
))
visited_states.add(instance_state)
yield c, instance_mapper, instance_state, instance_dict
def _add_reverse_property(self, key):
other = self.mapper.get_property(key, _compile_mappers=False)
self._reverse_property.add(other)
other._reverse_property.add(self)
if not other.mapper.common_parent(self.parent):
raise sa_exc.ArgumentError('reverse_property %r on '
'relationship %s references relationship %s, which '
'does not reference mapper %s' % (key, self, other,
self.parent))
if self.direction in (ONETOMANY, MANYTOONE) and self.direction \
== other.direction:
raise sa_exc.ArgumentError('%s and back-reference %s are '
'both of the same direction %r. Did you mean to '
'set remote_side on the many-to-one side ?'
% (other, self, self.direction))
@util.memoized_property
def mapper(self):
"""Return the targeted :class:`.Mapper` for this
:class:`.RelationshipProperty`.
This is a lazy-initializing static attribute.
"""
if isinstance(self.argument, type):
mapper_ = mapper.class_mapper(self.argument,
compile=False)
elif isinstance(self.argument, mapper.Mapper):
mapper_ = self.argument
elif util.callable(self.argument):
# accept a callable to suit various deferred-
# configurational schemes
mapper_ = mapper.class_mapper(self.argument(),
compile=False)
else:
raise sa_exc.ArgumentError("relationship '%s' expects "
"a class or a mapper argument (received: %s)"
% (self.key, type(self.argument)))
assert isinstance(mapper_, mapper.Mapper), mapper_
return mapper_
@util.memoized_property
@util.deprecated("0.7", "Use .target")
def table(self):
"""Return the selectable linked to this
:class:`.RelationshipProperty` object's target
:class:`.Mapper`."""
return self.target
def do_init(self):
self._check_conflicts()
self._process_dependent_arguments()
self._determine_joins()
self._determine_synchronize_pairs()
self._determine_direction()
self._determine_local_remote_pairs()
self._post_init()
self._generate_backref()
super(RelationshipProperty, self).do_init()
def _check_conflicts(self):
"""Test that this relationship is legal, warn about
inheritance conflicts."""
if not self.is_primary() \
and not mapper.class_mapper(
self.parent.class_,
compile=False).has_property(self.key):
raise sa_exc.ArgumentError("Attempting to assign a new "
"relationship '%s' to a non-primary mapper on "
"class '%s'. New relationships can only be added "
"to the primary mapper, i.e. the very first mapper "
"created for class '%s' " % (self.key,
self.parent.class_.__name__,
self.parent.class_.__name__))
# check for conflicting relationship() on superclass
if not self.parent.concrete:
for inheriting in self.parent.iterate_to_root():
if inheriting is not self.parent \
and inheriting.has_property(self.key):
util.warn("Warning: relationship '%s' on mapper "
"'%s' supersedes the same relationship "
"on inherited mapper '%s'; this can "
"cause dependency issues during flush"
% (self.key, self.parent, inheriting))
def _process_dependent_arguments(self):
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
# by declarative "string configs" and some recipes.
for attr in (
'order_by',
'primaryjoin',
'secondaryjoin',
'secondary',
'_user_defined_foreign_keys',
'remote_side',
):
attr_value = getattr(self, attr)
if util.callable(attr_value):
setattr(self, attr, attr_value())
# remove "annotations" which are present if mapped class
# descriptors are used to create the join expression.
for attr in 'primaryjoin', 'secondaryjoin':
val = getattr(self, attr)
if val is not None:
setattr(self, attr, _orm_deannotate(
expression._only_column_elements(val, attr))
)
# ensure expressions in self.order_by, foreign_keys,
# remote_side are all columns, not strings.
if self.order_by is not False and self.order_by is not None:
self.order_by = [
expression._only_column_elements(x, "order_by")
for x in
util.to_list(self.order_by)]
self._user_defined_foreign_keys = \
util.column_set(
expression._only_column_elements(x, "foreign_keys")
for x in util.to_column_set(
self._user_defined_foreign_keys
))
self.remote_side = \
util.column_set(
expression._only_column_elements(x, "remote_side")
for x in
util.to_column_set(self.remote_side))
self.target = self.mapper.mapped_table
if self.cascade.delete_orphan:
self.mapper.primary_mapper().delete_orphans.append(
(self.key, self.parent.class_)
)
def _determine_joins(self):
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError("Property '" + self.key
+ "' specified with secondary join condition but "
"no secondary argument")
# if join conditions were not specified, figure them out based
# on foreign keys
def _search_for_join(mapper, table):
# find a join between the given mapper's mapped table and
# the given table. will try the mapper's local table first
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
return join_condition(mapper.mapped_table, table,
a_subset=mapper.local_table)
try:
if self.secondary is not None:
if self.secondaryjoin is None:
self.secondaryjoin = _search_for_join(self.mapper,
self.secondary)
if self.primaryjoin is None:
self.primaryjoin = _search_for_join(self.parent,
self.secondary)
else:
if self.primaryjoin is None:
self.primaryjoin = _search_for_join(self.parent,
self.target)
except sa_exc.ArgumentError, e:
raise sa_exc.ArgumentError("Could not determine join "
"condition between parent/child tables on "
"relationship %s. Specify a 'primaryjoin' "
"expression. If 'secondary' is present, "
"'secondaryjoin' is needed as well."
% self)
def _columns_are_mapped(self, *cols):
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.Relationship`.
"""
for c in cols:
if self.secondary is not None \
and self.secondary.c.contains_column(c):
continue
if not self.parent.mapped_table.c.contains_column(c) and \
not self.target.c.contains_column(c):
return False
return True
def _sync_pairs_from_join(self, join_condition, primary):
"""Determine a list of "source"/"destination" column pairs
based on the given join condition, as well as the
foreign keys argument.
"source" would be a column referenced by a foreign key,
and "destination" would be the column who has a foreign key
reference to "source".
"""
fks = self._user_defined_foreign_keys
# locate pairs
eq_pairs = criterion_as_pairs(join_condition,
consider_as_foreign_keys=fks,
any_operator=self.viewonly)
# couldn't find any fks, but we have
# "secondary" - assume the "secondary" columns
# are the fks
if not eq_pairs and \
self.secondary is not None and \
not fks:
fks = set(self.secondary.c)
eq_pairs = criterion_as_pairs(join_condition,
consider_as_foreign_keys=fks,
any_operator=self.viewonly)
if eq_pairs:
util.warn("No ForeignKey objects were present "
"in secondary table '%s'. Assumed referenced "
"foreign key columns %s for join condition '%s' "
"on relationship %s" % (
self.secondary.description,
", ".join(sorted(["'%s'" % col for col in fks])),
join_condition,
self
))
# Filter down to just the column pairs that are mapped.
# If viewonly, allow pairs where the FK col
# was part of "foreign keys" - the column it references
# may be in an un-mapped table - see
# test.orm.test_relationships.ViewOnlyComplexJoin.test_basic
# for an example of this.
eq_pairs = [(l, r) for (l, r) in eq_pairs
if self._columns_are_mapped(l, r)
or self.viewonly and
r in fks]
if eq_pairs:
return eq_pairs
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if not self.viewonly and criterion_as_pairs(join_condition,
consider_as_foreign_keys=self._user_defined_foreign_keys,
any_operator=True):
err = "Could not locate any "\
"foreign-key-equated, locally mapped column "\
"pairs for %s "\
"condition '%s' on relationship %s." % (
primary and 'primaryjoin' or 'secondaryjoin',
join_condition,
self
)
if not self._user_defined_foreign_keys:
err += " Ensure that the "\
"referencing Column objects have a "\
"ForeignKey present, or are otherwise part "\
"of a ForeignKeyConstraint on their parent "\
"Table, or specify the foreign_keys parameter "\
"to this relationship."
err += " For more "\
"relaxed rules on join conditions, the "\
"relationship may be marked as viewonly=True."
raise sa_exc.ArgumentError(err)
else:
|
def _determine_synchronize_pairs(self):
"""Resolve 'primary'/foreign' column pairs from the primaryjoin
and secondaryjoin arguments.
"""
if self.local_remote_pairs:
if not self._user_defined_foreign_keys:
raise sa_exc.ArgumentError(
"foreign_keys argument is "
"required with _local_remote_pairs argument")
self.synchronize_pairs = []
for l, r in self.local_remote_pairs:
if r in self._user_defined_foreign_keys:
self.synchronize_pairs.append((l, r))
elif l in self._user_defined_foreign_keys:
self.synchronize_pairs.append((r, l))
else:
self.synchronize_pairs = self._sync_pairs_from_join(
self.primaryjoin,
True)
self._calculated_foreign_keys = util.column_set(
r for (l, r) in
self.synchronize_pairs)
if self.secondaryjoin is not None:
self.secondary_synchronize_pairs = self._sync_pairs_from_join(
self.secondaryjoin,
False)
self._calculated_foreign_keys.update(
r for (l, r) in
self.secondary_synchronize_pairs)
else:
self.secondary_synchronize_pairs = None
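# For the hypothetical user/address schema sketched above, synchronize_pairs
# would come out as [(user.c.id, address.c.user_id)]: the first element of
# each pair is the referenced column, the second is the column carrying the
# ForeignKey, and _calculated_foreign_keys collects those second elements.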
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
many to many.
This is derived from the primaryjoin, presence of "secondary",
and in the case of self-referential the "remote side".
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
elif self._refers_to_parent_table():
# self referential defaults to ONETOMANY unless the "remote"
# side is present and does not reference any foreign key
# columns
if self.local_remote_pairs:
remote = [r for (l, r) in self.local_remote_pairs]
elif self.remote_side:
remote = self.remote_side
else:
remote = None
if not remote or self._calculated_foreign_keys.difference(l for (l,
r) in self.synchronize_pairs).intersection(remote):
self.direction = ONETOMANY
else:
self.direction = MANYTOONE
else:
parentcols = util.column_set(self.parent.mapped_table.c)
targetcols = util.column_set(self.mapper.mapped_table.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(
self._calculated_foreign_keys)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(
self._calculated_foreign_keys)
if onetomany_fk and manytoone_fk:
# fks on both sides. do the same test only based on the
# local side.
referents = [c for (c, f) in self.synchronize_pairs]
onetomany_local = parentcols.intersection(referents)
manytoone_local = targetcols.intersection(referents)
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns are present in both the parent "
"and the child's mapped tables. Specify "
"'foreign_keys' argument." % self)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError("Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self)
if self.cascade.delete_orphan and not self.single_parent \
and (self.direction is MANYTOMANY or self.direction
is MANYTOONE):
util.warn('On %s, delete-orphan cascade is not supported '
'on a many-to-many or many-to-one relationship '
'when single_parent is not set. Set '
'single_parent=True on the relationship().'
% self)
if self.direction is MANYTOONE and self.passive_deletes:
util.warn("On %s, 'passive_deletes' is normally configured "
"on one-to-many, one-to-one, many-to-many "
"relationships only."
% self)
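# Rough decision table for the common non-self-referential, non-'secondary'
# case, again using the hypothetical user/address schema:
#   foreign key columns on the target table -> ONETOMANY (user -> addresses)
#   foreign key columns on the parent table -> MANYTOONE (address -> user)
#   foreign keys on both sides -> retry using only the local columns, and
#   raise ArgumentError asking for 'foreign_keys' if that is still ambiguous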
def _determine_local_remote_pairs(self):
"""Determine pairs of columns representing "local" to
"remote", where "local" columns are on the parent mapper,
"remote" are on the target mapper.
These pairs are used on the load side only to generate
lazy loading clauses.
"""
if not self.local_remote_pairs and not self.remote_side:
# the most common, trivial case. Derive
# local/remote pairs from the synchronize pairs.
eq_pairs = util.unique_list(
self.synchronize_pairs +
(self.secondary_synchronize_pairs or []))
if self.direction is MANYTOONE:
self.local_remote_pairs = [(r, l) for l, r in eq_pairs]
else:
self.local_remote_pairs = eq_pairs
# "remote_side" specified, derive from the primaryjoin
# plus remote_side, similarly to how synchronize_pairs
# were determined.
elif self.remote_side:
if self.local_remote_pairs:
raise sa_exc.ArgumentError('remote_side argument is '
'redundant against more detailed '
'_local_remote_pairs argument.')
if self.direction is MANYTOONE:
self.local_remote_pairs = [(r, l) for (l, r) in
criterion_as_pairs(self.primaryjoin,
consider_as_referenced_keys=self.remote_side,
any_operator=True)]
else:
self.local_remote_pairs = \
criterion_as_pairs(self.primaryjoin,
consider_as_foreign_keys=self.remote_side,
any_operator=True)
if not self.local_remote_pairs:
raise sa_exc.ArgumentError('Relationship %s could '
'not determine any local/remote column '
'pairs from remote side argument %r'
% (self, self.remote_side))
# else local_remote_pairs were sent explicitly via
# ._local_remote_pairs.
# create local_side/remote_side accessors
self.local_side = util.ordered_column_set(
l for l, r in self.local_remote_pairs)
self.remote_side = util.ordered_column_set(
r for l, r in self.local_remote_pairs)
# check that the non-foreign key column in the local/remote
# collection is mapped. The foreign key
# which the individual mapped column references directly may
# itself be in a non-mapped table; see
# test.orm.test_relationships.ViewOnlyComplexJoin.test_basic
# for an example of this.
if self.direction is ONETOMANY:
for col in self.local_side:
if not self._columns_are_mapped(col):
raise sa_exc.ArgumentError(
"Local column '%s' is not "
"part of mapping %s. Specify remote_side "
"argument to indicate which column lazy join "
"condition should compare against." % (col,
self.parent))
elif self.direction is MANYTOONE:
for col in self.remote_side:
if not self._columns_are_mapped(col):
raise sa_exc.ArgumentError(
"Remote column '%s' is not "
"part of mapping %s. Specify remote_side "
"argument to indicate which column lazy join "
"condition should bind." % (col, self.mapper))
def _generate_backref(self):
if not self.is_primary():
return
if self.backref is not None and not self.back_populates:
if isinstance(self.backref, basestring):
backref_key, kwargs = self.backref, {}
else:
backref_key, kwargs = self.backref
mapper = self.mapper.primary_mapper()
if mapper.has_property(backref_key):
raise sa_exc.ArgumentError("Error creating backref "
"'%s' on relationship '%s': property of that "
"name exists on mapper '%s'" % (backref_key,
self, mapper))
if self.secondary is not None:
pj = kwargs.pop('primaryjoin', self.secondaryjoin)
sj = kwargs.pop('secondaryjoin', self.primaryjoin)
else:
pj = kwargs.pop('primaryjoin', self.primaryjoin)
sj = kwargs.pop('secondaryjoin', None)
if sj:
raise sa_exc.InvalidRequestError(
"Can't assign 'secondaryjoin' on a backref against "
"a non-secondary relationship."
)
foreign_keys = kwargs.pop('foreign_keys',
self._user_defined_foreign_keys)
parent = self.parent.primary_mapper()
kwargs.setdefault('viewonly', self.viewonly)
kwargs.setdefault('post_update', self.post_update)
kwargs.setdefault('passive_updates', self.passive_updates)
self.back_populates = backref_key
relationship = RelationshipProperty(
parent,
self.secondary,
pj,
sj,
foreign_keys=foreign_keys,
back_populates=self.key,
**kwargs
)
mapper._configure_property(backref_key, relationship)
if self.back_populates:
self._add_reverse_property(self.back_populates)
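# Usage sketch (hypothetical mapping): relationship(Address, backref='user')
# reaches this method with self.backref == 'user'; it builds the reverse
# RelationshipProperty on the Address mapper with primaryjoin/secondaryjoin
# and foreign_keys mirrored from this side, and wires back_populates in both
# directions so the two attributes stay in sync.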
def _post_init(self):
self.logger.info('%s setup primary join %s', self,
self.primaryjoin)
self.logger.info('%s setup secondary join %s', self,
self.secondaryjoin)
self.logger.info('%s synchronize pairs [%s]', self,
','.join('(%s => %s)' % (l, r) for (l, r) in
self.synchronize_pairs))
self.logger.info('%s secondary synchronize pairs [%s]', self,
','.join('(%s => %s)' % (l, r) for (l, r) in
self.secondary_synchronize_pairs or []))
self.logger.info('%s local/remote pairs [%s]', self,
','.join('(%s / %s)' % (l, r) for (l, r) in
self.local_remote_pairs))
self.logger.info('%s relationship direction %s', self,
self.direction)
if self.uselist is None:
self.uselist = self.direction is not MANYTOONE
if not self.viewonly:
self._dependency_processor = \
dependency.DependencyProcessor.from_relationship(self)
@util.memoized_property
def _use_get(self):
"""memoize the 'use_get' attribute of this RelationshipLoader's
lazyloader."""
strategy = self._get_strategy(strategies.LazyLoader)
return strategy.use_get
def _refers_to_parent_table(self):
pt = self.parent.mapped_table
mt = self.mapper.mapped_table
for c, f in self.synchronize_pairs:
if (
pt.is_derived_from(c.table) and \
pt.is_derived_from(f.table) and \
mt.is_derived_from(c.table) and \
mt.is_derived_from(f.table)
):
return True
else:
return False
@util.memoized_property
def _is_self_referential(self):
return self.mapper.common_parent(self.parent)
def per_property_preprocessors(self, uow):
if not self.viewonly and self._dependency_processor:
self._dependency_processor.per_property_preprocessors(uow)
def _create_joins(self, source_polymorphic=False,
source_selectable=None, dest_polymorphic=False,
dest_selectable=None, of_type=None):
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
source_selectable = self.parent._with_polymorphic_selectable
aliased = False
if dest_selectable is None:
if dest_polymorphic and self.mapper.with_polymorphic:
dest_selectable = self.mapper._with_polymorphic_selectable
aliased = True
else:
dest_selectable = self.mapper.mapped_table
if self._is_self_referential and source_selectable is None:
dest_selectable = dest_selectable.alias()
aliased = True
else:
aliased = True
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable,
{'no_replacement_traverse':True})
aliased = aliased or (source_selectable is not None)
primaryjoin, secondaryjoin, secondary = self.primaryjoin, \
self.secondaryjoin, self.secondary
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the "_adjust_for_single_table_inheritance()"
# method in Query.
dest_mapper = of_type or self.mapper
single_crit = dest_mapper._single_table_criterion
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if aliased:
if secondary is not None:
secondary = secondary.alias()
primary_aliasizer = ClauseAdapter(secondary)
secondary_aliasizer = \
ClauseAdapter(dest_selectable,
equivalents=self.mapper._equivalent_columns).\
chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = \
ClauseAdapter(secondary).\
chain(ClauseAdapter(source_selectable,
equivalents=self.parent._equivalent_columns))
secondaryjoin = \
secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(dest_selectable,
exclude=self.local_side,
equivalents=self.mapper._equivalent_columns)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(source_selectable,
exclude=self.remote_side,
equivalents=self.parent._equivalent_columns))
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.include = target_adapter.exclude = None
else:
target_adapter = None
if source_selectable is None:
source_selectable = self.parent.local_table
if dest_selectable is None:
dest_selectable = self.mapper.local_table
return (
primaryjoin,
secondaryjoin,
source_selectable,
dest_selectable,
secondary,
target_adapter,
)
PropertyLoader = RelationProperty = RelationshipProperty
log.class_logger(RelationshipProperty)
| if self._user_defined_foreign_keys:
raise sa_exc.ArgumentError("Could not determine "
"relationship direction for %s condition "
"'%s', on relationship %s, using manual "
"'foreign_keys' setting. Do the columns "
"in 'foreign_keys' represent all, and "
"only, the 'foreign' columns in this join "
"condition? Does the %s Table already "
"have adequate ForeignKey and/or "
"ForeignKeyConstraint objects established "
"(in which case 'foreign_keys' is usually "
"unnecessary)?"
% (
primary and 'primaryjoin' or 'secondaryjoin',
join_condition,
self,
primary and 'mapped' or 'secondary'
))
else:
raise sa_exc.ArgumentError("Could not determine "
"relationship direction for %s condition "
"'%s', on relationship %s. Ensure that the "
"referencing Column objects have a "
"ForeignKey present, or are otherwise part "
"of a ForeignKeyConstraint on their parent "
"Table, or specify the foreign_keys parameter "
"to this relationship."
% (
primary and 'primaryjoin' or 'secondaryjoin',
join_condition,
self
)) |
forms.py | from flask_wtf import FlaskForm
from wtforms import BooleanField, HiddenField, StringField, SubmitField, ValidationError
from wtforms.validators import Length, Required
from .. models import EventFrameTemplateView
class CopyEventFrameTemplateViewForm(FlaskForm):
|
class EventFrameTemplateViewForm(FlaskForm):
name = StringField("Name", validators = [Required(), Length(1, 45)])
description = StringField("Description", validators = [Length(0, 255)])
default = BooleanField("Default")
selectable = BooleanField("Selectable", default = "checked")
eventFrameTemplateId = HiddenField()
eventFrameTemplateViewId = HiddenField()
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_name(self, field):
validationError = False
eventFrameTemplateView = EventFrameTemplateView.query.filter_by(EventFrameTemplateId = self.eventFrameTemplateId.data, Name = field.data).first()
if eventFrameTemplateView is not None:
if self.eventFrameTemplateViewId.data == "":
# Trying to add a new event frame template view using a name that already exists.
validationError = True
else:
if int(self.eventFrameTemplateViewId.data) != eventFrameTemplateView.EventFrameTemplateViewId:
# Trying to change the name of an event frame template view to a name that already exists.
validationError = True
if validationError is True:
raise ValidationError('The name "{}" already exists.'.format(field.data))
| name = StringField("Name", validators = [Required(), Length(1, 45)])
description = StringField("Description", validators = [Length(0, 255)])
default = BooleanField("Default")
eventFrameTemplateId = HiddenField()
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_name(self, field):
validationError = False
eventFrameTemplateView = EventFrameTemplateView.query.filter_by(EventFrameTemplateId = self.eventFrameTemplateId.data, Name = field.data).first()
if eventFrameTemplateView is not None:
# Trying to copy an eventFrameTemplateView using a name that already exists.
validationError = True
if validationError:
raise ValidationError(f'The name "{field.data}" already exists.') |
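# Unlike EventFrameTemplateViewForm.validate_name above, this copy form has no
# eventFrameTemplateViewId to exclude, so any existing view name under the same
# EventFrameTemplateId is rejected outright.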
guarded-users-api-client.service.ts | /**
* This file is auto-generated by the API client generator.
* https://github.com/flowup/api-client-generator
*
* Avoid editing this file manually unless necessary.
* Please report any bugs so they can be addressed in future versions.
*/
/* tslint:disable */
/* eslint-disable */
import { HttpClient, HttpResponse, HttpEvent } from '@angular/common/http';
import { Inject, Injectable, Optional } from '@angular/core';
import { UsersAPIClientInterface } from './users-api-client.interface';
import { Observable } from 'rxjs';
import { tap } from 'rxjs/operators';
import { USE_DOMAIN, USE_HTTP_OPTIONS, UsersAPIClient } from './users-api-client.service';
import { DefaultHttpOptions, HttpOptions } from '../../types';
import * as models from '../../models';
import * as guards from '../../guards';
@Injectable()
export class | extends UsersAPIClient implements UsersAPIClientInterface {
constructor(
readonly httpClient: HttpClient,
@Optional() @Inject(USE_DOMAIN) domain?: string,
@Optional() @Inject(USE_HTTP_OPTIONS) options?: DefaultHttpOptions,
) {
super(httpClient, domain, options);
}
/**
* Get all users.
* This provides a dump of every user, in the order that they signed up for GitHub.
* Note: Pagination is powered exclusively by the since parameter. Use the Link
* header to get the URL for the next page of users.
*
* Response generated for [ 200 ] HTTP response code.
*/
getUsers(
args?: UsersAPIClientInterface['getUsersParams'],
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<models.Users>;
getUsers(
args?: UsersAPIClientInterface['getUsersParams'],
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<models.Users>>;
getUsers(
args?: UsersAPIClientInterface['getUsersParams'],
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<models.Users>>;
getUsers(
args: UsersAPIClientInterface['getUsersParams'] = {},
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<models.Users | HttpResponse<models.Users> | HttpEvent<models.Users>> {
return super.getUsers(args, requestHttpOptions, observe)
.pipe(tap((res: any) => guards.isUsers(res) || console.error(`TypeGuard for response 'models.Users' caught inconsistency.`, res)));
}
/**
* Get a single user.
* Response generated for [ 200 ] HTTP response code.
*/
getUsersUsername(
args: Exclude<UsersAPIClientInterface['getUsersUsernameParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<models.Users>;
getUsersUsername(
args: Exclude<UsersAPIClientInterface['getUsersUsernameParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<models.Users>>;
getUsersUsername(
args: Exclude<UsersAPIClientInterface['getUsersUsernameParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<models.Users>>;
getUsersUsername(
args: Exclude<UsersAPIClientInterface['getUsersUsernameParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<models.Users | HttpResponse<models.Users> | HttpEvent<models.Users>> {
return super.getUsersUsername(args, requestHttpOptions, observe)
.pipe(tap((res: any) => guards.isUsers(res) || console.error(`TypeGuard for response 'models.Users' caught inconsistency.`, res)));
}
/**
* If you are authenticated as the given user, you will see your private events. Otherwise, you'll only see public events.
* Response generated for [ default ] HTTP response code.
*/
getUsersUsernameEvents(
args: Exclude<UsersAPIClientInterface['getUsersUsernameEventsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<void>;
getUsersUsernameEvents(
args: Exclude<UsersAPIClientInterface['getUsersUsernameEventsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<void>>;
getUsersUsernameEvents(
args: Exclude<UsersAPIClientInterface['getUsersUsernameEventsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<void>>;
getUsersUsernameEvents(
args: Exclude<UsersAPIClientInterface['getUsersUsernameEventsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<void | HttpResponse<void> | HttpEvent<void>> {
return super.getUsersUsernameEvents(args, requestHttpOptions, observe);
}
/**
* This is the user's organization dashboard. You must be authenticated as the user to view this.
* Response generated for [ default ] HTTP response code.
*/
getUsersUsernameEventsOrg(
args: Exclude<UsersAPIClientInterface['getUsersUsernameEventsOrgParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<void>;
getUsersUsernameEventsOrg(
args: Exclude<UsersAPIClientInterface['getUsersUsernameEventsOrgParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<void>>;
getUsersUsernameEventsOrg(
args: Exclude<UsersAPIClientInterface['getUsersUsernameEventsOrgParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<void>>;
getUsersUsernameEventsOrg(
args: Exclude<UsersAPIClientInterface['getUsersUsernameEventsOrgParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<void | HttpResponse<void> | HttpEvent<void>> {
return super.getUsersUsernameEventsOrg(args, requestHttpOptions, observe);
}
/**
* List a user's followers
* Response generated for [ 200 ] HTTP response code.
*/
getUsersUsernameFollowers(
args: Exclude<UsersAPIClientInterface['getUsersUsernameFollowersParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<models.Users>;
getUsersUsernameFollowers(
args: Exclude<UsersAPIClientInterface['getUsersUsernameFollowersParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<models.Users>>;
getUsersUsernameFollowers(
args: Exclude<UsersAPIClientInterface['getUsersUsernameFollowersParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<models.Users>>;
getUsersUsernameFollowers(
args: Exclude<UsersAPIClientInterface['getUsersUsernameFollowersParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<models.Users | HttpResponse<models.Users> | HttpEvent<models.Users>> {
return super.getUsersUsernameFollowers(args, requestHttpOptions, observe)
.pipe(tap((res: any) => guards.isUsers(res) || console.error(`TypeGuard for response 'models.Users' caught inconsistency.`, res)));
}
/**
* Check if one user follows another.
* Response generated for [ 204 ] HTTP response code.
*/
getUsersUsernameFollowingTargetUser(
args: Exclude<UsersAPIClientInterface['getUsersUsernameFollowingTargetUserParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<void>;
getUsersUsernameFollowingTargetUser(
args: Exclude<UsersAPIClientInterface['getUsersUsernameFollowingTargetUserParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<void>>;
getUsersUsernameFollowingTargetUser(
args: Exclude<UsersAPIClientInterface['getUsersUsernameFollowingTargetUserParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<void>>;
getUsersUsernameFollowingTargetUser(
args: Exclude<UsersAPIClientInterface['getUsersUsernameFollowingTargetUserParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<void | HttpResponse<void> | HttpEvent<void>> {
return super.getUsersUsernameFollowingTargetUser(args, requestHttpOptions, observe);
}
/**
* List a users gists.
* Response generated for [ 200 ] HTTP response code.
*/
getUsersUsernameGists(
args: Exclude<UsersAPIClientInterface['getUsersUsernameGistsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<models.Gists>;
getUsersUsernameGists(
args: Exclude<UsersAPIClientInterface['getUsersUsernameGistsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<models.Gists>>;
getUsersUsernameGists(
args: Exclude<UsersAPIClientInterface['getUsersUsernameGistsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<models.Gists>>;
getUsersUsernameGists(
args: Exclude<UsersAPIClientInterface['getUsersUsernameGistsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<models.Gists | HttpResponse<models.Gists> | HttpEvent<models.Gists>> {
return super.getUsersUsernameGists(args, requestHttpOptions, observe)
.pipe(tap((res: any) => guards.isGists(res) || console.error(`TypeGuard for response 'models.Gists' caught inconsistency.`, res)));
}
/**
* List public keys for a user.
* Lists the verified public keys for a user. This is accessible by anyone.
*
* Response generated for [ 200 ] HTTP response code.
*/
getUsersUsernameKeys(
args: Exclude<UsersAPIClientInterface['getUsersUsernameKeysParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<models.Gitignore>;
getUsersUsernameKeys(
args: Exclude<UsersAPIClientInterface['getUsersUsernameKeysParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<models.Gitignore>>;
getUsersUsernameKeys(
args: Exclude<UsersAPIClientInterface['getUsersUsernameKeysParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<models.Gitignore>>;
getUsersUsernameKeys(
args: Exclude<UsersAPIClientInterface['getUsersUsernameKeysParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<models.Gitignore | HttpResponse<models.Gitignore> | HttpEvent<models.Gitignore>> {
return super.getUsersUsernameKeys(args, requestHttpOptions, observe)
.pipe(tap((res: any) => guards.isGitignore(res) || console.error(`TypeGuard for response 'models.Gitignore' caught inconsistency.`, res)));
}
/**
* List all public organizations for a user.
* Response generated for [ 200 ] HTTP response code.
*/
getUsersUsernameOrgs(
args: Exclude<UsersAPIClientInterface['getUsersUsernameOrgsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<models.Gitignore>;
getUsersUsernameOrgs(
args: Exclude<UsersAPIClientInterface['getUsersUsernameOrgsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<models.Gitignore>>;
getUsersUsernameOrgs(
args: Exclude<UsersAPIClientInterface['getUsersUsernameOrgsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<models.Gitignore>>;
getUsersUsernameOrgs(
args: Exclude<UsersAPIClientInterface['getUsersUsernameOrgsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<models.Gitignore | HttpResponse<models.Gitignore> | HttpEvent<models.Gitignore>> {
return super.getUsersUsernameOrgs(args, requestHttpOptions, observe)
.pipe(tap((res: any) => guards.isGitignore(res) || console.error(`TypeGuard for response 'models.Gitignore' caught inconsistency.`, res)));
}
/**
* These are events that you've received by watching repositories and following users. If you are authenticated as the given user, you will see private events; otherwise you'll only see public events.
* Response generated for [ default ] HTTP response code.
*/
getUsersUsernameReceivedEvents(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReceivedEventsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<void>;
getUsersUsernameReceivedEvents(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReceivedEventsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<void>>;
getUsersUsernameReceivedEvents(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReceivedEventsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<void>>;
getUsersUsernameReceivedEvents(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReceivedEventsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<void | HttpResponse<void> | HttpEvent<void>> {
return super.getUsersUsernameReceivedEvents(args, requestHttpOptions, observe);
}
/**
* List public events that a user has received
* Response generated for [ default ] HTTP response code.
*/
getUsersUsernameReceivedEventsPublic(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReceivedEventsPublicParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<void>;
getUsersUsernameReceivedEventsPublic(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReceivedEventsPublicParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<void>>;
getUsersUsernameReceivedEventsPublic(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReceivedEventsPublicParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<void>>;
getUsersUsernameReceivedEventsPublic(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReceivedEventsPublicParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<void | HttpResponse<void> | HttpEvent<void>> {
return super.getUsersUsernameReceivedEventsPublic(args, requestHttpOptions, observe);
}
/**
* List public repositories for the specified user.
* Response generated for [ 200 ] HTTP response code.
*/
getUsersUsernameRepos(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReposParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<models.Repos>;
getUsersUsernameRepos(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReposParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<models.Repos>>;
getUsersUsernameRepos(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReposParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<models.Repos>>;
getUsersUsernameRepos(
args: Exclude<UsersAPIClientInterface['getUsersUsernameReposParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<models.Repos | HttpResponse<models.Repos> | HttpEvent<models.Repos>> {
return super.getUsersUsernameRepos(args, requestHttpOptions, observe)
.pipe(tap((res: any) => guards.isRepos(res) || console.error(`TypeGuard for response 'models.Repos' caught inconsistency.`, res)));
}
/**
* List repositories being starred by a user.
* Response generated for [ default ] HTTP response code.
*/
getUsersUsernameStarred(
args: Exclude<UsersAPIClientInterface['getUsersUsernameStarredParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<void>;
getUsersUsernameStarred(
args: Exclude<UsersAPIClientInterface['getUsersUsernameStarredParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<void>>;
getUsersUsernameStarred(
args: Exclude<UsersAPIClientInterface['getUsersUsernameStarredParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<void>>;
getUsersUsernameStarred(
args: Exclude<UsersAPIClientInterface['getUsersUsernameStarredParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<void | HttpResponse<void> | HttpEvent<void>> {
return super.getUsersUsernameStarred(args, requestHttpOptions, observe);
}
/**
* List repositories being watched by a user.
* Response generated for [ default ] HTTP response code.
*/
getUsersUsernameSubscriptions(
args: Exclude<UsersAPIClientInterface['getUsersUsernameSubscriptionsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'body',
): Observable<void>;
getUsersUsernameSubscriptions(
args: Exclude<UsersAPIClientInterface['getUsersUsernameSubscriptionsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'response',
): Observable<HttpResponse<void>>;
getUsersUsernameSubscriptions(
args: Exclude<UsersAPIClientInterface['getUsersUsernameSubscriptionsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe?: 'events',
): Observable<HttpEvent<void>>;
getUsersUsernameSubscriptions(
args: Exclude<UsersAPIClientInterface['getUsersUsernameSubscriptionsParams'], undefined>,
requestHttpOptions?: HttpOptions,
observe: any = 'body',
): Observable<void | HttpResponse<void> | HttpEvent<void>> {
return super.getUsersUsernameSubscriptions(args, requestHttpOptions, observe);
}
}
| GuardedUsersAPIClient |
radio-button-screenshot-test.tsx | import {openStoryPage} from '../test-utils'; | const DEVICES = ['MOBILE_IOS', 'MOBILE_ANDROID'] as const;
test.each(DEVICES)(`RadioGroup`, async (device) => {
await openStoryPage({
id: 'components-forms-radio-button--default',
device,
});
const image = await page.screenshot();
expect(image).toMatchImageSnapshot();
}); | |
id.rs | //! jsonrpc id field
/// Request Id
#[derive(Debug, PartialEq, Clone, Hash, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
#[serde(untagged)]
pub enum Id {
/// No id (notification)
Null,
/// Numeric id
Num(u64),
/// String id
Str(String),
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
#[test]
fn id_deserialization() {
let s = r#""2""#;
let deserialized: Id = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, Id::Str("2".into()));
let s = r#"2"#;
let deserialized: Id = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, Id::Num(2));
let s = r#""2x""#;
let deserialized: Id = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, Id::Str("2x".to_owned()));
let s = r#"[null, 0, 2, "3"]"#;
let deserialized: Vec<Id> = serde_json::from_str(s).unwrap();
assert_eq!(
deserialized,
vec![Id::Null, Id::Num(0), Id::Num(2), Id::Str("3".into())]
);
}
#[test]
fn id_serialization() |
}
| {
let d = vec![
Id::Null,
Id::Num(0),
Id::Num(2),
Id::Num(3),
Id::Str("3".to_owned()),
Id::Str("test".to_owned()),
];
let serialized = serde_json::to_string(&d).unwrap();
assert_eq!(serialized, r#"[null,0,2,3,"3","test"]"#);
} |
chip8.js | import Instruction from '../utils/instruction.js';
import Screen from './screen.js';
import Keyboard from './keyboard.js'
import Sound from './sound.js';
/**
* chip8 cpu. This handles reading and executing instructions from the ROM,
* manages what to render on the screen, what audio to play.
* Is the general CPU and memory of the chip8
*
*
* All instruction handling, memory handling, screen rendering, etc.
* follow Cowgod's Chip-8 Technical Reference v1.0
*
*
* http://devernay.free.fr/hacks/chip8/C8TECH10.HTM
*/
export default class Chip8 {
/**
* @param {Screen} screen handle all drawing/redrawing of sprites
* @param {Keyboard} keyboard handle all keyboard interaction
* @param {Sound} sound sound context to handle sound on/off
*/
constructor(screen, keyboard, sound) {
this.screen = screen || new Screen();
this.keyboard = keyboard || new Keyboard();
this.sound = sound || new Sound();
this.instruction = new Instruction();
// cycle execution speed
this.speed = 10;
this.soundOff = true;
}
/**
* Resets whole cpu state
*/
resetState() {
// 16 8-bit registers
this.v = new Uint8Array(16);
// 4096 byte RAM, first 512 bytes are reserved for the interpreter (stuff like font)
// program starts at 512
this.memory = new Uint8Array(1024 * 4);
this.stack = [];
this.screen.clearScreen();
this.keyboard.clear();
// memory addr register
this.i = 0;
this.programCounter = 0x200;
// points to topmost level of the stack
this.stackPointer = 0;
this.delayTimer = 0;
this.soundTimer = 0;
this.pause = false;
this.loadFontsIntoState();
}
/**
* @param {Uint8Array} ROM binary data
*/
loadROM(ROM) {
for (let i = 0, size = ROM.length; i < size; i += 1) {
// start loading ROM in memory from 0x200
this.memory[0x200 + i] = ROM[i];
}
}
/**
* Emulates cpu cycle
*/
emulateCycle() {
for (let i = 0; i < this.speed; i += 1) {
if (!this.pause) {
// each instruction is 2 bytes (16bit) long
// read value in memory of current PC and bitshift to left by 8
const firstByte = this.memory[this.programCounter] << 8;
// read next value (PC++) in memory
const secondByte = this.memory[this.programCounter + 1];
// add both values together by bitwise OR to create a 16bit (2 byte) instruction
// this is because the memory of chip8 is only 8 bit (1byte)
this.instruction.setInstructionCode(firstByte | secondByte);
this.performInstruction(this.instruction);
}
}
if (!this.pause) {
this.updateTimers();
}
}
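// Example of the fetch above (hypothetical memory contents): if
// memory[PC] === 0xA2 and memory[PC + 1] === 0xF0, the combined opcode is
// (0xA2 << 8) | 0xF0 === 0xA2F0, i.e. the "ANNN" instruction that loads
// the address register I with 0x2F0.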
performInstruction(instructionCode) {
// to signal that this instruction executed and move to next
this.programCounter += 2;
switch (instructionCode.getCategory()) {
case 0x0: this.operationCode0(instructionCode); break;
case 0x1: this.operationCode1(instructionCode); break;
case 0x2: this.operationCode2(instructionCode); break;
case 0x3: this.operationCode3(instructionCode); break;
case 0x4: this.operationCode4(instructionCode); break;
case 0x5: this.operationCode5(instructionCode); break;
case 0x6: this.operationCode6(instructionCode); break;
case 0x7: this.operationCode7(instructionCode); break;
case 0x8: this.operationCode8(instructionCode); break;
case 0x9: this.operationCode9(instructionCode); break;
case 0xA: this.operationCodeA(instructionCode); break;
case 0xB: this.operationCodeB(instructionCode); break;
case 0xC: this.operationCodeC(instructionCode); break;
case 0xD: this.operationCodeD(instructionCode); break;
case 0xE: this.operationCodeE(instructionCode); break;
case 0xF: this.operationCodeF(instructionCode); break;
default: throw new Error(`Unknown opcode ${instructionCode.getInstructionCode().toString(16)}`);
}
}
operationCode0(instruction) {
switch (instruction.getKK()) {
case 0xE0: this.screen.clearScreen(); break;
// return from subroutine
case 0xEE:
// sets program counter to address at top of stack
this.programCounter = this.stack[this.stackPointer];
this.stackPointer -= 1;
break;
default: break;
}
}
/**
* Interpreter sets address to NNN
* @param {Instruction} instruction
*/
operationCode1(instruction) {
this.programCounter = instruction.getAddr();
}
/**
* Calls subroutine at NNN
* @param {Instruction} instruction
*/
operationCode2(instruction) {
this.stackPointer += 1; |
/**
* Skip next instruction if Vx = kk
* @param {Instruction} instruction
*/
operationCode3(instruction) {
if (this.v[instruction.getX()] === instruction.getKK()) {
this.programCounter += 2;
}
}
/**
* Skip next instruction if Vx != KK
* @param {Instruction} instruction
*/
operationCode4(instruction) {
if (this.v[instruction.getX()] !== instruction.getKK()) {
this.programCounter += 2;
}
}
/**
* Skip next instruction if Vx = Vy
* @param {Instruction} instruction
*/
operationCode5(instruction) {
if (this.v[instruction.getX()] === this.v[instruction.getY()]) {
this.programCounter += 2;
}
}
/**
* Set Vx to KK
* @param {Instruction} instruction
*/
operationCode6(instruction) {
this.v[instruction.getX()] = instruction.getKK();
}
/**
* Set Vx = Vx + KK
* @param {Instruction} instruction
*/
operationCode7(instruction) {
const val = instruction.getKK() + this.v[instruction.getX()];
this.v[instruction.getX()] = val;
}
/**
* Handle any instruction regarding the 16 8-bit registers
* @param {Instruction} instruction
*/
operationCode8(instruction) {
const x = instruction.getX();
const y = instruction.getY();
switch (instruction.getSubCategory()) {
case 0x0: this.v[x] = this.v[y]; break;
case 0x1: this.v[x] |= this.v[y]; break;
case 0x2: this.v[x] &= this.v[y]; break;
case 0x3: this.v[x] ^= this.v[y]; break;
case 0x4:
// compute the carry from the untruncated sum; this.v[x] itself wraps
// at 0xFF because v is a Uint8Array, so it can never exceed 0xFF
if (this.v[x] + this.v[y] > 0xFF) {
this.v[0xF] = 1;
} else {
this.v[0xF] = 0;
}
this.v[x] += this.v[y];
break;
case 0x5:
if (this.v[x] > this.v[y]) {
this.v[0xF] = 1;
} else {
this.v[0xF] = 0;
}
this.v[x] -= this.v[y];
break;
case 0x6:
// get the last bit of Vx and assign
this.v[0xF] = this.v[x] & 0x1;
// use bitwise shift to divide by 2
this.v[x] >>= 1;
break;
case 0x7:
if (this.v[y] > this.v[x]) {
this.v[0xF] = 1;
} else {
this.v[0xF] = 0;
}
this.v[x] = this.v[y] - this.v[x];
break;
case 0xE:
// keep only the most-significant bit (0 or 1) in VF before shifting
this.v[0xF] = (this.v[x] & 0x80) >> 7;
// multiply by 2
this.v[x] = this.v[x] << 1;
break;
default: throw new Error(`Unknown opcode ${instruction.getInstructionCode().toString(16)}`);
}
}
/**
* Skip next instruction if Vx != Vy
* @param {Instruction} instruction
*/
operationCode9(instruction) {
if (this.v[instruction.getX()] !== this.v[instruction.getY()]) {
this.programCounter += 2;
}
}
/**
* Set memory address register to NNN
* @param {Instruction} instruction
*/
operationCodeA(instruction) {
this.i = instruction.getAddr();
}
/**
* Set programCounter to NNN + V0x0
* @param {Instruction} instruction
*/
operationCodeB(instruction) {
this.programCounter = instruction.getAddr() + this.v[0x0];
}
/**
* Set Vx = random byte bitwise AND KK
* @param {Instruction} instruction
*/
operationCodeC(instruction) {
// generate random number between 0 and 255
const val = Math.floor(Math.random() * 0xFF);
// bitwise AND it with KK of the instruction and assign to vX
this.v[instruction.getX()] = val & instruction.getKK();
}
/**
* Draws n-byte sprite starting at memory[i] at (Vx, Vy)
* @param {Instruction} instruction
*/
operationCodeD(instruction) {
let sprite;
const width = 8;
const height = instruction.getSubCategory();
const xPortion = instruction.getX();
const yPortion = instruction.getY();
this.v[0xF] = 0;
// read N bytes from memory
// to know where to draw vertically along the Y axis
for (let y = 0; y < height; y += 1) {
sprite = this.memory[this.i + y];
// draw certain pixels horizontally along the X axis
for (let x = 0; x < width; x += 1) {
// if sprite is to be drawn
if ((sprite & 0x80) > 0) {
// if no pixel was erased
if (this.screen.setPixels(this.v[xPortion] + x, this.v[yPortion] + y)) {
this.v[0xF] = 0;
} else {
this.v[0xF] = 1;
}
}
sprite <<= 1;
}
this.screen.vfFrame = this.v[0xF];
this.screen.render();
}
}
/**
* Handle all instructions related to key pressing
* @param {Instruction} instruction
*/
operationCodeE(instruction) {
switch (instruction.getKK()) {
// skip next instruction if key with value Vx is pressed
case 0x9E:
if (this.keyboard.isKeyPressed(this.v[instruction.getX()])) {
this.programCounter += 2;
}
break;
// skip next instruction if key with the value Vx is not pressed
case 0xA1:
if (!this.keyboard.isKeyPressed(this.v[instruction.getX()])) {
this.programCounter += 2;
}
break;
default: throw new Error(`Unknown opcode ${instruction.getInstructionCode().toString(16)}`);
}
}
operationCodeF(instruction) {
switch (instruction.getKK()) {
case 0x07: this.operationCodeF07(instruction); break;
case 0x0A: this.operationCodeF0A(instruction); break;
case 0x15: this.operationCodeF15(instruction); break;
case 0x18: this.operationCodeF18(instruction); break;
case 0x1E: this.operationCodeF1E(instruction); break;
case 0x29: this.operationCodeF29(instruction); break;
case 0x33: this.operationCodeF33(instruction); break;
case 0x55: this.operationCodeF55(instruction); break;
case 0x65: this.operationCodeF65(instruction); break;
default: throw new Error(`Unknown opcode ${instruction.getInstructionCode().toString(16)}`);
}
}
/**
* Set Vx = delay timer
* @param {Instruction} instruction
*/
operationCodeF07(instruction) {
this.v[instruction.getX()] = this.delayTimer;
}
/**
* Wait for key press. store value of key in Vx
* @param {Instruction} instruction
*/
operationCodeF0A(instruction) {
this.pause = true;
this.keyboard.onNextKeyPress = function onNextKeyPress(key) {
this.v[instruction.getX()] = key;
this.pause = false;
}.bind(this);
}
/**
* Set delay timer = Vx
* @param {Instruction} instruction
*/
operationCodeF15(instruction) {
this.delayTimer = this.v[instruction.getX()];
}
/**
* Set sound timer = Vx
* @param {Instruction} instruction
*/
operationCodeF18(instruction) {
this.soundTimer = this.v[instruction.getX()];
}
/**
* Set i register = i + Vx
* @param {Instruction} instruction
*/
operationCodeF1E(instruction) {
this.i += this.v[instruction.getX()];
}
/**
* Set register i = location of sprite for digit Vx
* @param {Instruction} instruction
*/
operationCodeF29(instruction) {
this.i = this.v[instruction.getX()] * 5;
}
/**
* Store hundreds, tens and ones of Vx in memory 1, 2 , 3
* @param {Instruction} instruction
*/
operationCodeF33(instruction) {
let number = this.v[instruction.getX()];
for (let i = 3; i > 0; i -= 1) {
// parse and assign the next digit, working from the ones place leftward
this.memory[this.i + i - 1] = parseInt(number % 10, 10);
// divide by 10 to shave off a decimal
number /= 10;
}
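// Worked example: if Vx === 254 the loop stores 2 at memory[I],
// 5 at memory[I + 1] and 4 at memory[I + 2] (hundreds, tens, ones).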
}
/**
* Set registers V0 to Vx in memory starting at location I (register)
* @param {Instruction} instruction
*/
operationCodeF55(instruction) {
for (let i = 0; i <= instruction.getX(); i += 1) {
this.memory[this.i + i] = this.v[i];
}
}
/**
* Read registers V0 to Vx from memory starting at location I (register)
* @param {Instruction} instruction
*/
operationCodeF65(instruction) {
for (let i = 0; i <= instruction.getX(); i += 1) {
this.v[i] = this.memory[this.i + i];
}
}
/**
* Updates delay and sound timers after every cycle
*/
updateTimers() {
if (this.delayTimer > 0) {
this.delayTimer -= 1;
}
if (this.soundTimer > 0) {
if (this.soundTimer === 1) {
if (!this.soundOff) {
this.sound.start();
}
}
this.soundTimer -= 1;
}
}
/**
* loads the hexadecimal values of the FONTS from the technical reference into memory
* starting at location 0
*/
loadFontsIntoState() {
const fonts = [
// 0
0xF0, 0x90, 0x90, 0x90, 0xF0,
// 1
0x20, 0x60, 0x20, 0x20, 0x70,
// 2
0xF0, 0x10, 0xF0, 0x80, 0xF0,
// 3
0xF0, 0x10, 0xF0, 0x10, 0xF0,
// 4
0x90, 0x90, 0xF0, 0x10, 0x10,
// 5
0xF0, 0x80, 0xF0, 0x10, 0xF0,
// 6
0xF0, 0x80, 0xF0, 0x90, 0xF0,
// 7
0xF0, 0x10, 0x20, 0x40, 0x40,
// 8
0xF0, 0x90, 0xF0, 0x90, 0xF0,
// 9
0xF0, 0x90, 0xF0, 0x10, 0xF0,
// A
0xF0, 0x90, 0xF0, 0x90, 0x90,
// B
0xE0, 0x90, 0xE0, 0x90, 0xE0,
// C
0xF0, 0x80, 0x80, 0x80, 0xF0,
// D
0xE0, 0x90, 0x90, 0x90, 0xE0,
// E
0xF0, 0x80, 0xF0, 0x80, 0xF0,
// F
0xF0, 0x80, 0xF0, 0x80, 0x80,
];
for (let i = 0; i < fonts.length; i += 1) {
this.memory[i] = fonts[i];
}
}
} | // put the program counter on the top of the stack
this.stack[this.stackPointer] = this.programCounter;
this.programCounter = instruction.getAddr();
} |
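// Note the pairing with opcode 0x00EE in operationCode0: performInstruction
// has already advanced programCounter by 2 before this runs, so the value
// pushed here is the address of the instruction after the CALL, which is
// exactly where 0x00EE resumes when it pops the stack.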
test_delete.py | #!python
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import util
def test_delete(): | util.delete('d1', force=True)
return 'delete OK'
def main():
s = test_delete()
util.send_response('text', s)
main() | util.copy_file('a.txt', 'a.txt.bak')
util.copy_dir('d1', 'd1_bak')
util.delete('a.txt') |
extensionService.test.ts | import { projectPathValidation } from "./extensionService";
import { IVSCodeObject } from "../../types/vscode";
import {
EXTENSION_COMMANDS
} from "../constants";
xdescribe("wizardSelectionSelector", () => {
let mockVsCode: IVSCodeObject;
let callbackExtension: Function;
describe("validate project name", () => {
let mockCallbackProjectPathValidation = {};
describe("not exist => valid", () => { | window.addEventListener = jest.fn((event, cb) => {
callbackExtension = cb;
});
mockCallbackProjectPathValidation = {
data:{
command:EXTENSION_COMMANDS.PROJECT_PATH_VALIDATION,
payload:{
scope:222,
projectPathValidation:{
isValid:true
}
}
}
};
const postMessage = jest.fn(() =>callbackExtension(mockCallbackProjectPathValidation));
mockVsCode = { postMessage };
})
it("is valid, dont exist",(resolve)=>{
projectPathValidation("dfss","sdfsdf", mockVsCode).then((event: any)=>{
expect(event.data.payload.projectPathValidation.isValid).toBeTruthy();
resolve();
})
})
});
});
}); | beforeEach(()=>{ |
demo.py | # Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import tempfile
import time
import warnings
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_coco_json
path_to_coco_json = "/home/matthias/Data/Ubuntu/data/datasets/porta_filter/front3d/coco_data/coco_annotations.json"
path_to_images = "/home/matthias/Data/Ubuntu/data/datasets/porta_filter/front3d"
# path_to_config_yaml = "/home/matthias/Data/Ubuntu/git/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
DatasetCatalog.register("porta_filter", lambda: load_coco_json(path_to_coco_json, path_to_images))
MetadataCatalog.get("porta_filter").set(thing_classes=["porta filter"], json_file=path_to_coco_json, image_root=path_to_images)
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
# load config from file and command-line arguments
|
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
demo = VisualizationDemo(cfg)
if args.input:
if len(args.input) == 1:
args.input = glob.glob(os.path.expanduser(args.input[0]))
assert args.input, "The input path(s) was not found"
for path in tqdm.tqdm(args.input, disable=not args.output):
# use PIL, to be consistent with evaluation
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img)
logger.info(
"{}: {} in {:.2f}s".format(
path,
"detected {} instances".format(len(predictions["instances"]))
if "instances" in predictions
else "finished",
time.time() - start_time,
)
)
if args.output:
if os.path.isdir(args.output):
assert os.path.isdir(args.output), args.output
out_filename = os.path.join(args.output, os.path.basename(path))
else:
assert len(args.input) == 1, "Please specify a directory with args.output"
out_filename = args.output
visualized_output.save(out_filename)
else:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
if cv2.waitKey(0) == 27:
break # esc to quit
elif args.webcam:
assert args.input is None, "Cannot have both --input and --webcam!"
assert args.output is None, "output not yet supported with --webcam!"
cam = cv2.VideoCapture(0)
for vis in tqdm.tqdm(demo.run_on_video(cam)):
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, vis)
if cv2.waitKey(1) == 27:
break # esc to quit
cam.release()
cv2.destroyAllWindows()
elif args.video_input:
video = cv2.VideoCapture(args.video_input)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
basename = os.path.basename(args.video_input)
codec, file_ext = (
("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
)
if codec == ".mp4v":
warnings.warn("x264 codec not available, switching to mp4v")
if args.output:
if os.path.isdir(args.output):
output_fname = os.path.join(args.output, basename)
output_fname = os.path.splitext(output_fname)[0] + file_ext
else:
output_fname = args.output
assert not os.path.isfile(output_fname), output_fname
output_file = cv2.VideoWriter(
filename=output_fname,
# some installation of opencv may not support x264 (due to its license),
# you can try other format (e.g. MPEG)
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(frames_per_second),
frameSize=(width, height),
isColor=True,
)
assert os.path.isfile(args.video_input)
for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
if args.output:
output_file.write(vis_frame)
else:
cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
cv2.imshow(basename, vis_frame)
if cv2.waitKey(1) == 27:
break # esc to quit
video.release()
if args.output:
output_file.release()
else:
cv2.destroyAllWindows()
| cfg = get_cfg()
# To use demo for Panoptic-DeepLab, please uncomment the following two lines.
# from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa
# add_panoptic_deeplab_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg |
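# Example invocation (paths and weights are illustrative only):
#   python demo.py --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \
#       --input "images/*.jpg" --output out/ --confidence-threshold 0.5 \
#       --opts MODEL.WEIGHTS model_final.pth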
genSplitPDBlists.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 1 16:23:55 2020
@author: dmattox
"""
import os
import dill
import lec_gly as LecGly
from bSiteResiFeatures import plipFile
os.chdir(LecGly.homeDir)
##########################
outDir = './data/structures/bsites/batchLists/'
if not os.path.exists(outDir):
os.makedirs(outDir) |
##########################
with open(plipFile, "rb") as pickleFH:
allPLIP = dill.load(pickleFH)
def chunks(lst, n): # Function borrowed from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
"""Yield successive n-sized chunks from a list"""
for i in range(0, len(lst), n):
yield lst[i:i + n]
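# e.g. list(chunks(['1abc', '2xyz', '3def'], 2)) -> [['1abc', '2xyz'], ['3def']]
# (hypothetical PDB IDs, shown only to illustrate the batching behaviour)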
splitLst = chunks(list(allPLIP.keys()), maxBatchLstLength)
for i,lst in enumerate(splitLst):
with open(outDir + 'pdbList_' + str(i) + '.txt', 'w') as outFH:
for pdb in lst:
outFH.write(pdb + '\n') | ##########################
maxBatchLstLength = 50 # generates 28 lists from 1365 PDB IDs, 27 lists of 50 and 1 of 15 |
App.tsx | import React from "react";
import PaymentTable from "./Payments/PaymentTable";
import PaymentTotals from "./Payments/PaymentTotals";
import PaymentChart from "./Payments/PaymentChart";
import Title from "./Title";
import "./App.css";
const App: React.FC = () => {
return (
<div className="flex flex-col w-screen p-8 lg:p-16 App">
<header> | <h1 className="text-5xl text-blue-800">Recur</h1>
</header>
<div className="flex flex-col lg:flex-row mb-4">
<div className="lg:w-1/2 lg:mr-4">
<PaymentTotals currency="EUR" />
<PaymentTable currency="EUR" />
</div>
<div className="lg:w-1/2 lg:ml-4 md:mt-0 mt-12">
<Title title="Overzicht" />
<PaymentChart />
</div>
</div>
</div>
);
};
export default App; | |
isCounterResetCustomIdentValue.js | /* @flow */
'use strict';
const _ = require('lodash');
const keywordSets = require('../reference/keywordSets');
/** |
module.exports = function(value /*: string*/) /*: boolean*/ {
const valueLowerCase = value.toLowerCase();
if (
keywordSets.counterResetKeywords.has(valueLowerCase) ||
_.isFinite(parseInt(valueLowerCase))
) {
return false;
}
return true;
}; | * Check value is a custom ident
*/ |
GameContainer.js | import React, { Component } from 'react';
import { connect } from 'react-redux';
import { fetchGame } from '../actions/fetchGame';
import { answeredCorrect } from '../actions/answeredCorrect';
import { answeredIncorrect } from '../actions/answeredIncorrect';
import Game from '../components/Game';
import Summary from '../components/Summary';
import Button from 'react-bootstrap/Button';
import { BACKEND_URL } from '../api/backendUrl';
class GameContainer extends Component {
state = {
gameOver: false
}
componentDidMount() {
this.props.fetchGame(this.props.location.state.configObject);
}
renderGameState = () => {
if (this.state.gameOver) {
return <Summary game={this.props.game} />
} else {
return (
<div className="game">
<Game
game={this.props.game}
answeredCorrect={this.props.answeredCorrect}
answeredIncorrect={this.props.answeredIncorrect}
/>
<Button
id="finish-game"
disabled={this.buttonState()}
onClick={this.finishGame}
>
Finish Game
</Button>
<br></br>
<br></br>
</div>
)
}
}
buttonState = () => this.props.game.answered === this.props.game.questions.length ? false : true
finishGame = () => {
const token = localStorage.getItem("jwt");
let configObject = {
method: "PATCH",
headers: {
"Content-Type": "application/json",
Accept: "application/json",
Authorization: `Bearer ${token}`
},
body: JSON.stringify({
"game": {
"id": this.props.game.id,
"score": this.props.game.correct
}
})
}
fetch(`${BACKEND_URL}/${this.props.game.id}`, configObject).then(resp => resp.json()).then(resp => console.log(resp["status"]));
this.setState({
gameOver: true
});
}
render() {
return(
<div className="user-component">
{this.props.game.loading ? <h1>Loading Game...</h1>
: this.renderGameState()}
</div>
)
}
}
function mapStateToProps(state) {
return { game: state.game }
}
function | (dispatch) {
return {
fetchGame: (configObject) => {
dispatch(fetchGame(configObject))
},
answeredCorrect: () => {
dispatch(answeredCorrect())
},
answeredIncorrect: () => {
dispatch(answeredIncorrect())
}
};
}
export default connect(mapStateToProps, mapDispatchToProps)(GameContainer); | mapDispatchToProps |
events.controller.d.ts | import { EventsService } from "./events.service";
import { CreateEventsDto } from "./dto/create-events.dto";
import { Events } from "./events.model"; | private eventService;
constructor(eventService: EventsService);
create(dto: CreateEventsDto): Promise<Events>;
} | export declare class EventsController { |
PushedScreen.js | import React from 'react';
import { Platform, StyleSheet, Text, View } from 'react-native';
const styles = StyleSheet.create({
container: {
flex: 1,
justifyContent: 'center',
alignItems: 'center',
backgroundColor: '#F5FCFF',
},
welcome: {
fontSize: 20,
textAlign: 'center',
margin: 10,
},
instructions: {
textAlign: 'center',
color: '#333333',
marginBottom: 5,
},
});
function | () {
return (
<View style={styles.container}>
<Text style={styles.welcome}>Welcome to React Native!</Text>
<Text style={styles.instructions}>To get started, edit PushedScreen.js</Text>
<Text style={styles.instructions}>
{Platform.select({
ios: 'Press Cmd+R to reload,\nCmd+D or shake for dev menu',
android:
'Double tap R on your keyboard to reload,\n' +
'Shake or press menu button for dev menu',
})}
</Text>
</View>
);
}
export default PushedScreen;
| PushedScreen |
test_default.py | """Module containing the tests for the default scenario."""
# Standard Python Libraries
import os
# Third-Party Libraries
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")
def | (host):
"""Test that the appropriate packages were installed."""
distribution = host.system_info.distribution
codename = host.system_info.codename
# Docker package
if (
distribution == "debian" and (codename == "bullseye" or codename is None)
) or distribution == "kali":
# Debian Bullseye is not yet supported by the official Docker
# package repo
#
# https://docs.docker.com/engine/install/debian/
assert host.package("docker.io").is_installed
elif distribution == "fedora":
        # Only Moby is available for Fedora 32 and 33
#
# https://docs.docker.com/engine/install/fedora/
assert host.package("moby-engine").is_installed
else:
assert host.package("docker-ce").is_installed
# docker-compose package
assert host.package("docker-compose").is_installed
# Docker python library
if distribution == "debian" and codename == "stretch":
# Our Stretch AMIs are still using Python 2
assert host.package("python-docker").is_installed
else:
assert host.package("python3-docker").is_installed
@pytest.mark.parametrize("svc", ["docker"])
def test_services(host, svc):
"""Test that the services were enabled."""
assert host.service(svc).is_enabled
@pytest.mark.parametrize("command", ["docker-compose version"])
def test_command(host, command):
"""Test that appropriate commands are available."""
assert host.run(command).rc == 0
| test_packages |
constructor-checks.js | "use strict";
/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
Object.defineProperty(exports, "__esModule", { value: true });
const schematics_1 = require("@angular/cdk/schematics"); | * signature types don't need to be stored here because the signature will be determined
* automatically through type checking.
*/
exports.constructorChecks = {
[schematics_1.TargetVersion.V8]: [
{
pr: 'https://github.com/angular/components/pull/15647',
changes: ['MatFormField', 'MatTabLink', 'MatVerticalStepper']
},
{
pr: 'https://github.com/angular/components/pull/15757',
changes: ['MatBadge']
},
{
pr: 'https://github.com/angular/components/issues/15734',
changes: ['MatButton', 'MatAnchor']
},
{
pr: 'https://github.com/angular/components/pull/15761',
changes: ['MatSpinner', 'MatProgressSpinner']
},
{
pr: 'https://github.com/angular/components/pull/15723',
changes: ['MatList', 'MatListItem']
},
{
pr: 'https://github.com/angular/components/pull/15722',
changes: ['MatExpansionPanel']
},
{
pr: 'https://github.com/angular/components/pull/15737',
changes: ['MatTabHeader', 'MatTabBody']
},
{
pr: 'https://github.com/angular/components/pull/15806',
changes: ['MatSlideToggle']
},
{
pr: 'https://github.com/angular/components/pull/15773',
changes: ['MatDrawerContainer']
}
],
[schematics_1.TargetVersion.V7]: [
{
pr: 'https://github.com/angular/components/pull/11706',
changes: ['MatDrawerContent'],
},
{
pr: 'https://github.com/angular/components/pull/11706',
changes: ['MatSidenavContent']
}
],
[schematics_1.TargetVersion.V6]: [
{
pr: 'https://github.com/angular/components/pull/9190',
changes: ['NativeDateAdapter'],
},
{
pr: 'https://github.com/angular/components/pull/10319',
changes: ['MatAutocomplete'],
},
{
pr: 'https://github.com/angular/components/pull/10344',
changes: ['MatTooltip'],
},
{
pr: 'https://github.com/angular/components/pull/10389',
changes: ['MatIconRegistry'],
},
{
pr: 'https://github.com/angular/components/pull/9775',
changes: ['MatCalendar'],
},
]
};
//# sourceMappingURL=constructor-checks.js.map | /**
* List of class names for which the constructor signature has been changed. The new constructor |
HardWare.ts | export class | {
id?: number;
name?: string;
regexPattern?: string;
status?: string;
createdBy?: string;
updatedBy?: string;
createdAt?: number;
updatedAt?: number;
parentId?: number;
children?: Array<HardWareRule>;
} | HardWareRule |
rust.rs | use std::io;
use rand::Rng;
fn | () {
println!("Guess the number!");
let secret_number = rand::thread_rng().gen_range(1, 101);
println!("The secret number is: {}", secret_number);
println!("Please input your guess.");
let mut guess = String::new();
io::stdin().read_line(&mut guess)
.expect("Failed to read line");
println!("You guessed: {}", guess);
}
| main |
adapter.ts | import { JupyterFrontEnd } from '@jupyterlab/application';
import { Dialog, showDialog } from '@jupyterlab/apputils';
import { CodeEditor } from '@jupyterlab/codeeditor';
import { DocumentRegistry, IDocumentWidget } from '@jupyterlab/docregistry';
import { ILogPayload } from '@jupyterlab/logconsole';
import { nullTranslator, TranslationBundle } from '@jupyterlab/translation';
import { JSONObject } from '@lumino/coreutils';
import { Signal } from '@lumino/signaling';
import { ICommandContext } from '../command_manager';
import { LSPConnection } from '../connection';
import {
DocumentConnectionManager,
IDocumentConnectionData,
ISocketConnectionOptions
} from '../connection_manager';
import { EditorAdapter } from '../editor_integration/editor_adapter';
import { IFeature, IFeatureEditorIntegration } from '../feature';
import { ILSPExtension, ILSPLogConsole } from '../index';
import { LanguageIdentifier } from '../lsp';
import { IRootPosition, IVirtualPosition } from '../positioning';
import { IForeignContext, VirtualDocument } from '../virtual/document';
import { IVirtualEditor } from '../virtual/editor';
import IEditor = CodeEditor.IEditor;
import IButton = Dialog.IButton;
import createButton = Dialog.createButton;
export class | {
/**
* The text message to be shown on the statusbar
*/
message: string;
changed: Signal<StatusMessage, void>;
private timer: number;
constructor() {
this.message = '';
this.changed = new Signal(this);
this.timer = null;
}
/**
* Set the text message and (optionally) the timeout to remove it.
* @param message
   * @param timeout - number of ms until the message is cleared;
* -1 if the message should stay up indefinitely;
* defaults to 3000ms (3 seconds)
*/
set(message: string, timeout: number = 1000 * 3) {
this.expire_timer();
this.message = message;
this.changed.emit();
if (timeout !== -1) {
this.timer = window.setTimeout(this.clear.bind(this), timeout);
}
}
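  // Illustrative usage (added, not in the original source): status_message.set('Connected', 5000)
  // shows the message for five seconds, while status_message.set('Indexing...', -1) keeps it up
  // until clear() is called explicitly.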
clear() {
this.message = '';
this.changed.emit();
}
private expire_timer() {
if (this.timer !== null) {
window.clearTimeout(this.timer);
this.timer = 0;
}
}
}
/**
* The values should follow the https://microsoft.github.io/language-server-protocol/specification guidelines
*/
const mime_type_language_map: JSONObject = {
'text/x-rsrc': 'r',
'text/x-r-source': 'r',
// currently there are no LSP servers for IPython we are aware of
'text/x-ipython': 'python'
};
export interface IEditorChangedData {
editor: CodeEditor.IEditor;
}
/**
* Foreign code: low level adapter is not aware of the presence of foreign languages;
* it operates on the virtual document and must not attempt to infer the language dependencies
* as this would make the logic of inspections caching impossible to maintain, thus the WidgetAdapter
* has to handle that, keeping multiple connections and multiple virtual documents.
*/
export abstract class WidgetAdapter<T extends IDocumentWidget> {
protected adapters: Map<
VirtualDocument.id_path,
EditorAdapter<IVirtualEditor<IEditor>>
>;
public adapterConnected: Signal<WidgetAdapter<T>, IDocumentConnectionData>;
public isConnected: boolean;
public connection_manager: DocumentConnectionManager;
public status_message: StatusMessage;
public trans: TranslationBundle;
protected isDisposed = false;
console: ILSPLogConsole;
protected app: JupyterFrontEnd;
public activeEditorChanged: Signal<WidgetAdapter<T>, IEditorChangedData>;
public editorAdded: Signal<WidgetAdapter<T>, IEditorChangedData>;
public editorRemoved: Signal<WidgetAdapter<T>, IEditorChangedData>;
public update_finished: Promise<void>;
/**
* (re)create virtual document using current path and language
*/
abstract create_virtual_document(): VirtualDocument;
abstract get_editor_index_at(position: IVirtualPosition): number;
abstract get_editor_index(ce_editor: CodeEditor.IEditor): number;
abstract get_editor_wrapper(ce_editor: CodeEditor.IEditor): HTMLElement;
// note: it could be using namespace/IOptions pattern,
// but I do not know how to make it work with the generic type T
// (other than using 'any' in the IOptions interface)
protected constructor(protected extension: ILSPExtension, public widget: T) {
this.app = extension.app;
this.connection_manager = extension.connection_manager;
this.adapterConnected = new Signal(this);
this.activeEditorChanged = new Signal(this);
this.editorRemoved = new Signal(this);
this.editorAdded = new Signal(this);
this.adapters = new Map();
this.status_message = new StatusMessage();
this.isConnected = false;
this.console = extension.console.scope('WidgetAdapter');
this.trans = (extension.translator || nullTranslator).load(
'jupyterlab-lsp'
);
// set up signal connections
this.widget.context.saveState.connect(this.on_save_state, this);
this.connection_manager.closed.connect(this.on_connection_closed, this);
this.widget.disposed.connect(this.dispose, this);
}
protected get foreign_code_extractors() {
return this.extension.foreign_code_extractors;
}
protected get code_overrides() {
return this.extension.code_overrides;
}
on_connection_closed(
manager: DocumentConnectionManager,
{ virtual_document }: IDocumentConnectionData
) {
this.console.log(
'connection closed, disconnecting adapter',
virtual_document.id_path
);
if (virtual_document !== this.virtual_editor?.virtual_document) {
return;
}
this.dispose();
}
dispose() {
if (this.isDisposed) {
return;
}
if (this.virtual_editor?.virtual_document) {
this.disconnect_adapter(this.virtual_editor?.virtual_document);
}
this.widget.context.saveState.disconnect(this.on_save_state, this);
this.connection_manager.closed.disconnect(this.on_connection_closed, this);
this.widget.disposed.disconnect(this.dispose, this);
this.disconnect();
// just to be sure
this.virtual_editor = null;
this.app = null;
this.widget = null;
this.connection_manager = null;
this.widget = null;
this.isDisposed = true;
}
virtual_editor: IVirtualEditor<IEditor>;
abstract get document_path(): string;
abstract get mime_type(): string;
get widget_id(): string {
return this.widget.id;
}
get language(): LanguageIdentifier {
// the values should follow https://microsoft.github.io/language-server-protocol/specification guidelines,
// see the table in https://microsoft.github.io/language-server-protocol/specification#textDocumentItem
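    // Illustrative examples (added): 'text/x-rsrc' resolves to 'r' via mime_type_language_map below,
    // while an unmapped 'text/x-python' falls through to the subtype with the 'x-' prefix stripped,
    // i.e. 'python'.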
if (mime_type_language_map.hasOwnProperty(this.mime_type)) {
return mime_type_language_map[this.mime_type] as string;
} else {
let without_parameters = this.mime_type.split(';')[0];
let [type, subtype] = without_parameters.split('/');
if (type === 'application' || type === 'text') {
if (subtype.startsWith('x-')) {
return subtype.substr(2);
} else {
return subtype;
}
} else {
return this.mime_type;
}
}
}
abstract get language_file_extension(): string;
disconnect() {
this.connection_manager.unregister_document(
this.virtual_editor.virtual_document
);
this.widget.context.model.contentChanged.disconnect(
this.onContentChanged,
this
);
    // pretend that all editors were removed to trigger the disconnection of event handlers
// they will be connected again on new connection
for (let editor of this.editors) {
this.editorRemoved.emit({
editor: editor
});
}
for (let adapter of this.adapters.values()) {
adapter.dispose();
}
this.adapters.clear();
this.virtual_editor.dispose();
}
// equivalent to triggering didClose and didOpen, as per syncing specification,
// but also reloads the connection; used during file rename (or when it was moved)
protected reload_connection() {
// ignore premature calls (before the editor was initialized)
if (this.virtual_editor == null) {
return;
}
// disconnect all existing connections (and dispose adapters)
this.disconnect();
// recreate virtual document using current path and language
// as virtual editor assumes it gets the virtual document at init,
// just dispose virtual editor (which disposes virtual document too)
// and re-initialize both virtual editor and document
this.init_virtual();
// reconnect
this.connect_document(this.virtual_editor.virtual_document, true).catch(
this.console.warn
);
}
protected on_save_state(context: any, state: DocumentRegistry.SaveState) {
// ignore premature calls (before the editor was initialized)
if (this.virtual_editor == null) {
return;
}
if (state === 'completed') {
// note: must only be send to the appropriate connections as
// some servers (Julia) break if they receive save notification
// for a document that was not opened before, see:
// https://github.com/krassowski/jupyterlab-lsp/issues/490
const documents_to_save = [this.virtual_editor.virtual_document];
for (let virtual_document of documents_to_save) {
let connection = this.connection_manager.connections.get(
virtual_document.uri
);
this.console.log(
'Sending save notification for',
virtual_document.uri,
'to',
connection
);
connection.sendSaved(virtual_document.document_info);
for (let foreign of virtual_document.foreign_documents.values()) {
documents_to_save.push(foreign);
}
}
}
}
abstract activeEditor: CodeEditor.IEditor;
abstract get editors(): CodeEditor.IEditor[];
/**
* public for use in tests (but otherwise could be private)
*/
public update_documents() {
if (this.isDisposed) {
this.console.warn('Cannot update documents: adapter disposed');
return;
}
return this.virtual_editor.virtual_document.update_manager.update_documents(
this.editors.map(ce_editor => {
return {
ce_editor: ce_editor,
value: this.virtual_editor.get_editor_value(ce_editor)
};
})
);
}
get has_multiple_editors(): boolean {
return this.editors.length > 1;
}
protected async on_connected(data: IDocumentConnectionData) {
let { virtual_document } = data;
this.connect_adapter(data.virtual_document, data.connection);
this.adapterConnected.emit(data);
this.isConnected = true;
await this.update_documents().then(() => {
// refresh the document on the LSP server
this.document_changed(virtual_document, virtual_document, true);
this.console.log(
'virtual document(s) for',
this.document_path,
'have been initialized'
);
});
// Note: the logger extension behaves badly with non-default names
    // as it changes the source to the active file afterwards anyway
const loggerSourceName = virtual_document.uri;
const logger = this.extension.user_console.getLogger(loggerSourceName);
data.connection.serverNotifications['$/logTrace'].connect(
(connection, message) => {
this.console.log(
data.connection.serverIdentifier,
'trace',
virtual_document.uri,
message
);
}
);
data.connection.serverNotifications['window/logMessage'].connect(
(connection, message) => {
this.console.log(
data.connection.serverIdentifier,
virtual_document.uri,
message
);
logger.log({
type: 'text',
data: connection.serverIdentifier + ': ' + message.message
} as ILogPayload);
}
);
data.connection.serverNotifications['window/showMessage'].connect(
(connection, message) => {
this.console.log(
data.connection.serverIdentifier,
virtual_document.uri,
message.message
);
void showDialog({
title: this.trans.__('Message from ') + connection.serverIdentifier,
body: message.message
});
}
);
data.connection.serverRequests['window/showMessageRequest'].setHandler(
async params => {
this.console.log(
data.connection.serverIdentifier,
virtual_document.uri,
params
);
const actionItems = params.actions;
const buttons = actionItems.map(action => {
return createButton({
label: action.title
});
});
const result = await showDialog<IButton>({
title:
this.trans.__('Message from ') + data.connection.serverIdentifier,
body: params.message,
buttons: buttons
});
const choice = buttons.indexOf(result.button);
if (choice === -1) {
return;
}
return actionItems[choice];
}
);
}
/**
* Opens a connection for the document. The connection may or may
* not be initialized, yet, and depending on when this is called, the client
* may not be fully connected.
*
* @param virtual_document a VirtualDocument
* @param send_open whether to open the document immediately
*/
protected async connect_document(
virtual_document: VirtualDocument,
send_open = false
): Promise<void> {
virtual_document.changed.connect(this.document_changed, this);
virtual_document.foreign_document_opened.connect(
this.on_foreign_document_opened,
this
);
const connection_context = await this.connect(virtual_document).catch(
this.console.warn
);
if (!send_open) {
return;
}
if (connection_context && connection_context.connection) {
connection_context.connection.sendOpenWhenReady(
virtual_document.document_info
);
} else {
this.console.warn(
`Connection for ${virtual_document.path} was not opened`
);
}
}
private create_virtual_editor(
options: IVirtualEditor.IOptions
): IVirtualEditor<IEditor> {
let editorType = this.extension.editor_type_manager.findBestImplementation(
this.editors
);
if (editorType == null) {
return null;
}
let virtualEditorConstructor = editorType.implementation;
return new virtualEditorConstructor(options);
}
protected init_virtual() {
let virtual_editor = this.create_virtual_editor({
adapter: this,
virtual_document: this.create_virtual_document()
});
if (virtual_editor == null) {
this.console.error(
'Could not initialize a VirtualEditor for adapter: ',
this
);
return;
}
this.virtual_editor = virtual_editor;
this.connect_contentChanged_signal();
}
/**
* Handler for opening a document contained in a parent document. The assumption
* is that the editor already exists for this, and as such the document
* should be queued for immediate opening.
*
* @param host the VirtualDocument that contains the VirtualDocument in another language
* @param context information about the foreign VirtualDocument
*/
protected async on_foreign_document_opened(
host: VirtualDocument,
context: IForeignContext
) {
const { foreign_document } = context;
await this.connect_document(foreign_document, true);
foreign_document.foreign_document_closed.connect(
this.on_foreign_document_closed,
this
);
}
private on_foreign_document_closed(
host: VirtualDocument,
context: IForeignContext
) {
const { foreign_document } = context;
foreign_document.foreign_document_closed.disconnect(
this.on_foreign_document_closed,
this
);
foreign_document.foreign_document_opened.disconnect(
this.on_foreign_document_opened,
this
);
foreign_document.changed.disconnect(this.document_changed, this);
}
document_changed(
virtual_document: VirtualDocument,
document: VirtualDocument,
is_init = false
) {
if (this.isDisposed) {
this.console.warn('Cannot swap document: adapter disposed');
return;
}
// TODO only send the difference, using connection.sendSelectiveChange()
let connection = this.connection_manager.connections.get(
virtual_document.uri
);
let adapter = this.adapters.get(virtual_document.id_path);
if (!connection?.isReady) {
this.console.log('Skipping document update signal: connection not ready');
return;
}
if (adapter == null) {
this.console.log('Skipping document update signal: adapter not ready');
return;
}
// this.virtual_editor.console.log(
// 'LSP: virtual document',
// virtual_document.id_path,
// 'has changed sending update'
// );
connection.sendFullTextChange(
virtual_document.value,
virtual_document.document_info
);
// the first change (initial) is not propagated to features,
// as it has no associated CodeMirrorChange object
if (!is_init) {
// guarantee that the virtual editor won't perform an update of the virtual documents while
// the changes are recorded...
      // TODO this is not ideal - while it solves the problem of some errors,
      // it introduces an unnecessary delay. A better way could be to invalidate some of the updates when a new one comes in,
      // but maybe not every one (then the outdated state could be kept for too long for a user who writes very quickly);
      // also we would not want to invalidate the updates for the purpose of autocompletion (the trigger characters)
this.virtual_editor.virtual_document.update_manager
.with_update_lock(async () => {
await adapter.updateAfterChange();
})
.then()
.catch(this.console.warn);
}
}
connect_adapter(
virtual_document: VirtualDocument,
connection: LSPConnection,
features: IFeature[] = null
): EditorAdapter<any> {
let adapter = this.create_adapter(virtual_document, connection, features);
this.adapters.set(virtual_document.id_path, adapter);
return adapter;
}
private disconnect_adapter(virtual_document: VirtualDocument) {
let adapter = this.adapters.get(virtual_document.id_path);
this.adapters.delete(virtual_document.id_path);
if (adapter != null) {
adapter.dispose();
}
}
public get_features(virtual_document: VirtualDocument) {
let adapter = this.adapters.get(virtual_document.id_path);
return adapter?.features;
}
private async connect(virtual_document: VirtualDocument) {
let language = virtual_document.language;
this.console.log(`will connect using language: ${language}`);
let options: ISocketConnectionOptions = {
virtual_document,
language,
document_path: this.document_path
};
let connection = await this.connection_manager.connect(options);
await this.on_connected({ virtual_document, connection });
return {
connection,
virtual_document
};
}
/**
* Connect the change signal in order to update all virtual documents after a change.
*
* Update to the state of a notebook may be done without a notice on the CodeMirror level,
   * e.g. when a cell is deleted. Therefore JupyterLab-specific signals are watched instead.
*
   * While by not using the change event of CodeMirror editors we lose an easy way to send selective
   * (range) updates, this can still be implemented by comparing before/after states of the
   * virtual documents, which is even more resilient and - obviously - editor-independent.
*/
private connect_contentChanged_signal() {
this.widget.context.model.contentChanged.connect(
this.onContentChanged,
this
);
}
private create_adapter(
virtual_document: VirtualDocument,
connection: LSPConnection,
features: IFeature[] = null
): EditorAdapter<IVirtualEditor<IEditor>> {
let adapter_features = new Array<
IFeatureEditorIntegration<IVirtualEditor<IEditor>>
>();
if (features === null) {
features = this.extension.feature_manager.features;
}
for (let feature of features) {
let featureEditorIntegrationConstructor = feature.editorIntegrationFactory.get(
this.virtual_editor.editor_name
);
let integration = new featureEditorIntegrationConstructor({
feature: feature,
virtual_editor: this.virtual_editor,
virtual_document: virtual_document,
connection: connection,
status_message: this.status_message,
settings: feature.settings,
adapter: this,
trans: this.trans
});
adapter_features.push(integration);
}
let adapter = new EditorAdapter(
this.virtual_editor,
virtual_document,
adapter_features,
this.console
);
this.console.log('Adapter for', this.document_path, 'is ready.');
// the client is now fully ready: signal to the server that the document is "open"
connection.sendOpenWhenReady(virtual_document.document_info);
return adapter;
}
private async onContentChanged(_slot: any) {
// update the virtual documents (sending the updates to LSP is out of scope here)
this.update_finished = this.update_documents().catch(this.console.warn);
await this.update_finished;
}
get_position_from_context_menu(): IRootPosition {
// Note: could also try using this.app.contextMenu.menu.contentNode position.
// Note: could add a guard on this.app.contextMenu.menu.isAttached
// get the first node as it gives the most accurate approximation
let leaf_node = this.app.contextMenuHitTest(() => true);
let { left, top } = leaf_node.getBoundingClientRect();
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
let event = this.app._contextMenuEvent;
// if possible, use more accurate position from the actual event
// (but this relies on an undocumented and unstable feature)
if (event !== undefined) {
left = event.clientX;
top = event.clientY;
event.stopPropagation();
}
return this.virtual_editor.window_coords_to_root_position({
left: left,
top: top
});
}
abstract context_from_active_document(): ICommandContext | null;
get_context(root_position: IRootPosition): ICommandContext {
let document = this.virtual_editor.document_at_root_position(root_position);
let virtual_position = this.virtual_editor.root_position_to_virtual_position(
root_position
);
return {
document,
connection: this.connection_manager.connections.get(document.uri),
virtual_position,
root_position,
features: this.get_features(document),
editor: this.virtual_editor,
app: this.app,
adapter: this
};
}
get_context_from_context_menu(): ICommandContext {
let root_position = this.get_position_from_context_menu();
return this.get_context(root_position);
}
abstract get wrapper_element(): HTMLElement;
}
| StatusMessage |
world.go | // Code generated by eevee. DO NOT EDIT!
package model
import (
"context"
"encoding/json"
"rapidashplugin/dao"
"rapidashplugin/entity"
"sort"
"strconv"
"golang.org/x/xerrors"
)
type WorldFinder interface {
FindAll(context.Context) (*Worlds, error)
FindByID(context.Context, uint64) (*World, error)
FindByIDs(context.Context, []uint64) (*Worlds, error)
}
type World struct {
*entity.World
worldDAO dao.World
isAlreadyCreated bool
savedValue entity.World
conv ModelConverter
}
type Worlds struct {
values []*World
}
type WorldsCollection []*Worlds
func | (value *entity.World, worldDAO dao.World) *World {
return &World{
World: value,
worldDAO: worldDAO,
}
}
func NewWorlds(entities entity.Worlds) *Worlds {
return &Worlds{values: make([]*World, 0, len(entities))}
}
func (m *Worlds) newWorlds(values []*World) *Worlds {
return &Worlds{values: values}
}
func (m *Worlds) Each(iter func(*World)) {
if m == nil {
return
}
for _, value := range m.values {
iter(value)
}
}
func (m *Worlds) EachIndex(iter func(int, *World)) {
if m == nil {
return
}
for idx, value := range m.values {
iter(idx, value)
}
}
func (m *Worlds) EachWithError(iter func(*World) error) error {
if m == nil {
return nil
}
for _, value := range m.values {
if err := iter(value); err != nil {
return xerrors.Errorf("failed to iteration: %w", err)
}
}
return nil
}
func (m *Worlds) EachIndexWithError(iter func(int, *World) error) error {
if m == nil {
return nil
}
for idx, value := range m.values {
if err := iter(idx, value); err != nil {
return xerrors.Errorf("failed to iteration: %w", err)
}
}
return nil
}
func (m *Worlds) Map(mapFunc func(*World) *World) *Worlds {
if m == nil {
return nil
}
mappedValues := []*World{}
for _, value := range m.values {
mappedValue := mapFunc(value)
if mappedValue != nil {
mappedValues = append(mappedValues, mappedValue)
}
}
return m.newWorlds(mappedValues)
}
func (m *Worlds) Any(cond func(*World) bool) bool {
if m == nil {
return false
}
for _, value := range m.values {
if cond(value) {
return true
}
}
return false
}
func (m *Worlds) Some(cond func(*World) bool) bool {
return m.Any(cond)
}
func (m *Worlds) IsIncluded(cond func(*World) bool) bool {
return m.Any(cond)
}
func (m *Worlds) All(cond func(*World) bool) bool {
if m == nil {
return false
}
for _, value := range m.values {
if !cond(value) {
return false
}
}
return true
}
func (m *Worlds) Sort(compare func(*World, *World) bool) {
if m == nil {
return
}
sort.Slice(m.values, func(i, j int) bool {
return compare(m.values[i], m.values[j])
})
}
func (m *Worlds) SortStable(compare func(*World, *World) bool) {
if m == nil {
return
}
sort.SliceStable(m.values, func(i, j int) bool {
return compare(m.values[i], m.values[j])
})
}
func (m *Worlds) Find(cond func(*World) bool) *World {
if m == nil {
return nil
}
for _, value := range m.values {
if cond(value) {
return value
}
}
return nil
}
func (m *Worlds) Filter(filter func(*World) bool) *Worlds {
if m == nil {
return nil
}
filteredValues := []*World{}
for _, value := range m.values {
if filter(value) {
filteredValues = append(filteredValues, value)
}
}
return m.newWorlds(filteredValues)
}
func (m *Worlds) IsEmpty() bool {
if m == nil {
return true
}
if len(m.values) == 0 {
return true
}
return false
}
func (m *Worlds) At(idx int) *World {
if m == nil {
return nil
}
if idx < 0 {
return nil
}
if len(m.values) > idx {
return m.values[idx]
}
return nil
}
func (m *Worlds) First() *World {
if m == nil {
return nil
}
if len(m.values) > 0 {
return m.values[0]
}
return nil
}
func (m *Worlds) Last() *World {
if m == nil {
return nil
}
if len(m.values) > 0 {
return m.values[len(m.values)-1]
}
return nil
}
func (m *Worlds) Compact() *Worlds {
if m == nil {
return nil
}
compactedValues := []*World{}
for _, value := range m.values {
if value == nil {
continue
}
compactedValues = append(compactedValues, value)
}
return m.newWorlds(compactedValues)
}
func (m *Worlds) Add(args ...*World) *Worlds {
if m == nil {
return nil
}
for _, value := range args {
m.values = append(m.values, value)
}
return m
}
func (m *Worlds) Merge(args ...*Worlds) *Worlds {
if m == nil {
return nil
}
for _, arg := range args {
for _, value := range arg.values {
m.values = append(m.values, value)
}
}
return m
}
func (m *Worlds) Len() int {
if m == nil {
return 0
}
return len(m.values)
}
func (m *WorldsCollection) Merge() *Worlds {
if m == nil {
return nil
}
if len(*m) == 0 {
return nil
}
if len(*m) == 1 {
return (*m)[0]
}
values := []*World{}
for _, collection := range *m {
for _, value := range collection.values {
values = append(values, value)
}
}
return (*m)[0].newWorlds(values)
}
func (m *World) ToJSON(ctx context.Context) ([]byte, error) {
if m == nil {
return []byte("null"), nil
}
if r, ok := interface{}(m).(BeforeRenderer); ok {
if err := r.BeforeRender(ctx); err != nil {
return nil, xerrors.Errorf("failed to BeforeRender: %w", err)
}
}
buf := []byte{}
buf = append(buf, '{')
buf = append(buf, "\"id\":"...)
buf = strconv.AppendUint(buf, m.ID, 10)
buf = append(buf, ',')
buf = append(buf, "\"name\":"...)
buf = append(buf, strconv.Quote(m.Name)...)
buf = append(buf, '}')
return buf, nil
}
func (m *World) ToJSONWithOption(ctx context.Context, option *RenderOption) ([]byte, error) {
if m == nil {
return []byte("null"), nil
}
if r, ok := interface{}(m).(BeforeRenderer); ok {
if err := r.BeforeRender(ctx); err != nil {
return nil, xerrors.Errorf("failed to BeforeRender: %w", err)
}
}
buf := []byte{}
isWritten := false
buf = append(buf, '{')
if option.Exists("id") {
buf = append(buf, "\"id\":"...)
buf = strconv.AppendUint(buf, m.ID, 10)
isWritten = true
}
if option.Exists("name") {
if isWritten {
buf = append(buf, ',')
}
buf = append(buf, "\"name\":"...)
buf = append(buf, strconv.Quote(m.Name)...)
isWritten = true
}
buf = append(buf, '}')
return buf, nil
}
func (m *Worlds) ToJSON(ctx context.Context) ([]byte, error) {
if m == nil {
return []byte("null"), nil
}
if r, ok := interface{}(m).(BeforeRenderer); ok {
if err := r.BeforeRender(ctx); err != nil {
return nil, xerrors.Errorf("failed to BeforeRender: %w", err)
}
}
buf := []byte{}
buf = append(buf, '[')
for idx, value := range m.values {
if idx != 0 {
buf = append(buf, ',')
}
bytes, err := value.ToJSON(ctx)
if err != nil {
return nil, xerrors.Errorf("cannot render to JSON: %w", err)
}
buf = append(buf, bytes...)
}
buf = append(buf, ']')
return buf, nil
}
func (m *Worlds) ToJSONWithOption(ctx context.Context, option *RenderOption) ([]byte, error) {
if m == nil {
return []byte("null"), nil
}
if r, ok := interface{}(m).(BeforeRenderer); ok {
if err := r.BeforeRender(ctx); err != nil {
return nil, xerrors.Errorf("failed to BeforeRender: %w", err)
}
}
buf := []byte{}
buf = append(buf, '[')
for idx, value := range m.values {
if idx != 0 {
buf = append(buf, ',')
}
bytes, err := value.ToJSONWithOption(ctx, option)
if err != nil {
return nil, xerrors.Errorf("cannot render to JSON: %w", err)
}
buf = append(buf, bytes...)
}
buf = append(buf, ']')
return buf, nil
}
func (m *World) MarshalJSON() ([]byte, error) {
bytes, err := m.ToJSON(context.Background())
if err != nil {
return nil, xerrors.Errorf("cannot render to JSON: %w", err)
}
return bytes, nil
}
func (m *World) MarshalJSONContext(ctx context.Context) ([]byte, error) {
bytes, err := m.ToJSON(ctx)
if err != nil {
return nil, xerrors.Errorf("cannot render to JSON: %w", err)
}
return bytes, nil
}
func (m *Worlds) MarshalJSON() ([]byte, error) {
bytes, err := m.ToJSON(context.Background())
if err != nil {
return nil, xerrors.Errorf("cannot render to JSON: %w", err)
}
return bytes, nil
}
func (m *Worlds) MarshalJSONContext(ctx context.Context) ([]byte, error) {
bytes, err := m.ToJSON(ctx)
if err != nil {
return nil, xerrors.Errorf("cannot render to JSON: %w", err)
}
return bytes, nil
}
func (m *World) UnmarshalJSON(bytes []byte) error {
var value struct {
*entity.World
}
if err := json.Unmarshal(bytes, &value); err != nil {
return xerrors.Errorf("failed to unmarshal: %w", err)
}
m.World = value.World
return nil
}
func (m *Worlds) UnmarshalJSON(bytes []byte) error {
var values []*World
if err := json.Unmarshal(bytes, &values); err != nil {
return xerrors.Errorf("failed to unmarshal: %w", err)
}
m.values = values
return nil
}
func (m *World) ToMap(ctx context.Context) (map[string]interface{}, error) {
if m == nil {
return nil, nil
}
if r, ok := interface{}(m).(BeforeRenderer); ok {
if err := r.BeforeRender(ctx); err != nil {
return nil, xerrors.Errorf("failed to BeforeRender: %w", err)
}
}
value := map[string]interface{}{}
value["id"] = m.ID
value["name"] = m.Name
return value, nil
}
func (m *World) ToMapWithOption(ctx context.Context, option *RenderOption) (map[string]interface{}, error) {
if m == nil {
return nil, nil
}
if r, ok := interface{}(m).(BeforeRenderer); ok {
if err := r.BeforeRender(ctx); err != nil {
return nil, xerrors.Errorf("failed to BeforeRender: %w", err)
}
}
value := map[string]interface{}{}
if option.Exists("id") {
value["id"] = m.ID
}
if option.Exists("name") {
value["name"] = m.Name
}
return value, nil
}
func (m *Worlds) ToMap(ctx context.Context) ([]map[string]interface{}, error) {
if m == nil {
return nil, nil
}
if r, ok := interface{}(m).(BeforeRenderer); ok {
if err := r.BeforeRender(ctx); err != nil {
return nil, xerrors.Errorf("failed to BeforeRender: %w", err)
}
}
value := []map[string]interface{}{}
for _, v := range m.values {
mapValue, err := v.ToMap(ctx)
if err != nil {
return nil, xerrors.Errorf("cannot render to map: %w", err)
}
value = append(value, mapValue)
}
return value, nil
}
func (m *Worlds) ToMapWithOption(ctx context.Context, option *RenderOption) ([]map[string]interface{}, error) {
if m == nil {
return nil, nil
}
if r, ok := interface{}(m).(BeforeRenderer); ok {
if err := r.BeforeRender(ctx); err != nil {
return nil, xerrors.Errorf("failed to BeforeRender: %w", err)
}
}
value := []map[string]interface{}{}
for _, v := range m.values {
mapValue, err := v.ToMapWithOption(ctx, option)
if err != nil {
return nil, xerrors.Errorf("cannot render to map: %w", err)
}
value = append(value, mapValue)
}
return value, nil
}
func (m *World) SetConverter(conv ModelConverter) {
m.conv = conv
}
func (m *World) Create(ctx context.Context) error {
if m.worldDAO == nil {
// for testing
return nil
}
if m.isAlreadyCreated {
return xerrors.New("this instance has already created")
}
if err := m.worldDAO.Create(ctx, m.World); err != nil {
return xerrors.Errorf("failed to Create: %w", err)
}
m.savedValue = *m.World
m.isAlreadyCreated = true
return nil
}
func (m *World) Update(ctx context.Context) error {
if m.worldDAO == nil {
// for testing
return nil
}
isRequiredUpdate := false
if m.savedValue.ID != m.ID {
isRequiredUpdate = true
}
if m.savedValue.Name != m.Name {
isRequiredUpdate = true
}
if !isRequiredUpdate {
return nil
}
if err := m.worldDAO.Update(ctx, m.World); err != nil {
return xerrors.Errorf("failed to Update: %w", err)
}
m.savedValue = *m.World
return nil
}
func (m *World) Delete(ctx context.Context) error {
if m.worldDAO == nil {
// for testing
return nil
}
if err := m.worldDAO.DeleteByID(ctx, m.ID); err != nil {
return xerrors.Errorf("failed to Delete: %w", err)
}
return nil
}
func (m *World) SetAlreadyCreated(isAlreadyCreated bool) {
m.isAlreadyCreated = isAlreadyCreated
}
func (m *World) SetSavedValue(savedValue *entity.World) {
m.savedValue = *savedValue
}
func (m *World) Save(ctx context.Context) error {
if m.isAlreadyCreated {
if err := m.Update(ctx); err != nil {
return xerrors.Errorf("failed to Update: %w", err)
}
return nil
}
if err := m.Create(ctx); err != nil {
return xerrors.Errorf("failed to Create: %w", err)
}
return nil
}
func (m *Worlds) Create(ctx context.Context) error {
if err := m.EachWithError(func(v *World) error {
if err := v.Create(ctx); err != nil {
return xerrors.Errorf("failed to Create: %w", err)
}
return nil
}); err != nil {
return xerrors.Errorf("interrupt iteration for Worlds: %w", err)
}
return nil
}
func (m *Worlds) Update(ctx context.Context) error {
if err := m.EachWithError(func(v *World) error {
if err := v.Update(ctx); err != nil {
return xerrors.Errorf("failed to Update: %w", err)
}
return nil
}); err != nil {
return xerrors.Errorf("interrupt iteration for Worlds: %w", err)
}
return nil
}
func (m *Worlds) Save(ctx context.Context) error {
if err := m.EachWithError(func(v *World) error {
if err := v.Save(ctx); err != nil {
return xerrors.Errorf("failed to Save: %w", err)
}
return nil
}); err != nil {
return xerrors.Errorf("interrupt iteration for Worlds: %w", err)
}
return nil
}
func (m *Worlds) UniqueID() *Worlds {
if m == nil {
return nil
}
filterMap := map[uint64]struct{}{}
return m.Filter(func(value *World) bool {
if _, exists := filterMap[value.ID]; exists {
return false
}
filterMap[value.ID] = struct{}{}
return true
})
}
func (m *Worlds) GroupByID() map[uint64]*Worlds {
if m == nil {
return nil
}
values := map[uint64]*Worlds{}
for _, value := range m.values {
if _, exists := values[value.ID]; !exists {
values[value.ID] = &Worlds{}
}
values[value.ID].Add(value)
}
return values
}
func (m *Worlds) IDs() []uint64 {
if m == nil {
return nil
}
values := []uint64{}
for _, value := range m.values {
values = append(values, value.ID)
}
return values
}
func (m *Worlds) UniqueName() *Worlds {
if m == nil {
return nil
}
filterMap := map[string]struct{}{}
return m.Filter(func(value *World) bool {
if _, exists := filterMap[value.Name]; exists {
return false
}
filterMap[value.Name] = struct{}{}
return true
})
}
func (m *Worlds) GroupByName() map[string]*Worlds {
if m == nil {
return nil
}
values := map[string]*Worlds{}
for _, value := range m.values {
if _, exists := values[value.Name]; !exists {
values[value.Name] = &Worlds{}
}
values[value.Name].Add(value)
}
return values
}
func (m *Worlds) Names() []string {
if m == nil {
return nil
}
values := []string{}
for _, value := range m.values {
values = append(values, value.Name)
}
return values
}
func (m *Worlds) FirstByID(a0 uint64) *World {
if m == nil {
return nil
}
for _, value := range m.values {
if value.ID != a0 {
continue
}
return value
}
return nil
}
func (m *Worlds) FilterByID(a0 uint64) *Worlds {
if m == nil {
return nil
}
values := []*World{}
for _, value := range m.values {
if value.ID != a0 {
continue
}
values = append(values, value)
}
return m.newWorlds(values)
}
| NewWorld |
hourly_usage_attribution_response.py | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import (
ModelNormal,
cached_property,
)
def lazy_import():
from datadog_api_client.v1.model.hourly_usage_attribution_body import HourlyUsageAttributionBody
from datadog_api_client.v1.model.hourly_usage_attribution_metadata import HourlyUsageAttributionMetadata
globals()["HourlyUsageAttributionBody"] = HourlyUsageAttributionBody
globals()["HourlyUsageAttributionMetadata"] = HourlyUsageAttributionMetadata
class HourlyUsageAttributionResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
validations = {}
@cached_property
def openapi_types():
|
attribute_map = {
"metadata": "metadata",
"usage": "usage",
}
read_only_vars = {}
def __init__(self, *args, **kwargs):
"""HourlyUsageAttributionResponse - a model defined in OpenAPI
Keyword Args:
metadata (HourlyUsageAttributionMetadata): [optional]
usage ([HourlyUsageAttributionBody]): [optional] Get the hourly usage attribution by tag(s).
"""
super().__init__(kwargs)
self._check_pos_args(args)
@classmethod
def _from_openapi_data(cls, *args, **kwargs):
"""Helper creating a new instance from a response."""
self = super(HourlyUsageAttributionResponse, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
return self
| lazy_import()
return {
"metadata": (HourlyUsageAttributionMetadata,),
"usage": ([HourlyUsageAttributionBody],),
} |
utils.rs | use {
crate::{
error::MetaplexError,
state::{
AuctionManager, AuctionManagerStatus, BidRedemptionTicket, Key,
OriginalAuthorityLookup, Store, WhitelistedCreator, WinningConfigItem,
MAX_BID_REDEMPTION_TICKET_SIZE, PREFIX,
},
},
arrayref::array_ref,
borsh::{BorshDeserialize, BorshSerialize},
solana_program::{
account_info::AccountInfo,
borsh::try_from_slice_unchecked,
entrypoint::ProgramResult,
msg,
program::{invoke, invoke_signed},
program_error::ProgramError,
program_pack::{IsInitialized, Pack},
pubkey::Pubkey,
system_instruction,
sysvar::{rent::Rent, Sysvar},
},
spl_auction::processor::{AuctionData, AuctionState, BidderMetadata},
spl_token::instruction::{set_authority, AuthorityType},
spl_token_metadata::{
instruction::update_metadata_accounts,
state::{Metadata, EDITION},
},
spl_token_vault::instruction::create_withdraw_tokens_instruction,
std::convert::TryInto,
};
/// assert initialized account
pub fn assert_initialized<T: Pack + IsInitialized>(
account_info: &AccountInfo,
) -> Result<T, ProgramError> {
let account: T = T::unpack_unchecked(&account_info.data.borrow())?;
if !account.is_initialized() {
Err(MetaplexError::Uninitialized.into())
} else {
Ok(account)
}
}
pub fn assert_rent_exempt(rent: &Rent, account_info: &AccountInfo) -> ProgramResult {
if !rent.is_exempt(account_info.lamports(), account_info.data_len()) {
Err(MetaplexError::NotRentExempt.into())
} else {
Ok(())
}
}
pub fn assert_owned_by(account: &AccountInfo, owner: &Pubkey) -> ProgramResult {
if account.owner != owner {
Err(MetaplexError::IncorrectOwner.into())
} else {
Ok(())
}
}
pub fn assert_signer(account_info: &AccountInfo) -> ProgramResult {
if !account_info.is_signer {
Err(ProgramError::MissingRequiredSignature)
} else {
Ok(())
}
}
pub fn assert_store_safety_vault_manager_match(
auction_manager: &AuctionManager,
safety_deposit_info: &AccountInfo,
vault_info: &AccountInfo,
token_vault_program: &Pubkey,
) -> ProgramResult {
if auction_manager.vault != *vault_info.key {
return Err(MetaplexError::AuctionManagerVaultMismatch.into());
}
let data = safety_deposit_info.data.borrow();
let vault_key = Pubkey::new_from_array(*array_ref![data, 1, 32]);
let token_mint_key = Pubkey::new_from_array(*array_ref![data, 33, 32]);
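    // Descriptive note (added): the safety deposit box account is expected to be the PDA derived
    // by the token-vault program from the seeds [PREFIX, vault pubkey, token mint pubkey];
    // the derivation check below rejects any other address.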
assert_derivation(
&token_vault_program,
safety_deposit_info,
&[
spl_token_vault::state::PREFIX.as_bytes(),
vault_info.key.as_ref(),
token_mint_key.as_ref(),
],
)?;
if *vault_info.key != vault_key {
return Err(MetaplexError::SafetyDepositBoxVaultMismatch.into());
}
Ok(())
}
pub fn assert_at_least_one_creator_matches_or_store_public_and_all_verified(
program_id: &Pubkey,
auction_manager: &AuctionManager,
metadata: &Metadata,
whitelisted_creator_info: &AccountInfo,
store_info: &AccountInfo,
) -> ProgramResult {
let store = Store::from_account_info(store_info)?;
if store.public {
return Ok(());
}
if let Some(creators) = &metadata.data.creators {
// does it exist? It better!
let existing_whitelist_creator: WhitelistedCreator =
match WhitelistedCreator::from_account_info(whitelisted_creator_info) {
Ok(val) => val,
Err(_) => return Err(MetaplexError::InvalidWhitelistedCreator.into()),
};
if !existing_whitelist_creator.activated {
return Err(MetaplexError::WhitelistedCreatorInactive.into());
}
let mut found = false;
for creator in creators {
// Now find at least one creator that can make this pda in the list
let (key, _) = Pubkey::find_program_address(
&[
PREFIX.as_bytes(),
program_id.as_ref(),
auction_manager.store.as_ref(),
creator.address.as_ref(),
],
program_id,
);
if key == *whitelisted_creator_info.key {
found = true;
}
if !creator.verified {
return Err(MetaplexError::CreatorHasNotVerifiedMetadata.into());
}
}
if found {
return Ok(());
}
}
Err(MetaplexError::InvalidWhitelistedCreator.into())
}
pub fn assert_authority_correct(
auction_manager: &AuctionManager,
authority_info: &AccountInfo,
) -> ProgramResult {
if auction_manager.authority != *authority_info.key {
return Err(MetaplexError::AuctionManagerAuthorityMismatch.into());
}
assert_signer(authority_info)?;
Ok(())
}
/// Create account almost from scratch, lifted from
/// https://github.com/solana-labs/solana-program-library/blob/7d4873c61721aca25464d42cc5ef651a7923ca79/associated-token-account/program/src/processor.rs#L51-L98
#[inline(always)]
pub fn create_or_allocate_account_raw<'a>(
program_id: Pubkey,
new_account_info: &AccountInfo<'a>,
rent_sysvar_info: &AccountInfo<'a>,
system_program_info: &AccountInfo<'a>,
payer_info: &AccountInfo<'a>,
size: usize,
signer_seeds: &[&[u8]],
) -> Result<(), ProgramError> {
let rent = &Rent::from_account_info(rent_sysvar_info)?;
let required_lamports = rent
.minimum_balance(size)
.max(1)
.saturating_sub(new_account_info.lamports());
if required_lamports > 0 {
msg!("Transfer {} lamports to the new account", required_lamports);
invoke(
&system_instruction::transfer(&payer_info.key, new_account_info.key, required_lamports),
&[
payer_info.clone(),
new_account_info.clone(),
system_program_info.clone(),
],
)?;
}
msg!("Allocate space for the account");
invoke_signed(
&system_instruction::allocate(new_account_info.key, size.try_into().unwrap()),
&[new_account_info.clone(), system_program_info.clone()],
&[&signer_seeds],
)?;
msg!("Assign the account to the owning program");
invoke_signed(
&system_instruction::assign(new_account_info.key, &program_id),
&[new_account_info.clone(), system_program_info.clone()],
&[&signer_seeds],
)?;
msg!("Completed assignation!");
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn transfer_safety_deposit_box_items<'a>(
token_vault_program: AccountInfo<'a>,
destination: AccountInfo<'a>,
safety_deposit_box: AccountInfo<'a>,
safety_deposit_token_store: AccountInfo<'a>,
vault: AccountInfo<'a>,
fraction_mint: AccountInfo<'a>,
vault_authority: AccountInfo<'a>,
transfer_authority: AccountInfo<'a>,
rent: AccountInfo<'a>,
amount: u64,
signer_seeds: &[&[u8]],
) -> ProgramResult {
invoke_signed(
&create_withdraw_tokens_instruction(
*token_vault_program.key,
*destination.key,
*safety_deposit_box.key,
*safety_deposit_token_store.key,
*vault.key,
*fraction_mint.key,
*vault_authority.key,
*transfer_authority.key,
amount,
),
&[
token_vault_program,
destination,
safety_deposit_box,
safety_deposit_token_store,
vault,
fraction_mint,
vault_authority,
transfer_authority,
rent,
],
&[&signer_seeds],
)?;
Ok(())
}
pub fn transfer_metadata_ownership<'a>(
token_metadata_program: AccountInfo<'a>,
metadata_info: AccountInfo<'a>,
update_authority: AccountInfo<'a>,
new_update_authority: AccountInfo<'a>,
signer_seeds: &[&[u8]],
) -> ProgramResult {
invoke_signed(
&update_metadata_accounts(
*token_metadata_program.key,
*metadata_info.key,
*update_authority.key,
Some(*new_update_authority.key),
None,
Some(true),
),
&[
update_authority,
new_update_authority,
metadata_info,
token_metadata_program,
],
&[&signer_seeds],
)?;
Ok(())
}
pub fn transfer_mint_authority<'a>(
new_authority_seeds: &[&[u8]],
new_authority_key: &Pubkey,
new_authority_info: &AccountInfo<'a>,
mint_info: &AccountInfo<'a>,
mint_authority_info: &AccountInfo<'a>,
token_program_info: &AccountInfo<'a>,
) -> ProgramResult {
msg!("Setting mint authority");
invoke_signed(
&set_authority(
token_program_info.key,
mint_info.key,
Some(new_authority_key),
AuthorityType::MintTokens,
mint_authority_info.key,
&[&mint_authority_info.key],
)
.unwrap(),
&[
mint_authority_info.clone(),
mint_info.clone(),
token_program_info.clone(),
new_authority_info.clone(),
],
&[new_authority_seeds],
)?;
msg!("Setting freeze authority");
invoke_signed(
&set_authority(
token_program_info.key,
mint_info.key,
Some(&new_authority_key),
AuthorityType::FreezeAccount,
mint_authority_info.key,
&[&mint_authority_info.key],
)
.unwrap(),
&[
mint_authority_info.clone(),
mint_info.clone(),
token_program_info.clone(),
new_authority_info.clone(),
],
&[new_authority_seeds],
)?;
Ok(())
}
pub struct CommonRedeemReturn {
pub redemption_bump_seed: u8,
pub auction_manager: AuctionManager,
pub auction: AuctionData,
pub bidder_metadata: BidderMetadata,
pub rent: Rent,
pub win_index: Option<usize>,
pub token_metadata_program: Pubkey,
}
pub struct CommonRedeemCheckArgs<'a> {
pub program_id: &'a Pubkey,
pub auction_manager_info: &'a AccountInfo<'a>,
pub safety_deposit_token_store_info: &'a AccountInfo<'a>,
pub destination_info: &'a AccountInfo<'a>,
pub bid_redemption_info: &'a AccountInfo<'a>,
pub safety_deposit_info: &'a AccountInfo<'a>,
pub vault_info: &'a AccountInfo<'a>,
pub auction_info: &'a AccountInfo<'a>,
pub bidder_metadata_info: &'a AccountInfo<'a>,
pub bidder_info: &'a AccountInfo<'a>,
pub token_program_info: &'a AccountInfo<'a>,
pub token_vault_program_info: &'a AccountInfo<'a>,
pub token_metadata_program_info: &'a AccountInfo<'a>,
pub store_info: &'a AccountInfo<'a>,
pub rent_info: &'a AccountInfo<'a>,
pub is_participation: bool,
pub overwrite_win_index: Option<usize>,
}
#[allow(clippy::too_many_arguments)]
pub fn common_redeem_checks(
args: CommonRedeemCheckArgs,
) -> Result<CommonRedeemReturn, ProgramError> {
let CommonRedeemCheckArgs {
program_id,
auction_manager_info,
safety_deposit_token_store_info,
destination_info,
bid_redemption_info,
safety_deposit_info,
vault_info,
auction_info,
bidder_metadata_info,
bidder_info,
token_program_info,
token_vault_program_info,
token_metadata_program_info,
rent_info,
store_info,
is_participation,
overwrite_win_index,
} = args;
let rent = &Rent::from_account_info(&rent_info)?;
let mut auction_manager: AuctionManager =
AuctionManager::from_account_info(auction_manager_info)?;
let auction = AuctionData::from_account_info(auction_info)?;
let store_data = store_info.data.borrow();
let bidder_metadata: BidderMetadata;
let auction_program = Pubkey::new_from_array(*array_ref![store_data, 2, 32]);
let token_vault_program = Pubkey::new_from_array(*array_ref![store_data, 34, 32]);
let token_metadata_program = Pubkey::new_from_array(*array_ref![store_data, 66, 32]);
let token_program = Pubkey::new_from_array(*array_ref![store_data, 98, 32]);
let mut redemption_bump_seed: u8 = 0;
if overwrite_win_index.is_some() {
// Auctioneer coming through, need to stub bidder metadata since it will not exist.
bidder_metadata = BidderMetadata {
bidder_pubkey: *bidder_info.key,
auction_pubkey: *auction_info.key,
last_bid: 0,
last_bid_timestamp: 0,
cancelled: false,
};
if *bidder_info.key != auction_manager.authority {
return Err(MetaplexError::MustBeAuctioneer.into());
}
} else {
bidder_metadata = BidderMetadata::from_account_info(bidder_metadata_info)?;
assert_owned_by(bidder_metadata_info, &auction_program)?;
assert_derivation(
&auction_program,
bidder_metadata_info,
&[
spl_auction::PREFIX.as_bytes(),
auction_program.as_ref(),
auction_info.key.as_ref(),
bidder_info.key.as_ref(),
"metadata".as_bytes(),
],
)?;
if bidder_metadata.bidder_pubkey != *bidder_info.key {
return Err(MetaplexError::BidderMetadataBidderMismatch.into());
}
let redemption_path = [
PREFIX.as_bytes(),
auction_manager.auction.as_ref(),
bidder_metadata_info.key.as_ref(),
];
let (redemption_key, actual_redemption_bump_seed) =
Pubkey::find_program_address(&redemption_path, &program_id);
redemption_bump_seed = actual_redemption_bump_seed;
if redemption_key != *bid_redemption_info.key {
return Err(MetaplexError::BidRedemptionMismatch.into());
}
}
let mut win_index = auction.is_winner(bidder_info.key);
if let Some(index) = overwrite_win_index {
let winner_at = auction.winner_at(index);
if winner_at.is_some() {
return Err(MetaplexError::AuctioneerCantClaimWonPrize.into());
} else {
win_index = overwrite_win_index
}
}
if !bid_redemption_info.data_is_empty() && overwrite_win_index.is_none() {
let bid_redemption: BidRedemptionTicket =
BidRedemptionTicket::from_account_info(bid_redemption_info)?;
let possible_items_to_redeem = match win_index {
Some(val) => auction_manager.settings.winning_configs[val].items.len(),
None => 0,
};
if (is_participation && bid_redemption.participation_redeemed)
|| (!is_participation
&& bid_redemption.items_redeemed == possible_items_to_redeem as u8)
{
return Err(MetaplexError::BidAlreadyRedeemed.into());
}
}
assert_signer(bidder_info)?;
assert_owned_by(&destination_info, token_program_info.key)?;
assert_owned_by(&auction_manager_info, &program_id)?;
assert_owned_by(safety_deposit_token_store_info, token_program_info.key)?;
if !bid_redemption_info.data_is_empty() {
assert_owned_by(bid_redemption_info, &program_id)?;
}
assert_owned_by(safety_deposit_info, &token_vault_program)?;
assert_owned_by(vault_info, &token_vault_program)?;
assert_owned_by(auction_info, &auction_program)?;
assert_owned_by(store_info, &program_id)?;
assert_store_safety_vault_manager_match(
&auction_manager,
&safety_deposit_info,
&vault_info,
&token_vault_program,
)?;
// looking out for you!
assert_rent_exempt(rent, &destination_info)?;
if auction_manager.auction != *auction_info.key {
return Err(MetaplexError::AuctionManagerAuctionMismatch.into());
}
if *store_info.key != auction_manager.store {
return Err(MetaplexError::AuctionManagerStoreMismatch.into());
}
if token_program != *token_program_info.key {
return Err(MetaplexError::AuctionManagerTokenProgramMismatch.into());
}
if token_vault_program != *token_vault_program_info.key {
return Err(MetaplexError::AuctionManagerTokenVaultProgramMismatch.into());
}
if token_metadata_program != *token_metadata_program_info.key {
return Err(MetaplexError::AuctionManagerTokenMetadataProgramMismatch.into());
}
if auction.state != AuctionState::Ended {
return Err(MetaplexError::AuctionHasNotEnded.into());
}
// No-op if already set.
auction_manager.state.status = AuctionManagerStatus::Disbursing;
Ok(CommonRedeemReturn {
redemption_bump_seed,
auction_manager,
auction,
bidder_metadata,
rent: *rent,
win_index,
token_metadata_program,
})
}
pub struct CommonRedeemFinishArgs<'a> {
pub program_id: &'a Pubkey,
pub auction_manager: AuctionManager,
pub auction_manager_info: &'a AccountInfo<'a>,
pub bidder_metadata_info: &'a AccountInfo<'a>,
pub rent_info: &'a AccountInfo<'a>,
pub system_info: &'a AccountInfo<'a>,
pub payer_info: &'a AccountInfo<'a>,
pub bid_redemption_info: &'a AccountInfo<'a>,
pub winning_index: Option<usize>,
pub redemption_bump_seed: u8,
pub bid_redeemed: bool,
pub participation_redeemed: bool,
pub winning_item_index: Option<usize>,
pub overwrite_win_index: Option<usize>,
}
#[allow(clippy::too_many_arguments)]
pub fn common_redeem_finish(args: CommonRedeemFinishArgs) -> ProgramResult {
let CommonRedeemFinishArgs {
program_id,
mut auction_manager,
auction_manager_info,
bidder_metadata_info,
rent_info,
system_info,
payer_info,
bid_redemption_info,
winning_index,
redemption_bump_seed,
bid_redeemed,
participation_redeemed,
winning_item_index,
overwrite_win_index,
} = args;
if bid_redeemed {
if let Some(index) = winning_index {
if let Some(item_index) = winning_item_index {
auction_manager.state.winning_config_states[index].items[item_index].claimed = true;
}
}
}
if (bid_redeemed || participation_redeemed) && overwrite_win_index.is_none() {
let redemption_seeds = &[
PREFIX.as_bytes(),
auction_manager.auction.as_ref(),
bidder_metadata_info.key.as_ref(),
&[redemption_bump_seed],
];
if bid_redemption_info.data_is_empty() {
create_or_allocate_account_raw(
*program_id,
&bid_redemption_info,
&rent_info,
&system_info,
&payer_info,
MAX_BID_REDEMPTION_TICKET_SIZE,
redemption_seeds,
)?;
}
let mut bid_redemption = BidRedemptionTicket::from_account_info(bid_redemption_info)?;
bid_redemption.key = Key::BidRedemptionTicketV1;
| } else if bid_redeemed {
bid_redemption.items_redeemed += 1;
}
bid_redemption.serialize(&mut *bid_redemption_info.data.borrow_mut())?;
}
let mut open_claims = false;
for state in &auction_manager.state.winning_config_states {
for item in &state.items {
if !item.claimed {
open_claims = true;
break;
}
}
}
if !open_claims {
auction_manager.state.status = AuctionManagerStatus::Finished
}
auction_manager.serialize(&mut *auction_manager_info.data.borrow_mut())?;
Ok(())
}
pub struct CommonWinningConfigCheckReturn {
pub winning_config_item: WinningConfigItem,
pub winning_item_index: Option<usize>,
}
pub fn common_winning_config_checks(
auction_manager: &AuctionManager,
safety_deposit_info: &AccountInfo,
winning_index: usize,
) -> Result<CommonWinningConfigCheckReturn, ProgramError> {
let winning_config = &auction_manager.settings.winning_configs[winning_index];
let winning_config_state = &auction_manager.state.winning_config_states[winning_index];
let mut winning_item_index = None;
for i in 0..winning_config.items.len() {
let order: usize = 97;
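        // Editor's note (assumption): offset 97 is taken to be the `order` field in the
        // token-vault program's SafetyDepositBox account layout, so the comparison below
        // matches the winning config item against this particular safety deposit box.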
if winning_config.items[i].safety_deposit_box_index
== safety_deposit_info.data.borrow()[order]
{
winning_item_index = Some(i);
break;
}
}
let winning_config_item = match winning_item_index {
Some(index) => winning_config.items[index],
None => return Err(MetaplexError::SafetyDepositBoxNotUsedInAuction.into()),
};
let winning_config_state_item = match winning_item_index {
Some(index) => winning_config_state.items[index],
None => return Err(MetaplexError::SafetyDepositBoxNotUsedInAuction.into()),
};
if winning_config_state_item.claimed {
return Err(MetaplexError::PrizeAlreadyClaimed.into());
}
Ok(CommonWinningConfigCheckReturn {
winning_config_item,
winning_item_index,
})
}
#[allow(clippy::too_many_arguments)]
pub fn shift_authority_back_to_originating_user<'a>(
program_id: &Pubkey,
auction_manager: &AuctionManager,
auction_manager_info: &AccountInfo<'a>,
master_metadata_info: &AccountInfo<'a>,
original_authority: &AccountInfo<'a>,
original_authority_lookup_info: &AccountInfo<'a>,
printing_mint_info: &AccountInfo<'a>,
token_program_info: &AccountInfo<'a>,
authority_seeds: &[&[u8]],
) -> ProgramResult {
let original_authority_lookup_seeds = &[
PREFIX.as_bytes(),
&auction_manager.auction.as_ref(),
master_metadata_info.key.as_ref(),
];
let (expected_key, _) =
Pubkey::find_program_address(original_authority_lookup_seeds, &program_id);
if expected_key != *original_authority_lookup_info.key {
return Err(MetaplexError::OriginalAuthorityLookupKeyMismatch.into());
}
let original_authority_lookup: OriginalAuthorityLookup =
OriginalAuthorityLookup::from_account_info(original_authority_lookup_info)?;
if original_authority_lookup.original_authority != *original_authority.key {
return Err(MetaplexError::OriginalAuthorityMismatch.into());
}
transfer_mint_authority(
authority_seeds,
original_authority.key,
original_authority,
printing_mint_info,
auction_manager_info,
token_program_info,
)?;
Ok(())
}
// TODO due to a weird stack access violation bug we had to remove the args struct from this method
// to get redemptions working again after integrating new Auctions program. Try to bring it back one day
#[inline(always)]
pub fn spl_token_transfer<'a: 'b, 'b>(
source: AccountInfo<'a>,
destination: AccountInfo<'a>,
amount: u64,
authority: AccountInfo<'a>,
authority_signer_seeds: &'b [&'b [u8]],
token_program: AccountInfo<'a>,
) -> ProgramResult {
let result = invoke_signed(
&spl_token::instruction::transfer(
token_program.key,
source.key,
destination.key,
authority.key,
&[],
amount,
)?,
&[source, destination, authority, token_program],
&[authority_signer_seeds],
);
result.map_err(|_| MetaplexError::TokenTransferFailed.into())
}
pub fn assert_edition_valid(
program_id: &Pubkey,
mint: &Pubkey,
edition_account_info: &AccountInfo,
) -> ProgramResult {
let edition_seeds = &[
spl_token_metadata::state::PREFIX.as_bytes(),
program_id.as_ref(),
&mint.as_ref(),
EDITION.as_bytes(),
];
let (edition_key, _) = Pubkey::find_program_address(edition_seeds, program_id);
if edition_key != *edition_account_info.key {
return Err(MetaplexError::InvalidEditionKey.into());
}
Ok(())
}
// TODO due to a weird stack access violation bug we had to remove the args struct from this method
// to get redemptions working again after integrating new Auctions program. Try to bring it back one day.
pub fn spl_token_mint_to<'a: 'b, 'b>(
mint: AccountInfo<'a>,
destination: AccountInfo<'a>,
amount: u64,
authority: AccountInfo<'a>,
authority_signer_seeds: &'b [&'b [u8]],
token_program: AccountInfo<'a>,
) -> ProgramResult {
let result = invoke_signed(
&spl_token::instruction::mint_to(
token_program.key,
mint.key,
destination.key,
authority.key,
&[],
amount,
)?,
&[mint, destination, authority, token_program],
&[authority_signer_seeds],
);
result.map_err(|_| MetaplexError::TokenMintToFailed.into())
}
pub fn assert_derivation(
program_id: &Pubkey,
account: &AccountInfo,
path: &[&[u8]],
) -> Result<u8, ProgramError> {
let (key, bump) = Pubkey::find_program_address(&path, program_id);
if key != *account.key {
return Err(MetaplexError::DerivedKeyInvalid.into());
}
Ok(bump)
}
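// Illustrative usage sketch (editor's addition, not from the original source): verifying the
// bid redemption PDA with the same seed layout that `common_redeem_checks` builds above.
//
// let bump = assert_derivation(
//     program_id,
//     bid_redemption_info,
//     &[
//         PREFIX.as_bytes(),
//         auction_manager.auction.as_ref(),
//         bidder_metadata_info.key.as_ref(),
//     ],
// )?;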
pub fn try_from_slice_checked<T: BorshDeserialize>(
data: &[u8],
data_type: Key,
data_size: usize,
) -> Result<T, ProgramError> {
if (data[0] != data_type as u8 && data[0] != Key::Uninitialized as u8)
|| data.len() != data_size
{
return Err(MetaplexError::DataTypeMismatch.into());
}
let result: T = try_from_slice_unchecked(data)?;
Ok(result)
} | if participation_redeemed {
bid_redemption.participation_redeemed = true |
index.d.ts | // Type definitions for bytebuffer.js 5.0.0
// Project: https://github.com/dcodeIO/bytebuffer.js
// Definitions by: Denis Cappellin <https://github.com/dcappellin>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// Definitions by: SINTEF-9012 <https://github.com/SINTEF-9012>
// Definitions by: Marek Urbanowicz <https://github.com/murbanowicz>
/// <reference types="node" />
import Long = require('long');
declare namespace ByteBuffer {}
export = ByteBuffer;
export as namespace ByteBuffer;
declare class ByteBuffer {
/**
* Constructs a new ByteBuffer.
*/
constructor(capacity?: number, littleEndian?: boolean, noAssert?: boolean);
/**
* Big endian constant that can be used instead of its boolean value. Evaluates to false.
*/
static BIG_ENDIAN: boolean;
/**
* Default initial capacity of 16.
*/
static DEFAULT_CAPACITY: number;
/**
     * Default endianness of false for big endian.
*/
static DEFAULT_ENDIAN: boolean;
/**
* Default no assertions flag of false.
*/
static DEFAULT_NOASSERT: boolean;
/**
* Little endian constant that can be used instead of its boolean value. Evaluates to true.
*/
static LITTLE_ENDIAN: boolean;
/**
* Maximum number of bytes required to store a 32bit base 128 variable-length integer.
*/
static MAX_VARINT32_BYTES: number;
/**
* Maximum number of bytes required to store a 64bit base 128 variable-length integer.
*/
static MAX_VARINT64_BYTES: number;
/**
     * Metrics representing number of bytes. Evaluates to 2.
*/
static METRICS_BYTES: number;
/**
     * Metrics representing number of UTF8 characters. Evaluates to 1.
*/
static METRICS_CHARS: number;
/**
* ByteBuffer version.
*/
static VERSION: string;
/**
* Backing buffer.
*/
buffer: Buffer;
/**
* Absolute limit of the contained data. Set to the backing buffer's capacity upon allocation.
*/
limit: number;
/**
* Whether to use little endian byte order, defaults to false for big endian.
*/
littleEndian: boolean;
/**
* Marked offset.
*/
markedOffset: number;
/**
* Whether to skip assertions of offsets and values, defaults to false.
*/
noAssert: boolean;
/**
* Absolute read/write offset.
*/
offset: number;
/**
* Data view to manipulate the backing buffer. Becomes null if the backing buffer has a capacity of 0.
*/
view: DataView;
/**
* Allocates a new ByteBuffer backed by a buffer of the specified capacity.
*/
static allocate(capacity?: number, littleEndian?: boolean, noAssert?: boolean): ByteBuffer;
/**
* Decodes a base64 encoded string to binary like window.atob does.
*/
static atob(b64: string): string;
/**
* Encodes a binary string to base64 like window.btoa does.
*/
static btoa(str: string): string;
/**
* Calculates the number of UTF8 bytes of a string.
*/
static calculateUTF8Bytes(str: string): number;
/**
     * Calculates the number of UTF8 characters of a string. JavaScript itself uses UTF-16, so that a string's length property does not reflect its actual UTF8 size if it contains code points larger than 0xFFFF.
*/
static calculateUTF8Chars(str: string): number;
/**
* Calculates the number of UTF8 bytes of a string. This is an alias of ByteBuffer#calculateUTF8Bytes.
*/
static calculateString(str: string): number;
/**
* Calculates the actual number of bytes required to store a 32bit base 128 variable-length integer.
*/
static calculateVarint32(value: number): number;
/**
* Calculates the actual number of bytes required to store a 64bit base 128 variable-length integer.
*/
static calculateVarint64(value: number | Long): number;
/**
* Concatenates multiple ByteBuffers into one.
*/
static concat(
buffers: Array<ByteBuffer | Buffer | ArrayBuffer | Uint8Array | string>,
encoding?: string | boolean, | noAssert?: boolean,
): ByteBuffer;
/**
* Decodes a base64 encoded string to a ByteBuffer.
*/
static fromBase64(str: string, littleEndian?: boolean, noAssert?: boolean): ByteBuffer;
/**
* Decodes a binary encoded string, that is using only characters 0x00-0xFF as bytes, to a ByteBuffer.
*/
static fromBinary(str: string, littleEndian?: boolean, noAssert?: boolean): ByteBuffer;
/**
* Decodes a hex encoded string with marked offsets to a ByteBuffer.
*/
static fromDebug(str: string, littleEndian?: boolean, noAssert?: boolean): ByteBuffer;
/**
* Decodes a hex encoded string to a ByteBuffer.
*/
static fromHex(str: string, littleEndian?: boolean, noAssert?: boolean): ByteBuffer;
/**
* Decodes an UTF8 encoded string to a ByteBuffer.
*/
static fromUTF8(str: string, littleEndian?: boolean, noAssert?: boolean): ByteBuffer;
/**
* Gets the backing buffer type.
*/
static isByteBuffer(bb: any): boolean;
/**
* Wraps a buffer or a string. Sets the allocated ByteBuffer's ByteBuffer#offset to 0 and its ByteBuffer#limit to the length of the wrapped data.
* @param buffer Anything that can be wrapped
* @param encoding String encoding if buffer is a string ("base64", "hex", "binary", defaults to "utf8")
* @param littleEndian Whether to use little or big endian byte order. Defaults to ByteBuffer.DEFAULT_ENDIAN.
* @param noAssert Whether to skip assertions of offsets and values. Defaults to ByteBuffer.DEFAULT_NOASSERT.
*/
static wrap(
buffer: ByteBuffer | Buffer | ArrayBuffer | Uint8Array | string,
enc?: string | boolean,
littleEndian?: boolean,
noAssert?: boolean,
): ByteBuffer;
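    // Usage sketch (editor's addition, not part of the upstream typings); assumes the runtime
    // bytebuffer.js API matches these declarations:
    //   const bb = ByteBuffer.wrap("deadbeef", "hex");
    //   const value = bb.readUint32(0); // 0xDEADBEEF with the default big endian order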
/**
* Decodes a zigzag encoded signed 32bit integer.
*/
static zigZagDecode32(n: number): number;
/**
* Decodes a zigzag encoded signed 64bit integer.
*/
static zigZagDecode64(n: number | Long): Long;
/**
* Zigzag encodes a signed 32bit integer so that it can be effectively used with varint encoding.
*/
static zigZagEncode32(n: number): number;
/**
* Zigzag encodes a signed 64bit integer so that it can be effectively used with varint encoding.
*/
static zigZagEncode64(n: number | Long): Long;
/**
* Switches (to) big endian byte order.
*/
BE(bigEndian?: boolean): this;
/**
* Switches (to) little endian byte order.
*/
LE(bigEndian?: boolean): this;
/**
* Appends some data to this ByteBuffer. This will overwrite any contents behind the specified offset up to the appended data's length.
*/
append(
source: ByteBuffer | Buffer | ArrayBuffer | Uint8Array | string,
encoding?: string | number,
offset?: number,
): this;
/**
* Appends this ByteBuffer's contents to another ByteBuffer. This will overwrite any contents behind the specified offset up to the length of this ByteBuffer's data.
*/
appendTo(target: ByteBuffer, offset?: number): this;
/**
* Enables or disables assertions of argument types and offsets. Assertions are enabled by default but you can opt to disable them if your code already makes sure that everything is valid.
*/
assert(assert: boolean): this;
/**
* Gets the capacity of this ByteBuffer's backing buffer.
*/
capacity(): number;
/**
* Clears this ByteBuffer's offsets by setting ByteBuffer#offset to 0 and
* ByteBuffer#limit to the backing buffer's capacity. Discards ByteBuffer#markedOffset.
*/
clear(): this;
/**
* Creates a cloned instance of this ByteBuffer, preset with this ByteBuffer's values for ByteBuffer#offset, ByteBuffer#markedOffset and ByteBuffer#limit.
*/
clone(copy?: boolean): ByteBuffer;
/**
* Compacts this ByteBuffer to be backed by a ByteBuffer#buffer of its contents' length. Contents are the bytes between ByteBuffer#offset and ByteBuffer#limit. Will set offset = 0 and limit = capacity and adapt ByteBuffer#markedOffset to the same relative position if set.
*/
compact(begin?: number, end?: number): this;
/**
* Creates a copy of this ByteBuffer's contents. Contents are the bytes between ByteBuffer#offset and ByteBuffer#limit.
*/
copy(begin?: number, end?: number): ByteBuffer;
/**
* Copies this ByteBuffer's contents to another ByteBuffer. Contents are the bytes between ByteBuffer#offset and ByteBuffer#limit.
*/
copyTo(target: ByteBuffer, targetOffset?: number, sourceOffset?: number, sourceLimit?: number): this;
/**
* Makes sure that this ByteBuffer is backed by a ByteBuffer#buffer of at least the specified capacity. If the current capacity is exceeded, it will be doubled. If double the current capacity is less than the required capacity, the required capacity will be used instead.
*/
ensureCapacity(capacity: number): this;
/**
* Overwrites this ByteBuffer's contents with the specified value. Contents are the bytes between ByteBuffer#offset and ByteBuffer#limit.
*/
fill(value: number | string, begin?: number, end?: number): this;
/**
* Makes this ByteBuffer ready for a new sequence of write or relative read operations. Sets limit = offset and offset = 0. Make sure always to flip a ByteBuffer when all relative read or write operations are complete.
*/
flip(): this;
/**
* Marks an offset on this ByteBuffer to be used later.
*/
mark(offset?: number): this;
/**
* Sets the byte order.
*/
order(littleEndian: boolean): this;
/**
* Prepends some data to this ByteBuffer. This will overwrite any contents before the specified offset up to the prepended data's length. If there is not enough space available before the specified offset, the backing buffer will be resized and its contents moved accordingly.
*/
prepend(
source: ByteBuffer | string | ArrayBuffer | Buffer,
encoding?: string | number,
offset?: number,
): this;
/**
* Prepends this ByteBuffer to another ByteBuffer. This will overwrite any contents before the specified offset up to the prepended data's length. If there is not enough space available before the specified offset, the backing buffer will be resized and its contents moved accordingly.
*/
prependTo(target: ByteBuffer, offset?: number): this;
/**
* Prints debug information about this ByteBuffer's contents.
*/
printDebug(out?: (text: string) => void): void;
/**
* Reads an 8bit signed integer. This is an alias of ByteBuffer#readInt8.
*/
readByte(offset?: number): number;
/**
* Reads the specified number of bytes
*/
readBytes(length: number, offset?: number): ByteBuffer;
/**
* Reads a NULL-terminated UTF8 encoded string. For this to work the string read must not contain any NULL characters itself.
*/
readCString(): string;
readCString(offset: number): { string: string; length: number };
/**
* Reads a 64bit float. This is an alias of ByteBuffer#readFloat64.
*/
readDouble(offset?: number): number;
/**
* Reads a 32bit float. This is an alias of ByteBuffer#readFloat32.
*/
readFloat(offset?: number): number;
/**
* Reads a 32bit float.
*/
readFloat32(offset?: number): number;
/**
* Reads a 64bit float.
*/
readFloat64(offset?: number): number;
/**
* Reads a length as uint32 prefixed UTF8 encoded string.
*/
readIString(): string;
readIString(offset: number): { string: string; length: number };
/**
* Reads a 32bit signed integer.This is an alias of ByteBuffer#readInt32.
*/
readInt(offset?: number): number;
/**
* Reads a 16bit signed integer.
*/
readInt16(offset?: number): number;
/**
* Reads a 32bit signed integer.
*/
readInt32(offset?: number): number;
/**
* Reads a 64bit signed integer.
*/
readInt64(offset?: number): Long;
/**
* Reads an 8bit signed integer.
*/
readInt8(offset?: number): number;
/**
* Reads a 64bit signed integer. This is an alias of ByteBuffer#readInt64.
*/
readLong(offset?: number): Long;
/**
* Reads a 16bit signed integer. This is an alias of ByteBuffer#readInt16.
*/
readShort(offset?: number): number;
/**
* Reads an UTF8 encoded string. This is an alias of ByteBuffer#readUTF8String.
*/
readString(length: number, metrics?: number): string;
readString(length: number, metrics: number, offset: number): { string: string; length: number };
/**
* Reads an UTF8 encoded string.
*/
readUTF8String(chars: number, metrics?: number): string;
readUTF8String(chars: number, metrics: number, offset: number): { string: string; length: number };
/**
* Reads a 16bit unsigned integer.
*/
readUint16(offset?: number): number;
/**
* Reads a 32bit unsigned integer.
*/
readUint32(offset?: number): number;
/**
* Reads a 64bit unsigned integer.
*/
readUint64(offset?: number): Long;
/**
* Reads an 8bit unsigned integer.
*/
readUint8(offset?: number): number;
/**
* Reads a length as varint32 prefixed UTF8 encoded string.
*/
readVString(): string;
readVString(offset: number): { string: string; length: number };
/**
* Reads a 32bit base 128 variable-length integer.
*/
readVarint32(): number;
readVarint32(offset: number): { value: number; length: number };
/**
* Reads a zig-zag encoded 32bit base 128 variable-length integer.
*/
readVarint32ZigZag(): number;
readVarint32ZigZag(offset: number): { value: number; length: number };
/**
* Reads a 64bit base 128 variable-length integer. Requires Long.js.
*/
readVarint64(): Long;
readVarint64(offset: number): { value: Long; length: number };
/**
* Reads a zig-zag encoded 64bit base 128 variable-length integer. Requires Long.js.
*/
readVarint64ZigZag(): Long;
readVarint64ZigZag(offset: number): { value: Long; length: number };
/**
* Gets the number of remaining readable bytes. Contents are the bytes between ByteBuffer#offset and ByteBuffer#limit, so this returns limit - offset.
*/
remaining(): number;
/**
* Resets this ByteBuffer's ByteBuffer#offset. If an offset has been marked through ByteBuffer#mark before, offset will be set to ByteBuffer#markedOffset, which will then be discarded. If no offset has been marked, sets offset = 0.
*/
reset(): this;
/**
* Resizes this ByteBuffer to be backed by a buffer of at least the given capacity. Will do nothing if already that large or larger.
*/
resize(capacity: number): this;
/**
* Reverses this ByteBuffer's contents
*/
reverse(begin?: number, end?: number): this;
/**
* Skips the next length bytes. This will just advance
*/
skip(length: number): this;
/**
* Slices this ByteBuffer by creating a cloned instance with offset = begin and limit = end.
*/
slice(begin?: number, end?: number): ByteBuffer;
/**
* Returns a raw buffer compacted to contain this ByteBuffer's contents. Contents are the bytes between ByteBuffer#offset and ByteBuffer#limit. Will transparently ByteBuffer#flip this ByteBuffer if offset > limit but the actual offsets remain untouched. This is an alias of ByteBuffer#toBuffer.
*/
toArrayBuffer(forceCopy?: boolean): ArrayBuffer;
/**
* Encodes this ByteBuffer's contents to a base64 encoded string.
*/
toBase64(begin?: number, end?: number): string;
/**
* Encodes this ByteBuffer to a binary encoded string, that is using only characters 0x00-0xFF as bytes.
*/
toBinary(begin?: number, end?: number): string;
/**
* Returns a copy of the backing buffer that contains this ByteBuffer's contents. Contents are the bytes between ByteBuffer#offset and ByteBuffer#limit. Will transparently ByteBuffer#flip this ByteBuffer if offset > limit but the actual offsets remain untouched.
*/
toBuffer(forceCopy?: boolean): Buffer;
/**
     * Encodes this ByteBuffer to a hex encoded string with marked offsets. Offset symbols are:
* < : offset,
* ' : markedOffset,
* > : limit,
* | : offset and limit,
* [ : offset and markedOffset,
* ] : markedOffset and limit,
* ! : offset, markedOffset and limit
*/
toDebug(columns?: boolean): string | Array<string>;
/**
* Encodes this ByteBuffer's contents to a hex encoded string.
*/
toHex(begin?: number, end?: number): string;
/**
* Converts the ByteBuffer's contents to a string.
*/
toString(encoding?: string): string;
/**
* Encodes this ByteBuffer's contents between ByteBuffer#offset and ByteBuffer#limit to an UTF8 encoded string.
*/
toUTF8(): string;
/**
* Writes an 8bit signed integer. This is an alias of ByteBuffer#writeInt8.
*/
writeByte(value: number, offset?: number): this;
/**
* Writes an array of bytes. This is an alias for append
*/
writeBytes(
source: ByteBuffer | Buffer | ArrayBuffer | Uint8Array | string,
encoding?: string | number,
offset?: number,
): this;
/**
* Writes a NULL-terminated UTF8 encoded string. For this to work the specified string must not contain any NULL characters itself.
*/
writeCString(str: string, offset?: number): this;
/**
* Writes a 64bit float. This is an alias of ByteBuffer#writeFloat64.
*/
writeDouble(value: number, offset?: number): this;
/**
* Writes a 32bit float. This is an alias of ByteBuffer#writeFloat32.
*/
writeFloat(value: number, offset?: number): this;
/**
* Writes a 32bit float.
*/
writeFloat32(value: number, offset?: number): this;
/**
* Writes a 64bit float.
*/
writeFloat64(value: number, offset?: number): this;
/**
* Writes a length as uint32 prefixed UTF8 encoded string.
*/
writeIString(str: string, offset?: number): this;
/**
* Writes a 32bit signed integer. This is an alias of ByteBuffer#writeInt32.
*/
writeInt(value: number, offset?: number): this;
/**
* Writes a 16bit signed integer.
*/
writeInt16(value: number, offset?: number): this;
/**
* Writes a 32bit signed integer.
*/
writeInt32(value: number, offset?: number): this;
/**
* Writes a 64bit signed integer.
*/
writeInt64(value: number | Long, offset?: number): this;
/**
* Writes an 8bit signed integer.
*/
writeInt8(value: number, offset?: number): this;
/**
* Write a 64bit signed integer. This is an alias of ByteBuffer#writeInt64.
*/
writeLong(value: number | Long, offset?: number): this;
/**
* Writes a 16bit signed integer. This is an alias of ByteBuffer#writeInt16.
*/
writeShort(value: number, offset?: number): this;
/**
* Writes an UTF8 encoded string. This is an alias of ByteBuffer#writeUTF8String.
*/
writeString(str: string): this;
writeString(str: string, offset: number): number;
/**
* Writes an UTF8 encoded string.
*/
writeUTF8String(str: string): this;
writeUTF8String(str: string, offset?: number): number;
/**
* Writes a 16bit unsigned integer.
*/
writeUint16(value: number, offset?: number): this;
/**
* Writes a 32bit unsigned integer.
*/
writeUint32(value: number, offset?: number): this;
/**
* Writes a 64bit unsigned integer.
*/
writeUint64(value: number | Long, offset?: number): this;
/**
* Writes an 8bit unsigned integer.
*/
writeUint8(value: number, offset?: number): this;
/**
* Writes a length as varint32 prefixed UTF8 encoded string.
*/
writeVString(str: string): this;
writeVString(str: string, offset: number): number;
/**
* Writes a 32bit base 128 variable-length integer.
*/
writeVarint32(value: number): this;
writeVarint32(value: number, offset: number): number;
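    // For orientation (editor's addition): varints use 7 bits per byte, so
    // ByteBuffer.calculateVarint32(300) === 2, and zig-zag encoding maps small negative
    // numbers to small unsigned ones, e.g. ByteBuffer.zigZagEncode32(-1) === 1.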
/**
* Writes a zig-zag encoded 32bit base 128 variable-length integer.
*/
writeVarint32ZigZag(value: number): this;
writeVarint32ZigZag(value: number, offset: number): number;
/**
* Writes a 64bit base 128 variable-length integer.
*/
writeVarint64(value: number | Long): this;
writeVarint64(value: number | Long, offset: number): number;
/**
* Writes a zig-zag encoded 64bit base 128 variable-length integer.
*/
writeVarint64ZigZag(value: number | Long): this;
writeVarint64ZigZag(value: number | Long, offset: number): number;
} | littleEndian?: boolean,
monitor_manager.go | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package monitor
import (
"encoding/json"
"fmt"
"sort"
"strings"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
v1alpha1validation "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1/validation"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/features"
"github.com/pingcap/tidb-operator/pkg/label"
"github.com/pingcap/tidb-operator/pkg/manager/member"
"github.com/pingcap/tidb-operator/pkg/manager/meta"
"github.com/pingcap/tidb-operator/pkg/monitor"
"github.com/pingcap/tidb-operator/pkg/util"
utildiscovery "github.com/pingcap/tidb-operator/pkg/util/discovery"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/discovery"
discoverycachedmemory "k8s.io/client-go/discovery/cached/memory"
"k8s.io/klog"
)
type MonitorManager struct {
deps *controller.Dependencies
pvManager monitor.MonitorManager
discoveryInterface discovery.CachedDiscoveryInterface
}
const (
FailedSync = "FailedSync"
SuccessSync = "SuccessSync"
)
func | (deps *controller.Dependencies) *MonitorManager {
return &MonitorManager{
deps: deps,
pvManager: meta.NewReclaimPolicyManager(deps),
discoveryInterface: discoverycachedmemory.NewMemCacheClient(deps.KubeClientset.Discovery()),
}
}
func (m *MonitorManager) SyncMonitor(monitor *v1alpha1.TidbMonitor) error {
if monitor.DeletionTimestamp != nil {
return nil
}
if monitor.Spec.Clusters == nil || len(monitor.Spec.Clusters) < 1 {
klog.Errorf("tm[%s/%s] does not configure the target tidbcluster", monitor.Namespace, monitor.Name)
return nil
}
defaultTidbMonitor(monitor)
if !m.validate(monitor) {
return nil // fatal error, no need to retry on invalid object
}
var firstTc *v1alpha1.TidbCluster
assetStore := NewStore(m.deps.SecretLister)
for _, tcRef := range monitor.Spec.Clusters {
tc, err := m.deps.TiDBClusterLister.TidbClusters(tcRef.Namespace).Get(tcRef.Name)
if err != nil {
rerr := fmt.Errorf("get tm[%s/%s]'s target tc[%s/%s] failed, err: %v", monitor.Namespace, monitor.Name, tcRef.Namespace, tcRef.Name, err)
return rerr
}
        // If the cluster has TLS enabled
if tc.IsTLSClusterEnabled() {
tcTlsSecretName := util.ClusterClientTLSSecretName(tc.Name)
err := assetStore.addTLSAssets(tc.Namespace, tcTlsSecretName)
if err != nil {
return err
}
}
if firstTc == nil && !tc.IsHeterogeneous() {
firstTc = tc
}
if tc.Status.Monitor != nil {
if tc.Status.Monitor.Name != monitor.Name || tc.Status.Monitor.Namespace != monitor.Namespace {
err := fmt.Errorf("tm[%s/%s]'s target tc[%s/%s] already referenced by TidbMonitor [%s/%s]", monitor.Namespace, monitor.Name, tc.Namespace, tc.Name, tc.Status.Monitor.Namespace, tc.Status.Monitor.Name)
m.deps.Recorder.Event(monitor, corev1.EventTypeWarning, FailedSync, err.Error())
return err
}
}
        // TODO: Support a validating webhook that forbids a TidbMonitor from updating the monitorRef of a TidbCluster
        // whose monitorRef has already been set by another TidbMonitor.
        // Patch the TidbCluster status first to avoid multiple TidbMonitors monitoring the same TidbCluster
if !tc.IsHeterogeneous() {
if err := m.patchTidbClusterStatus(tc, monitor); err != nil {
message := fmt.Sprintf("Sync TidbMonitorRef into targetCluster[%s/%s] status failed, err:%v", tc.Namespace, tc.Name, err)
klog.Error(message)
m.deps.Recorder.Event(monitor, corev1.EventTypeWarning, FailedSync, err.Error())
return err
}
}
}
// create or update tls asset secret
err := m.syncAssetSecret(monitor, assetStore)
if err != nil {
return err
}
var firstDc *v1alpha1.DMCluster
if monitor.Spec.DM != nil {
for _, dcRef := range monitor.Spec.DM.Clusters {
dc, err := m.deps.DMClusterLister.DMClusters(dcRef.Namespace).Get(dcRef.Name)
if err != nil {
rerr := fmt.Errorf("get tm[%s/%s]'s target dc[%s/%s] failed, err: %v", monitor.Namespace, monitor.Name, dcRef.Namespace, dcRef.Name, err)
return rerr
} else {
firstDc = dc
break
}
}
}
// Sync Service
if err := m.syncTidbMonitorService(monitor); err != nil {
message := fmt.Sprintf("Sync TidbMonitor[%s/%s] Service failed, err: %v", monitor.Namespace, monitor.Name, err)
m.deps.Recorder.Event(monitor, corev1.EventTypeWarning, FailedSync, message)
return err
}
klog.V(4).Infof("tm[%s/%s]'s service synced", monitor.Namespace, monitor.Name)
// Sync Statefulset
if err := m.syncTidbMonitorStatefulset(firstTc, firstDc, monitor); err != nil {
message := fmt.Sprintf("Sync TidbMonitor[%s/%s] Deployment failed,err:%v", monitor.Namespace, monitor.Name, err)
m.deps.Recorder.Event(monitor, corev1.EventTypeWarning, FailedSync, message)
return err
}
// Sync PV
if monitor.Spec.Persistent {
// syncing all PVs managed by this tidbmonitor
if err := m.pvManager.SyncMonitor(monitor); err != nil {
return err
}
if err := m.syncTidbMonitorPV(monitor); err != nil {
return err
}
klog.V(4).Infof("tm[%s/%s]'s pv synced", monitor.Namespace, monitor.Name)
}
klog.V(4).Infof("tm[%s/%s]'s StatefulSet synced", monitor.Namespace, monitor.Name)
// Sync Ingress
if err := m.syncIngress(monitor); err != nil {
message := fmt.Sprintf("Sync TidbMonitor[%s/%s] Ingress failed,err:%v", monitor.Namespace, monitor.Name, err)
m.deps.Recorder.Event(monitor, corev1.EventTypeWarning, FailedSync, message)
return err
}
klog.V(4).Infof("tm[%s/%s]'s ingress synced", monitor.Namespace, monitor.Name)
return nil
}
func (m *MonitorManager) syncTidbMonitorService(monitor *v1alpha1.TidbMonitor) error {
services := getMonitorService(monitor)
for _, newSvc := range services {
if err := member.CreateOrUpdateService(m.deps.ServiceLister, m.deps.ServiceControl, newSvc, monitor); err != nil {
return err
}
}
return nil
}
func (m *MonitorManager) syncTidbMonitorStatefulset(tc *v1alpha1.TidbCluster, dc *v1alpha1.DMCluster, monitor *v1alpha1.TidbMonitor) error {
ns := monitor.Namespace
name := monitor.Name
cm, err := m.syncTidbMonitorConfig(dc, monitor)
if err != nil {
klog.Errorf("tm[%s/%s]'s configmap failed to sync,err: %v", ns, name, err)
return err
}
secret, err := m.syncTidbMonitorSecret(monitor)
if err != nil {
klog.Errorf("tm[%s/%s]'s secret failed to sync,err: %v", ns, name, err)
return err
}
sa, err := m.syncTidbMonitorRbac(monitor)
if err != nil {
klog.Errorf("tm[%s/%s]'s rbac failed to sync,err: %v", ns, name, err)
return err
}
result, err := m.smoothMigrationToStatefulSet(monitor)
if err != nil {
klog.Errorf("Fail to migrate from deployment to statefulset for tm [%s/%s], err: %v", ns, name, err)
return err
}
if !result {
klog.Infof("Wait for the smooth migration to be done successfully for tm [%s/%s]", ns, name)
return nil
}
newMonitorSts, err := getMonitorStatefulSet(sa, cm, secret, monitor, tc, dc)
if err != nil {
klog.Errorf("Fail to generate statefulset for tm [%s/%s], err: %v", ns, name, err)
return err
}
oldMonitorSetTmp, err := m.deps.StatefulSetLister.StatefulSets(ns).Get(GetMonitorObjectName(monitor))
if err != nil && !errors.IsNotFound(err) {
return fmt.Errorf("syncTidbMonitorStatefulset: fail to get sts %s for cluster %s/%s, error: %s", GetMonitorObjectName(monitor), ns, name, err)
}
setNotExist := errors.IsNotFound(err)
if setNotExist {
err = member.SetStatefulSetLastAppliedConfigAnnotation(newMonitorSts)
if err != nil {
return err
}
if err := m.deps.StatefulSetControl.CreateStatefulSet(monitor, newMonitorSts); err != nil {
return err
}
return controller.RequeueErrorf("TidbMonitor: [%s/%s], waiting for tidbmonitor running", ns, name)
}
return member.UpdateStatefulSet(m.deps.StatefulSetControl, monitor, newMonitorSts, oldMonitorSetTmp)
}
func (m *MonitorManager) syncTidbMonitorSecret(monitor *v1alpha1.TidbMonitor) (*corev1.Secret, error) {
if monitor.Spec.Grafana == nil {
return nil, nil
}
newSt := getMonitorSecret(monitor)
return m.deps.TypedControl.CreateOrUpdateSecret(monitor, newSt)
}
func (m *MonitorManager) syncTidbMonitorConfig(dc *v1alpha1.DMCluster, monitor *v1alpha1.TidbMonitor) (*corev1.ConfigMap, error) {
if features.DefaultFeatureGate.Enabled(features.AutoScaling) {
// TODO: We need to update the status to tell users we are monitoring extra clusters
// Get all autoscaling clusters for TC, and add them to .Spec.Clusters to
// generate Prometheus config without modifying the original TidbMonitor
cloned := monitor.DeepCopy()
autoTcRefs := []v1alpha1.TidbClusterRef{}
for _, tcRef := range monitor.Spec.Clusters {
r1, err := labels.NewRequirement(label.AutoInstanceLabelKey, selection.Exists, nil)
if err != nil {
klog.Errorf("tm[%s/%s] gets tc[%s/%s]'s autoscaling clusters failed, err: %v", monitor.Namespace, monitor.Name, tcRef.Namespace, tcRef.Name, err)
continue
}
r2, err := labels.NewRequirement(label.BaseTCLabelKey, selection.Equals, []string{tcRef.Name})
if err != nil {
klog.Errorf("tm[%s/%s] gets tc[%s/%s]'s autoscaling clusters failed, err: %v", monitor.Namespace, monitor.Name, tcRef.Namespace, tcRef.Name, err)
continue
}
selector := labels.NewSelector().Add(*r1).Add(*r2)
tcList, err := m.deps.TiDBClusterLister.TidbClusters(tcRef.Namespace).List(selector)
if err != nil {
klog.Errorf("tm[%s/%s] gets tc[%s/%s]'s autoscaling clusters failed, err: %v", monitor.Namespace, monitor.Name, tcRef.Namespace, tcRef.Name, err)
continue
}
for _, autoTc := range tcList {
autoTcRefs = append(autoTcRefs, v1alpha1.TidbClusterRef{
Name: autoTc.Name,
Namespace: autoTc.Namespace,
})
}
}
// Sort Autoscaling TC for stability
sort.Slice(autoTcRefs, func(i, j int) bool {
cmpNS := strings.Compare(autoTcRefs[i].Namespace, autoTcRefs[j].Namespace)
if cmpNS == 0 {
return strings.Compare(autoTcRefs[i].Name, autoTcRefs[j].Name) < 0
}
return cmpNS < 0
})
cloned.Spec.Clusters = append(cloned.Spec.Clusters, autoTcRefs...)
monitor = cloned
}
var monitorClusterInfos []ClusterRegexInfo
for _, tcRef := range monitor.Spec.Clusters {
tc, err := m.deps.TiDBClusterLister.TidbClusters(tcRef.Namespace).Get(tcRef.Name)
if err != nil {
rerr := fmt.Errorf("get tm[%s/%s]'s target tc[%s/%s] failed, err: %v", monitor.Namespace, monitor.Name, tcRef.Namespace, tcRef.Name, err)
return nil, rerr
}
clusterRegex := ClusterRegexInfo{
Name: tcRef.Name,
Namespace: tcRef.Namespace,
}
        // If the cluster has TLS enabled
if tc.IsTLSClusterEnabled() {
clusterRegex.enableTLS = true
}
monitorClusterInfos = append(monitorClusterInfos, clusterRegex)
}
newCM, err := getMonitorConfigMap(dc, monitor, monitorClusterInfos)
if err != nil {
return nil, err
}
config := monitor.Spec.Prometheus.Config
if config != nil && config.ConfigMapRef != nil && len(config.ConfigMapRef.Name) > 0 {
namespace := monitor.Namespace
if config.ConfigMapRef.Namespace != nil {
namespace = *config.ConfigMapRef.Namespace
}
externalCM, err := m.deps.ConfigMapControl.GetConfigMap(monitor, &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: config.ConfigMapRef.Name,
Namespace: namespace,
},
})
if err != nil {
klog.Errorf("tm[%s/%s]'s configMap failed to get,err: %v", namespace, config.ConfigMapRef.Name, err)
return nil, err
}
if externalContent, ok := externalCM.Data["prometheus-config"]; ok {
newCM.Data["prometheus-config"] = externalContent
}
}
return m.deps.TypedControl.CreateOrUpdateConfigMap(monitor, newCM)
}
func (m *MonitorManager) syncTidbMonitorRbac(monitor *v1alpha1.TidbMonitor) (*corev1.ServiceAccount, error) {
sa := getMonitorServiceAccount(monitor)
sa, err := m.deps.TypedControl.CreateOrUpdateServiceAccount(monitor, sa)
if err != nil {
klog.Errorf("tm[%s/%s]'s serviceaccount failed to sync,err: %v", monitor.Namespace, monitor.Name, err)
return nil, err
}
policyRules := []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"pods"},
Verbs: []string{"get", "list", "watch"},
},
}
if supported, err := utildiscovery.IsAPIGroupVersionSupported(m.discoveryInterface, "security.openshift.io/v1"); err != nil {
return nil, err
} else if supported {
// We must use 'anyuid' SecurityContextConstraint to run our container as root.
// https://docs.openshift.com/container-platform/4.3/authentication/managing-security-context-constraints.html
policyRules = append(policyRules, rbac.PolicyRule{
APIGroups: []string{"security.openshift.io"},
ResourceNames: []string{"anyuid"},
Resources: []string{"securitycontextconstraints"},
Verbs: []string{"use"},
})
}
if monitor.Spec.ClusterScoped {
role := getMonitorClusterRole(monitor, policyRules)
role, err = m.deps.TypedControl.CreateOrUpdateClusterRole(monitor, role)
if err != nil {
klog.Errorf("tm[%s/%s]'s clusterrole failed to sync, err: %v", monitor.Namespace, monitor.Name, err)
return nil, err
}
rb := getMonitorClusterRoleBinding(sa, role, monitor)
_, err = m.deps.TypedControl.CreateOrUpdateClusterRoleBinding(monitor, rb)
if err != nil {
klog.Errorf("tm[%s/%s]'s clusterrolebinding failed to sync, err: %v", monitor.Namespace, monitor.Name, err)
return nil, err
}
} else {
role := getMonitorRole(monitor, policyRules)
role, err = m.deps.TypedControl.CreateOrUpdateRole(monitor, role)
if err != nil {
klog.Errorf("tm[%s/%s]'s role failed to sync,err: %v", monitor.Namespace, monitor.Name, err)
return nil, err
}
rb := getMonitorRoleBinding(sa, role, monitor)
_, err = m.deps.TypedControl.CreateOrUpdateRoleBinding(monitor, rb)
if err != nil {
klog.Errorf("tm[%s/%s]'s rolebinding failed to sync,err: %v", monitor.Namespace, monitor.Name, err)
return nil, err
}
}
return sa, nil
}
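// For reference (editor's note, not in the original source): the base PolicyRule built in
// syncTidbMonitorRbac corresponds to this RBAC rule once rendered as YAML:
//
//   - apiGroups: [""]
//     resources: ["pods"]
//     verbs: ["get", "list", "watch"]
//
// On OpenShift an extra rule granting "use" on the "anyuid" SecurityContextConstraint is
// appended before the Role/ClusterRole is created or updated.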
func (m *MonitorManager) syncIngress(monitor *v1alpha1.TidbMonitor) error {
if err := m.syncPrometheusIngress(monitor); err != nil {
return err
}
return m.syncGrafanaIngress(monitor)
}
func (m *MonitorManager) syncPrometheusIngress(monitor *v1alpha1.TidbMonitor) error {
if monitor.Spec.Prometheus.Ingress == nil {
return m.removeIngressIfExist(monitor, prometheusName(monitor))
}
ingress := getPrometheusIngress(monitor)
_, err := m.deps.TypedControl.CreateOrUpdateIngress(monitor, ingress)
return err
}
func (m *MonitorManager) syncGrafanaIngress(monitor *v1alpha1.TidbMonitor) error {
if monitor.Spec.Grafana == nil || monitor.Spec.Grafana.Ingress == nil {
return m.removeIngressIfExist(monitor, grafanaName(monitor))
}
ingress := getGrafanaIngress(monitor)
_, err := m.deps.TypedControl.CreateOrUpdateIngress(monitor, ingress)
return err
}
// removeIngressIfExist removes Ingress if it exists
func (m *MonitorManager) removeIngressIfExist(monitor *v1alpha1.TidbMonitor, name string) error {
ingress, err := m.deps.IngressLister.Ingresses(monitor.Namespace).Get(name)
if err != nil {
if errors.IsNotFound(err) {
return nil
}
return err
}
return m.deps.TypedControl.Delete(monitor, ingress)
}
func (m *MonitorManager) patchTidbClusterStatus(tc *v1alpha1.TidbCluster, monitor *v1alpha1.TidbMonitor) error {
var mergePatch []byte
var err error
grafanaEnabled := true
if monitor.Spec.Grafana == nil {
grafanaEnabled = false
}
mergePatch, err = json.Marshal(map[string]interface{}{
"status": map[string]interface{}{
"monitor": map[string]interface{}{
"name": monitor.Name,
"namespace": monitor.Namespace,
"grafanaEnabled": grafanaEnabled,
},
},
})
if err != nil {
return err
}
_, err = m.deps.TiDBClusterControl.Patch(tc, mergePatch)
return err
}
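// Illustrative only (editor's note): the merge patch marshalled in patchTidbClusterStatus
// has the shape
//
//   {"status":{"monitor":{"name":"<tm-name>","namespace":"<tm-ns>","grafanaEnabled":true}}}
//
// and is applied to the target TidbCluster through m.deps.TiDBClusterControl.Patch.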
func (m *MonitorManager) smoothMigrationToStatefulSet(monitor *v1alpha1.TidbMonitor) (bool, error) {
if m.deps.PVLister == nil {
klog.V(4).Infof("Persistent volumes lister is unavailable, skip migrating to statefulset for tm[%s/%s]. This may be caused by no relevant permissions",
monitor.Namespace, monitor.Name)
return true, nil
}
// determine whether there is an old deployment
oldDeploymentName := GetMonitorObjectName(monitor)
oldDeployment, err := m.deps.DeploymentLister.Deployments(monitor.Namespace).Get(oldDeploymentName)
if err == nil {
klog.Infof("The old deployment exists, start smooth migration for tm [%s/%s]", monitor.Namespace, monitor.Name)
        // if the deployment exists, delete it and wait for the next reconcile.
err = m.deps.TypedControl.Delete(monitor, &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: oldDeployment.Name,
Namespace: oldDeployment.Namespace,
},
})
if err != nil {
klog.Errorf("Smooth migration for tm[%s/%s], fail to delete the old deployment, err: %v", monitor.Namespace, monitor.Name, err)
return false, err
}
        // If persistence is enabled, the operator needs to migrate the PVC and PV binding relationship.
return !monitor.Spec.Persistent, nil
}
if !errors.IsNotFound(err) {
klog.Errorf("Fail to get deployment for tm [%s/%s], err: %v", monitor.Namespace, monitor.Name, err)
return false, err
}
if !monitor.Spec.Persistent {
return true, nil
}
firstStsPvcName := GetMonitorFirstPVCName(monitor.Name)
deploymentPvcName := GetMonitorObjectName(monitor)
deploymentPvc, err := m.deps.PVCLister.PersistentVolumeClaims(monitor.Namespace).Get(deploymentPvcName)
if err != nil {
if !errors.IsNotFound(err) {
klog.Errorf("Smooth migration for tm[%s/%s], get the PVC of the deployment error: %v", monitor.Namespace, monitor.Name, err)
return false, err
}
// If the PVC of the deployment does not exist and no old PV status, we don't need to migrate.
if monitor.Status.DeploymentStorageStatus == nil || len(monitor.Status.DeploymentStorageStatus.PvName) <= 0 {
return true, nil
}
deploymentPv, err := m.deps.PVLister.Get(monitor.Status.DeploymentStorageStatus.PvName)
if err != nil {
klog.Errorf("Smooth migration for tm[%s/%s], fail to patch PV %s, err: %v", monitor.Namespace, monitor.Name, monitor.Status.DeploymentStorageStatus.PvName, err)
return false, err
}
if deploymentPv.Spec.ClaimRef != nil && deploymentPv.Spec.ClaimRef.Name == firstStsPvcName {
            // smooth migration succeeded; clean up the status
monitor.Status.DeploymentStorageStatus = nil
return true, nil
}
err = m.patchPVClaimRef(deploymentPv, firstStsPvcName, monitor)
if err != nil {
klog.Errorf("Smooth migration for tm[%s/%s], fail to patch PV %s, err: %v", monitor.Namespace, monitor.Name, monitor.Status.DeploymentStorageStatus.PvName, err)
return false, err
}
        // smooth migration succeeded; clean up the status
monitor.Status.DeploymentStorageStatus = nil
return true, nil
}
if len(deploymentPvc.Spec.VolumeName) <= 0 {
klog.Infof("Smooth migration for tm[%s/%s], old pvc not bind pv and continue create statefulset", monitor.Namespace, monitor.Name)
return true, nil
}
deploymentPv, err := m.deps.PVLister.Get(deploymentPvc.Spec.VolumeName)
if err != nil {
klog.Errorf("Smooth migration for tm[%s/%s], fail to get PV %s, err: %v", monitor.Namespace, monitor.Name, deploymentPvc.Spec.VolumeName, err)
return false, err
}
if deploymentPv.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimDelete {
klog.Errorf("Smooth migration for tm[%s/%s], pv[%s] policy is delete, it must be retain", monitor.Namespace, monitor.Name, deploymentPvc.Spec.VolumeName)
return false, err
}
if deploymentPv.Spec.ClaimRef != nil && deploymentPv.Spec.ClaimRef.Name == firstStsPvcName {
        // smooth migration succeeded; clean up the status
monitor.Status.DeploymentStorageStatus = nil
return true, nil
}
// firstly patch status
if monitor.Status.DeploymentStorageStatus == nil {
monitor.Status.DeploymentStorageStatus = &v1alpha1.DeploymentStorageStatus{
PvName: deploymentPvc.Spec.VolumeName,
}
return false, controller.RequeueErrorf("TidbMonitor: [%s/%s] update deploymentStorageStatus requeue", monitor.Namespace, monitor.Name)
        // monitor patch status successfully
}
err = m.deps.PVCControl.DeletePVC(monitor, &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentPvcName,
Namespace: monitor.Namespace,
},
})
if err != nil {
klog.Errorf("Fail to delete the PVC %s for tm [%s/%s], err: %v", deploymentPvcName, monitor.Namespace, monitor.Name, err)
return false, err
}
err = m.patchPVClaimRef(deploymentPv, firstStsPvcName, monitor)
if err != nil {
klog.Errorf("Smooth migration for tm[%s/%s], fail to patch PV %s, err: %v", monitor.Namespace, monitor.Name, deploymentPvc.Spec.VolumeName, err)
return false, err
}
    // smooth migration succeeded; clean up the status
monitor.Status.DeploymentStorageStatus = nil
return true, nil
}
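// Editor's summary of the flow above: any old Deployment is deleted first; when persistence
// is enabled, the Deployment's PV name is recorded in DeploymentStorageStatus, the old PVC is
// deleted, the PV's claimRef is re-pointed at the first StatefulSet PVC, and the status is
// cleared once the PV is bound to the new claim.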
func (c *MonitorManager) validate(tidbmonitor *v1alpha1.TidbMonitor) bool {
errs := v1alpha1validation.ValidateTidbMonitor(tidbmonitor)
if len(errs) > 0 {
aggregatedErr := errs.ToAggregate()
klog.Errorf("tidbmonitor %s/%s is not valid and must be fixed first, aggregated error: %v", tidbmonitor.GetNamespace(), tidbmonitor.GetName(), aggregatedErr)
c.deps.Recorder.Event(tidbmonitor, corev1.EventTypeWarning, "FailedValidation", aggregatedErr.Error())
return false
}
return true
}
func (m *MonitorManager) syncTidbMonitorPV(tm *v1alpha1.TidbMonitor) error {
ns := tm.GetNamespace()
instanceName := tm.Name
if m.deps.PVLister == nil {
klog.V(4).Infof("Persistent volumes lister is unavailable, skip syncing TidbMonitor %s/%s PVs. This may be caused by no relevant permissions", ns, instanceName)
return nil
}
l, err := label.NewMonitor().Instance(instanceName).Monitor().Selector()
if err != nil {
return err
}
pods, err := m.deps.PodLister.Pods(ns).List(l)
if err != nil {
return fmt.Errorf("fail to list pods for tidbmonitor %s/%s, selector: %s, error: %v", ns, instanceName, l, err)
}
for _, pod := range pods {
// update meta info for pvc
pvcs, err := util.ResolvePVCFromPod(pod, m.deps.PVCLister)
if err != nil {
return err
}
for _, pvc := range pvcs {
if pvc.Spec.VolumeName == "" {
continue
}
// update meta info for pv
pv, err := m.deps.PVLister.Get(pvc.Spec.VolumeName)
if err != nil {
klog.Errorf("Get PV %s error: %v", pvc.Spec.VolumeName, err)
return err
}
_, err = m.deps.PVControl.UpdateMetaInfo(tm, pv)
if err != nil {
return err
}
}
}
return nil
}
func (m *MonitorManager) patchPVClaimRef(pv *corev1.PersistentVolume, patchPvcName string, monitor *v1alpha1.TidbMonitor) error {
if pv.Spec.ClaimRef == nil {
pv.Spec.ClaimRef = &corev1.ObjectReference{}
}
pv.Spec.ClaimRef.Name = patchPvcName
err := m.deps.PVControl.PatchPVClaimRef(monitor, pv, patchPvcName)
if err != nil {
return err
}
return nil
}
func (m *MonitorManager) syncAssetSecret(monitor *v1alpha1.TidbMonitor, store *Store) error {
ns := monitor.Namespace
name := monitor.Name
tlsAssetsSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: GetTLSAssetsSecretName(monitor.Name),
Namespace: monitor.Namespace,
Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []metav1.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Data: make(map[string][]byte, len(store.TLSAssets)),
}
for key, asset := range store.TLSAssets {
tlsAssetsSecret.Data[key.String()] = []byte(asset)
}
_, err := m.deps.TypedControl.CreateOrUpdateSecret(monitor, tlsAssetsSecret)
if err != nil {
klog.Errorf("Fail to sync tm[%s/%s]'s secret assets, err: %v", ns, name, err)
return err
}
return nil
}
| NewMonitorManager |
App.js | import React from "react";
import { Router } from "react-router-dom";
import { ToastContainer } from "react-toastify";
import GlobalStyles from "./styles/global";
import "bootstrap/dist/css/bootstrap.min.css";
import Routes from "./routes";
// import Header from './components/Header';
import './config/ReactotronConfig';
import store from "./store";
import history from "./services/history"; |
function App() {
return (
<Provider store={store}>
<Router history={history}>
<GlobalStyles />
<ToastContainer autoClose={3000} />
{/* <Header /> */}
<Routes />
</Router>
</Provider>
);
}
export default App; | import { Provider } from "react-redux"; |
test_crop2d.py | from typing import Tuple
import pytest
import kornia as kornia
import kornia.testing as utils # test utils
import torch
from torch.testing import assert_allclose
from torch.autograd import gradcheck
class TestBoundingBoxInferring:
def test_bounding_boxes_dim_inferring(self, device, dtype):
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
]], device=device, dtype=dtype)
h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)
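        # The box corners span x in [1, 3] and y in [1, 2]; with the inclusive pixel
        # counting checked by the assertion below this gives height 2 and width 3.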
assert (h, w) == (2, 3)
def test_bounding_boxes_dim_inferring_batch(self, device, dtype):
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
], [
[2., 2.],
[4., 2.],
[4., 3.],
[2., 3.],
]], device=device, dtype=dtype)
h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)
assert (h.unique().item(), w.unique().item()) == (2, 3)
def test_gradcheck(self, device, dtype):
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
]], device=device, dtype=dtype)
boxes = utils.tensor_to_gradcheck_var(boxes)
        assert gradcheck(kornia.geometry.transform.crop.infer_box_shape,
(boxes,), raise_exception=True)
def test_jit(self, device, dtype):
# Define script
op = kornia.geometry.transform.crop.infer_box_shape
op_script = torch.jit.script(op)
# Define input
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
]], device=device, dtype=dtype)
actual = op_script(boxes)
expected = op(boxes)
assert_allclose(actual, expected)
class TestCropAndResize:
def test_align_corners_true(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
height, width = 2, 3
expected = torch.tensor(
[[[[6.0000, 6.5000, 7.0000],
[10.0000, 10.5000, 11.0000]]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
        # default should use align_corners True
patches = kornia.crop_and_resize(inp, boxes, (height, width))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_align_corners_false(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
height, width = 2, 3
expected = torch.tensor(
[[[[6.7222, 7.1667, 7.6111],
[9.3889, 9.8333, 10.2778]]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
patches = kornia.crop_and_resize(inp, boxes, (height, width), align_corners=False)
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_crop_batch(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]], [[
[1., 5., 9., 13.],
[2., 6., 10., 14.],
[3., 7., 11., 15.],
[4., 8., 12., 16.],
]]], device=device, dtype=dtype)
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]], [[
[7., 15.],
[8., 16.],
]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
], [
[1., 2.],
[3., 2.],
[3., 3.],
[1., 3.],
]], device=device, dtype=dtype) # 2x4x2
patches = kornia.crop_and_resize(inp, boxes, (2, 2))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_crop_batch_broadcast(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]], [[
[1., 5., 9., 13.],
[2., 6., 10., 14.],
[3., 7., 11., 15.],
[4., 8., 12., 16.],
]]], device=device, dtype=dtype)
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]], [[
[6., 10.],
[7., 11.],
]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
patches = kornia.crop_and_resize(inp, boxes, (2, 2))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)
img = utils.tensor_to_gradcheck_var(img) # to var
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
boxes = utils.tensor_to_gradcheck_var(boxes, requires_grad=False) # to var
assert gradcheck(kornia.crop_and_resize,
(img, boxes, (4, 2),),
raise_exception=True)
def test_jit(self, device, dtype):
# Define script
op = kornia.crop_and_resize
op_script = torch.jit.script(op)
# Define input
img = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
crop_height, crop_width = 4, 2
actual = op_script(img, boxes, (crop_height, crop_width))
expected = op(img, boxes, (crop_height, crop_width))
assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)
class TestCenterCrop:
def test_center_crop_h2_w4(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
expected = torch.tensor([[[
[5., 6., 7., 8.],
[9., 10., 11., 12.],
]]], device=device, dtype=dtype)
out_crop = kornia.center_crop(inp, (2, 4))
assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)
def test_center_crop_h4_w2(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
height, width = 4, 2
expected = torch.tensor([[[
[2., 3.],
[6., 7.],
[10., 11.],
[14., 15.],
]]], device=device, dtype=dtype)
out_crop = kornia.center_crop(inp, (height, width))
assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)
def test_center_crop_h4_w2_batch(self, device, dtype):
inp = torch.tensor([
[[[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.]]],
[[[1., 5., 9., 13.],
[2., 6., 10., 14.],
[3., 7., 11., 15.],
[4., 8., 12., 16.]]]
], device=device, dtype=dtype)
expected = torch.tensor([[[
[2., 3.],
[6., 7.],
[10., 11.],
[14., 15.],
]], [[
[5., 9.],
[6., 10.],
[7., 11.],
[8., 12.],
]]], device=device, dtype=dtype)
out_crop = kornia.center_crop(inp, (4, 2))
assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)
img = utils.tensor_to_gradcheck_var(img) # to var
assert gradcheck(kornia.center_crop, (img, (4, 2),), raise_exception=True)
def test_jit(self, device, dtype):
# Define script
op = kornia.center_crop
op_script = torch.jit.script(op)
# Define input
img = torch.ones(1, 2, 5, 4, device=device, dtype=dtype)
actual = op_script(img, (4, 2))
expected = op(img, (4, 2))
assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)
def test_jit_trace(self, device, dtype):
# Define script
op = kornia.center_crop
op_script = torch.jit.script(op)
# Define input
img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)
op_trace = torch.jit.trace(op_script, (img, (torch.tensor(2), torch.tensor(3))))
img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)
# Run
actual = op_trace(img, (torch.tensor(2), torch.tensor(3)))
expected = op(img, (2, 3))
assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)
class TestCropByBoxes:
def test_crop_by_boxes_no_resizing(self, device, dtype):
|
def test_crop_by_boxes_resizing(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
src = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
dst = torch.tensor([[
[0., 0.],
[2., 0.],
[2., 1.],
[0., 1.],
]], device=device, dtype=dtype) # 1x4x2
expected = torch.tensor([[[
[6., 6.5, 7.],
[10., 10.5, 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)
src = torch.tensor([[
[1., 0.],
[2., 0.],
[2., 1.],
[1., 1.]]], device=device, dtype=dtype)
dst = torch.tensor([[
[0., 0.],
[1., 0.],
[1., 1.],
[0., 1.]]], device=device, dtype=dtype)
inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True) # to var
assert gradcheck(kornia.geometry.transform.crop.crop_by_boxes,
(inp, src, dst,),
raise_exception=True)
class TestCropByTransform:
def test_crop_by_transform_no_resizing(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
transform = torch.tensor([[
[1., 0., -1.],
[0., 1., -1.],
[0., 0., 1.],
]], device=device, dtype=dtype) # 1x3x3
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 2))
assert_allclose(patches, expected)
def test_crop_by_boxes_resizing(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
transform = torch.tensor([[
[2., 0., -2.],
[0., 1., -1.],
[0., 0., 1.],
]], device=device, dtype=dtype) # 1x3x3
expected = torch.tensor([[[
[6., 6.5, 7.],
[10., 10.5, 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 3))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)
transform = torch.tensor([[
[2., 0., -2.],
[0., 1., -1.],
[0., 0., 1.],
]], device=device, dtype=dtype) # 1x3x3
inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True) # to var
assert gradcheck(kornia.geometry.transform.crop.crop_by_transform_mat,
(inp, transform, (2, 2),),
raise_exception=True)
| inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
src = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
dst = torch.tensor([[
[0., 0.],
[1., 0.],
[1., 1.],
[0., 1.],
]], device=device, dtype=dtype) # 1x4x2
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)
assert_allclose(patches, expected) |
main.rs | // Oliver Kovacs 2022 - logos - MIT
mod json;
use crate::json::*;
static INTRODUCTION_STR: &str =
"Hello, I'm Logos. I can help you to detect fake news.";
static ENTER_TERM_STR: &str =
"Enter a search term!";
static ENTER_NUMBER_STR: &str =
"Enter a number to learn more about an item or 'done' to search for new terms!";
static SOURCE_STR: &str =
"
I use the Google Fact Check Tools API:
https://toolbox.google.com/factcheck/apis
It in turn uses multiple credible fact-checking websites to provide data.
The source of a specific result is always given and linked.
";
#[derive(PartialEq)]
enum State {
Global,
Claim,
}
struct Bot {
name: String,
state: State,
last_claims: Option<Claims>,
}
impl Bot {
fn conversation(&mut self) {
println!("{}: {}\n{}", self.name, INTRODUCTION_STR, ENTER_TERM_STR);
loop {
self.respond(self.read());
}
}
fn read(&self) -> String {
print!("You: ");
read_line()
}
fn preprocess(&self, input: String) -> String {
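        // Strip non-alphanumeric characters and lowercase the input before matching.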
input
.replace(|c: char| !c.is_alphanumeric(), "")
.to_lowercase()
}
fn respond(&mut self, input: String) {
let input = self.preprocess(input);
print!("{}: ", self.name);
if self.is_exit(&input) {
println!("Goodbye!");
std::process::exit(0);
}
if self.is_source(&input) {
println!("{}", SOURCE_STR);
return;
}
if self.is_hello(&input) {
println!("Hello! {}", ENTER_TERM_STR);
return;
}
if self.state == State::Claim {
self.respond_claim(input);
return;
}
let claims = check(input);
if let Some(claims_new) = claims.clone().claims {
self.last_claims = Some(claims);
self.state = State::Claim;
self.print_claims(claims_new);
println!("{}\n", ENTER_NUMBER_STR);
return;
}
println!("Sorry, I couldn't find anything :(");
}
fn respond_claim(&mut self, input: String) {
if input == "done" {
self.state = State::Global;
println!("{}", ENTER_TERM_STR);
return;
}
        if let Ok(index) = input.parse::<usize>() {
            let claims = self
                .last_claims
                .as_ref()
                .unwrap()
                .claims
                .as_ref()
                .unwrap();
            // Entries are numbered from 1; guard against 0 and indices past the end of the list.
            if index == 0 || index > claims.len() {
                println!("The number is out of range.");
            }
            else {
                self.print_claim(claims[index - 1].clone());
            }
println!("{}\n", ENTER_NUMBER_STR);
return;
}
println!("Invalid.\n{}\n", ENTER_NUMBER_STR);
}
fn is_exit(&self, input: &String) -> bool {
return match input.as_str() {
"bye"
| "goodbye"
| "exit"
| "quit"
| "q"
=> true,
_ => false,
};
}
fn is_source(&self, input: &String) -> bool {
input.contains("source")
}
fn is_hello(&self, input: &String) -> bool {
input == "hi"
|| input == "hii"
|| input == "hey"
|| input == "hello"
|| input == "hallo"
}
fn print_claims(&self, claims: Vec<Claim>) {
println!("\n\nI found the following entries:\n");
claims
.iter()
.enumerate()
.for_each(|(i, element)| {
println!(
" {: >2} : {} ({})",
i + 1,
element.text,
element.claim_review[0].textual_rating
);
});
println!();
}
fn print_claim(&self, claim: Claim) {
println!("\n\n{}\n", claim.text);
if let Some(claimant) = claim.claimant {
println!(" {: <9}: {}", "Claimant", claimant);
}
if let Some(claim_date) = claim.claim_date {
println!(" {: <9}: {}", "Date", claim_date);
}
println!("\nReviews:\n");
claim.claim_review
.into_iter()
.for_each(|review| {
println!(" {: <15}: {}", "Result", review.textual_rating);
if let Some(title) = review.title {
println!(" {: <15}: {}", "Title", title);
}
println!(" {: <15}: {}", "URL", review.url);
if let Some(name) = review.publisher.name {
println!(" {: <15}: {}", "Publisher Name", name);
}
if let Some(site) = review.publisher.site {
println!(" {: <15}: {}", "Publisher Site", site);
}
println!(" {: <15}: {}\n", "Language", review.language_code);
});
}
}
impl std::default::Default for Bot {
fn | () -> Self {
Bot {
name: "Bot".to_string(),
state: State::Global,
last_claims: None,
}
}
}
fn main() {
let mut bot = Bot::default();
bot.conversation();
}
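/// Read a line from stdin, stripping the trailing newline (and carriage return on Windows).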
fn read_line() -> String {
let mut out = String::new();
use std::io::Write;
std::io::stdout()
.flush()
.unwrap();
std::io::stdin()
.read_line(&mut out)
.expect("Did not enter a correct string");
if let Some('\n') = out.chars().next_back() {
out.pop();
}
if let Some('\r') = out.chars().next_back() {
out.pop();
}
out
}
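/// Query the Google Fact Check Tools API for the given search term and deserialize the JSON response into Claims.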
fn check(query: String) -> Claims {
let size = 20;
let key = "AIzaSyA0gsrc5ajXHug_De42hQhxgW9GrxDuZdw";
let url = format!(
"https://factchecktools.googleapis.com\
/v1alpha1/claims:search?query={}&pageSize={}&key={}",
query, size, key
);
let res = reqwest::blocking::get(url)
.unwrap()
.text()
.unwrap();
serde_json::from_str(&res)
.expect("invalid response")
}
| default |
make_post.py | #!/usr/bin/env python
from extensions import *
from config import *
from sets import Set
from re import match
def get_wordnik_json(route, extra_params):
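    # Call the Wordnik API with the given route and params, retrying until a non-empty JSON payload is returned.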
params = {
"limit": 1,
"api_key": wordnik_key
}
params.update(extra_params)
request_json = []
while not request_json:
request_json = get_request_json("https://api.wordnik.com/v4/"+route, params)
return request_json
def post_word(route, word):
|
def post_root_word(post_id, word, definition):
# If the definition matches any of these patterns, post
# the word that is referenced in the definition
for pattern in [s + " ([^ ]*)[.]" for s in [".* form of", ".* participle of", "See", "Variant of", ".*[.] See Synonyms at", "Alternative spelling of", "Relating to", "An abbreviation of", "Common misspelling of", "Of or pertaining to", "Superlative of", "Obsolete spelling of", "Informal", "To", "The act or process of", "One who believes in"]] + ["([^ .]*)[.]?", "Alternative capitalization of ([^ ]*)", "In an? ([^ ]*) manner."]:
reference_word = match("^"+pattern+"$", definition)
if reference_word:
root_word = reference_word.group(1)
# If the definition is a single word, make it lowercase because
# the wordnik API is case sensitive and single word definitions
# may have been capitalized
if pattern == "([^ .]*)[.]?":
root_word = root_word.lower()
# Post the root word and write to the log
post_id, new_definition = post_word(post_id+"/comments", root_word)
write_to_log(posts_log, "Posted comment definition of word '"+root_word+"' on post with definition of '"+word+"'")
# Save off the id of the first posted comment because all subsequent
# comments should be replies to this initial comment
if not post_root_word.comment_id:
post_root_word.comment_id = post_id
# Check the root word's definition for other referenced words
post_root_word(post_root_word.comment_id, root_word, new_definition)
post_root_word.comment_id = None
def main():
# Get a random word that has not been posted yet
posted_words = Set([post["word"] for post in execute_query("select word from Posts")])
while True:
word = get_wordnik_json("words.json/randomWords", {"minLength": 0})[0]["word"]
if word not in posted_words:
break
write_to_log(error_log, "Word: '"+word+"' already posted, posting another...")
# Make a post, insert its data into the database, and log it
post_id, definition = post_word(page_info["page_id"]+"/feed", word)
execute_query("insert into Posts (id, word) values (%s, %s)", (post_id, word))
write_to_log(posts_log, "Finished posting word - "+word)
# If the posted word references a root word, post the
# definition of the root word as a comment
post_root_word(post_id, word, definition)
if __name__ == "__main__":
try:
main()
except Exception as e:
write_to_log(error_log, "Unexpected error caught while making a post: "+str(e))
| word_info = get_wordnik_json("word.json/"+word+"/definitions", {})[0]
definition = word_info["text"]
return post_to_page(route, word+(" - "+word_info["partOfSpeech"] if "partOfSpeech" in word_info else "")+"\n"+definition), definition |
test_wallet_sync.py | # flake8: noqa: F811, F401
import asyncio
import pytest
from colorlog import logging
from scam.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from scam.protocols import full_node_protocol
from scam.simulator.simulator_protocol import FarmNewBlockProtocol
from scam.types.peer_info import PeerInfo
from scam.util.ints import uint16, uint32
from scam.wallet.wallet_state_manager import WalletStateManager
from tests.connection_utils import disconnect_all_and_reconnect
from tests.core.fixtures import default_400_blocks, default_1000_blocks
from tests.setup_nodes import bt, self_hostname, setup_node_and_wallet, setup_simulators_and_wallets, test_constants
from tests.time_out_assert import time_out_assert
def wallet_height_at_least(wallet_node, h):
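    # Note: despite the name, this only returns True when the wallet's peak height equals h exactly.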
height = wallet_node.wallet_state_manager.blockchain._peak_height
if height == h:
return True
return False
log = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
class TestWalletSync:
@pytest.fixture(scope="function")
async def wallet_node(self):
async for _ in setup_node_and_wallet(test_constants):
yield _
@pytest.fixture(scope="function")
async def wallet_node_simulator(self):
async for _ in setup_simulators_and_wallets(1, 1, {}):
yield _
@pytest.fixture(scope="function")
async def wallet_node_starting_height(self):
async for _ in setup_node_and_wallet(test_constants, starting_height=100):
yield _
@pytest.mark.asyncio
async def test_basic_sync_wallet(self, wallet_node, default_400_blocks):
full_node_api, wallet_node, full_node_server, wallet_server = wallet_node
for block in default_400_blocks:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
# The second node should eventually catch up to the first one, and have the
# same tip at height num_blocks - 1.
await time_out_assert(100, wallet_height_at_least, True, wallet_node, len(default_400_blocks) - 1)
# Tests a reorg with the wallet
num_blocks = 30
blocks_reorg = bt.get_consecutive_blocks(num_blocks, block_list_input=default_400_blocks[:-5])
for i in range(1, len(blocks_reorg)):
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks_reorg[i]))
await disconnect_all_and_reconnect(wallet_server, full_node_server)
await time_out_assert(
100, wallet_height_at_least, True, wallet_node, len(default_400_blocks) + num_blocks - 5 - 1
)
@pytest.mark.asyncio
async def test_backtrack_sync_wallet(self, wallet_node, default_400_blocks):
full_node_api, wallet_node, full_node_server, wallet_server = wallet_node
for block in default_400_blocks[:20]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
# The second node should eventually catch up to the first one, and have the
# same tip at height num_blocks - 1.
await time_out_assert(100, wallet_height_at_least, True, wallet_node, 19)
# Tests a reorg with the wallet
@pytest.mark.asyncio
async def test_short_batch_sync_wallet(self, wallet_node, default_400_blocks):
full_node_api, wallet_node, full_node_server, wallet_server = wallet_node
for block in default_400_blocks[:200]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
# The second node should eventually catch up to the first one, and have the
# same tip at height num_blocks - 1.
await time_out_assert(100, wallet_height_at_least, True, wallet_node, 199)
# Tests a reorg with the wallet
@pytest.mark.asyncio
async def test_long_sync_wallet(self, wallet_node, default_1000_blocks, default_400_blocks):
full_node_api, wallet_node, full_node_server, wallet_server = wallet_node
for block in default_400_blocks:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
# The second node should eventually catch up to the first one, and have the
# same tip at height num_blocks - 1.
await time_out_assert(600, wallet_height_at_least, True, wallet_node, len(default_400_blocks) - 1)
await disconnect_all_and_reconnect(wallet_server, full_node_server)
# Tests a long reorg
for block in default_1000_blocks:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
log.info(f"wallet node height is {wallet_node.wallet_state_manager.blockchain._peak_height}")
await time_out_assert(600, wallet_height_at_least, True, wallet_node, len(default_1000_blocks) - 1)
await disconnect_all_and_reconnect(wallet_server, full_node_server)
# Tests a short reorg
num_blocks = 30
blocks_reorg = bt.get_consecutive_blocks(num_blocks, block_list_input=default_1000_blocks[:-5])
for i in range(1, len(blocks_reorg)):
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks_reorg[i]))
await time_out_assert(
600, wallet_height_at_least, True, wallet_node, len(default_1000_blocks) + num_blocks - 5 - 1
)
@pytest.mark.asyncio
async def test_wallet_reorg_sync(self, wallet_node_simulator, default_400_blocks):
num_blocks = 5
full_nodes, wallets = wallet_node_simulator
full_node_api = full_nodes[0]
wallet_node, server_2 = wallets[0]
fn_server = full_node_api.full_node.server
wsm: WalletStateManager = wallet_node.wallet_state_manager
wallet = wsm.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(fn_server._port)), None)
# Insert 400 blocks
for block in default_400_blocks:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Farm few more with reward
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
# Confirm we have the funds
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
async def get_tx_count(wallet_id):
txs = await wsm.get_all_transactions(wallet_id)
return len(txs)
await time_out_assert(5, get_tx_count, 2 * (num_blocks - 1), 1)
# Reorg blocks that carry reward
num_blocks = 30
blocks_reorg = bt.get_consecutive_blocks(num_blocks, block_list_input=default_400_blocks[:-5])
for block in blocks_reorg[-30:]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(5, get_tx_count, 0, 1)
await time_out_assert(5, wallet.get_confirmed_balance, 0)
@pytest.mark.asyncio
async def test_wallet_reorg_get_coinbase(self, wallet_node_simulator, default_400_blocks):
full_nodes, wallets = wallet_node_simulator | wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(fn_server._port)), None)
# Insert 400 blocks
for block in default_400_blocks:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Reorg blocks that carry reward
num_blocks_reorg = 30
blocks_reorg = bt.get_consecutive_blocks(num_blocks_reorg, block_list_input=default_400_blocks[:-5])
for block in blocks_reorg[:-5]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
async def get_tx_count(wallet_id):
txs = await wsm.get_all_transactions(wallet_id)
return len(txs)
await time_out_assert(10, get_tx_count, 0, 1)
num_blocks_reorg_1 = 40
blocks_reorg_1 = bt.get_consecutive_blocks(
1, pool_reward_puzzle_hash=ph, farmer_reward_puzzle_hash=ph, block_list_input=blocks_reorg[:-30]
)
blocks_reorg_2 = bt.get_consecutive_blocks(num_blocks_reorg_1, block_list_input=blocks_reorg_1)
for block in blocks_reorg_2[-41:]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
await disconnect_all_and_reconnect(server_2, fn_server)
# Confirm we have the funds
funds = calculate_pool_reward(uint32(len(blocks_reorg_1))) + calculate_base_farmer_reward(
uint32(len(blocks_reorg_1))
)
await time_out_assert(10, get_tx_count, 2, 1)
await time_out_assert(10, wallet.get_confirmed_balance, funds) | full_node_api = full_nodes[0]
wallet_node, server_2 = wallets[0]
fn_server = full_node_api.full_node.server
wsm = wallet_node.wallet_state_manager |
0004_auto_20170303_2141.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-03 21:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_remove_student_student_name'),
]
operations = [
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('skill', models.CharField(choices=[('Django', 'Django'), ('Python', 'Python'), ('Java', 'Java'), ('Ruby', 'Ruby')], max_length=255)),
],
),
migrations.RemoveField(
model_name='student',
name='user',
),
migrations.AddField(
model_name='user',
name='connections',
field=models.ManyToManyField(related_name='_user_connections_+', to=settings.AUTH_USER_MODEL), | name='Student',
),
] | ),
migrations.DeleteModel( |
server.js | /* eslint-disable */
var renderToString = dep(require('preact-render-to-string')); | module.exports = {
renderToString: renderToString,
renderToStaticMarkup: renderToString
}; |
function dep(obj) { return obj['default'] || obj; }
|
base.py | #
# Copyright (C) 2017 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os.path
import re
from pecan import conf
from pecan import request, response, abort, expose
from pecan.rest import RestController
from managesf.model.yamlbkd.engine import SFResourceBackendEngine
from managesf import policy
# TODO do it with v2
from managesf.model import SFUserCRUD
from git.exc import GitCommandError
logger = logging.getLogger(__name__)
# TODO move to managesf.api once users API is started
def get_user_groups(username):
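    # Resolve the user's email, then return the resource groups that list that email as a member.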
user_email = SFUserCRUD().get(username=username).get('email')
logger.info('Found email %s for username %s' % (user_email, username))
resources_engine = SFResourceBackendEngine(
os.path.join(conf.resources['workdir'], 'read'),
conf.resources['subdir'])
try:
resources = resources_engine.get(
conf.resources['master_repo'], 'master')
except GitCommandError:
logger.info("Unable to read groups from the resources engine.")
logger.info("It is probably because we are boostrapping SF.")
return []
groups = resources['resources'].get('groups', {})
return [g for g in groups if user_email in groups[g]['members']]
def authorize(rule_name, target):
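    # Build credentials (username and group membership) from the request, then evaluate the policy rule against the target.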
if not request.remote_user:
request.remote_user = request.headers.get('X-Remote-User')
credentials = {'username': request.remote_user, 'groups': []}
if request.remote_user:
credentials['groups'] = get_user_groups(request.remote_user)
return policy.authorize(rule_name, target, credentials)
class APIv2RestController(RestController):
def __init__(self, *args, **kwargs):
super(APIv2RestController, self).__init__(*args, **kwargs)
self._logger = logging.getLogger(
'managesf.v2.controllers.%s' % self.__class__.__name__)
class APIv2RestProxyController(APIv2RestController):
manager = None
policies_map = {'get .+/path/to/(?P<x>.+)/command': 'managesf.policy.name'}
def _find_policy(self, lookup):
"""Find policy according to REST path."""
for expr in self.policies_map:
regex = re.compile(expr)
if regex.search(lookup):
target_elements = regex.search(lookup).groupdict()
return {'policy': self.policies_map[expr],
'target_elements': target_elements}
return {}
def _policy_target(self, verb, target_elements, *args, **kwargs):
# override me
target = {}
return target
# This makes the assumption that backend services always return JSON.
# This is true for all of them except gerrit, which will not be covered
# this way.
def _do(self, verb):
def action(*args, **kwargs):
if not self.manager:
return abort(404,
detail='This service is not configured.')
path = request.path
lookup = ("%s %s" % (verb, path))
pol_scan = self._find_policy(lookup)
pol, target_elements = None, {}
if pol_scan:
pol = pol_scan['policy']
target_elements = pol_scan['target_elements']
if not kwargs and request.content_length:
if 'json' in request.content_type:
kwargs = request.json
else: | if not pol:
# Unknown endpoint, default behavior is to forbid access
pol = 'rule:none'
if not authorize(pol, target=target):
return abort(401,
detail='Failure to comply with policy %s' % pol)
# HACK The RestController's routing method seems to discard
# extensions on the last remainder. This is a dirty fix
full_path = request.path_url
last_arg = full_path.split('/')[-1]
if last_arg and args[-1] != last_arg:
args = args[:-1] + (last_arg, )
if request.content_length and 'json' in request.content_type:
proxied_response = getattr(self.manager, verb)(
*args, json=kwargs)
self._logger.debug(
"calling passthrough manager with "
"args: '%s', json: '%s'" % (args, kwargs))
elif kwargs:
proxied_response = getattr(self.manager, verb)(
*args, params=kwargs)
self._logger.debug(
"calling passthrough manager with "
"args: '%s', params: '%s'" % (args, kwargs))
else:
proxied_response = getattr(self.manager, verb)(*args)
response.status = proxied_response.status_code
if int(proxied_response.status_code) > 399:
response.text = proxied_response.text
return abort(proxied_response.status_code)
else:
return proxied_response.json()
return action
@expose('json')
def get(self, *args, **kwargs):
return self._do('get')(*args, **kwargs)
@expose('json')
def post(self, *args, **kwargs):
return self._do('post')(*args, **kwargs)
@expose('json')
def put(self, *args, **kwargs):
return self._do('put')(*args, **kwargs)
@expose('json')
def delete(self, *args, **kwargs):
return self._do('delete')(*args, **kwargs) | kwargs = request.params
target = self._policy_target(verb, target_elements,
*args, **kwargs) |