query
stringlengths
7
3.85k
document
stringlengths
11
430k
metadata
dict
negatives
sequencelengths
0
101
negative_scores
sequencelengths
0
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Trace formats and logs a trace message.
func (CryptoMachineLogger) Trace(message string, args ...interface{}) { log.Tracef(message, args...) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (l *Logger) Trace(messageFormat string, messageArgs ...interface{}) {\n\tl.Log(Trace, messageFormat, messageArgs...)\n}", "func ExampleTrace() {\n\tsetup()\n\tlog.Trace().Msg(\"hello world\")\n\n\t// Output: {\"level\":\"trace\",\"time\":1199811905,\"message\":\"hello world\"}\n}", "func Trace(format string, v ...interface{}) {\n\tLog(1, TRACE, format, v...)\n}", "func Trace(format string, v ...interface{}) {\n\tLeveledLogger(level.Trace, format, v...)\n}", "func (l *jsonLogger) Trace(message interface{}, params ...interface{}) {\n\tl.jsonLogParser.parse(context.Background(), l.jsonLogParser.log.Trace(), \"\", params...).Msgf(\"%s\", message)\n}", "func (l *Logger) Trace(message string, args ...interface{}) { l.Log(Trace, message, args...) }", "func (log *Log) Trace(message string) {\n\tlog.Log(NewMessage(MessageTrace, message))\n}", "func Trace(format string, a ...interface{}) {\n\tprefix := yellow(trac)\n\tlog.Println(prefix, fmt.Sprintf(format, a...))\n}", "func Trace(msg string) {\r\n\tif (currentLogLevel > trace_level.value) {\r\n\t\treturn;\r\n\t}\r\n\r\n\tfmt.Fprintf(os.Stdout, adjustLog(trace_level.name, msg))\r\n}", "func (this *Logger) Trace(message string) {\n\tthis.Tracef(\"%s\", message)\n}", "func Trace(msg string, ctx ...interface{}) {\n\tmetrics.GetOrRegisterCounter(\"trace\", nil).Inc(1)\n\tl.Output(msg, l.LvlTrace, CallDepth, ctx...)\n}", "func (logger *Logger) Trace(msg string, extras ...map[string]string) error {\n\tif TraceLevel >= logger.LogLevel {\n\t\treturn logger.Log(msg, TraceLevel, extras...)\n\t}\n\treturn nil\n}", "func (e *Entry) Trace(msg string) *Entry {\n\te.Info(msg)\n\tv := e.WithFields(e.Fields)\n\tv.Message = msg\n\tv.start = time.Now()\n\treturn v\n}", "func Trace(msg string, args ...interface{}) {\n\tDefaultLog.Trace(msg, args...)\n}", "func (r *Record) Trace(args ...interface{}) {\n\tr.Log(TraceLevel, args...)\n}", "func (l *thundraLogger) Trace(v ...interface{}) {\n\tif logLevelId > traceLogLevelId 
{\n\t\treturn\n\t}\n\tlogManager.recentLogLevel = traceLogLevel\n\tlogManager.recentLogLevelId = traceLogLevelId\n\tl.Output(2, fmt.Sprint(v...))\n}", "func Trace(data []byte) {\n\tlog.Print(\"TRACE: \", string(data))\n}", "func (l *MessageLogger) Trace(msg string) { l.logger.Trace(msg) }", "func Trace(ctx context.Context, msg string) {\n\tsp := opentracing.SpanFromContext(ctx)\n\tif sp != nil {\n\t\tsp.LogEvent(msg)\n\t}\n}", "func Trace(ctx context.Context, format string, args ...interface{}) {\n\ttracer.Trace(ctx, format, args...)\n}", "func (dl *DummyTracer) Trace(ctx context.Context, format string, args ...interface{}) {\n\tfmt.Printf(format+\"\\n\", args...)\n}", "func (stimLogger *FullStimLogger) Trace(message ...interface{}) {\n\tif stimLogger.highestLevel >= TraceLevel {\n\t\tif stimLogger.setLogger == nil {\n\t\t\tif stimLogger.forceFlush {\n\t\t\t\tstimLogger.writeLogs(stimLogger.formatString(TraceLevel, traceMsg, message...))\n\t\t\t} else {\n\t\t\t\tstimLogger.formatAndLog(TraceLevel, traceMsg, message...)\n\t\t\t}\n\t\t} else {\n\t\t\tstimLogger.setLogger.Debug(message...)\n\t\t}\n\t}\n}", "func (l Logger) Trace(msg ...interface{}) {\n\tif l.Level <= log.TraceLevel {\n\t\tout := fmt.Sprint(append([]interface{}{l.TraceColor.Sprint(\"TRACE: \")}, msg...)...)\n\t\tout = checkEnding(out)\n\t\tfmt.Fprint(l.TraceOut, out)\n\t}\n}", "func (c *context) Trace(tag, msg string, fields ...log.Field) {\n\tc.l.Trace(tag, msg, c.logFields(fields)...)\n\tc.incLogLevelCount(log.LevelTrace, tag)\n}", "func Trace(msg ...interface{}) {\n\tCurrent.Trace(msg...)\n}", "func Trace(v ...interface{}) {\n\tjasonLog.output(2, levelTrace, \"\", v...)\n}", "func (c Context) Trace(msg string) {\n\tc.Log(50, msg, GetCallingFunction())\n}", "func (l *Logger) Trace(v ...interface{}) {\n\tl.Log(fmt.Sprintln(v...), Ltrace, Trace)\n}", "func (zw *zerologWrapper) Trace(ctx context.Context, format string, args ...interface{}) {\n\tnewEntry(zw, false, zw.cfg.staticFields).Trace(ctx, format, 
args...)\n}", "func (log *Logger) Trace(arg0 interface{}, args ...interface{}) {\n\tconst (\n\t\tlvl = TRACE\n\t)\n\tswitch first := arg0.(type) {\n\tcase string:\n\t\t// Use the string as a format string\n\t\tlog.intLogf(lvl, first, args...)\n\tcase func() string:\n\t\t// Log the closure (no other arguments used)\n\t\tlog.intLogc(lvl, first)\n\tdefault:\n\t\t// Build a format string so that it will be similar to Sprint\n\t\tlog.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(\" %v\", len(args)), args...)\n\t}\n}", "func Trace(ctx context.Context, msg string, params ...interface{}) {\n\tif l := DefaultLogger(); l != nil {\n\t\tif ll, ok := l.(LeveledLogger); ok {\n\t\t\tll.Trace(ctx, msg, params...)\n\t\t} else {\n\t\t\tl.Log(Eventf(TraceSeverity, ctx, msg, params...))\n\t\t}\n\t}\n}", "func (l Logger) Trace(msg ...interface{}) {\n\tif l.Level <= log.TraceLevel {\n\t\tl.logger.Print(append([]interface{}{\"[TRACE] \"}, msg...)...)\n\t}\n}", "func (logger *Logger) Trace(a ...any) {\n\tlogger.echo(nil, level.Trace, formatPrint, a...)\n}", "func (l *Logger) Trace(name, file string, line int) {\r\n\tl.timeReset()\r\n\tdefer l.timeLog(name)\r\n}", "func Trace(message interface{}) {\n\tlogging.Debug(message)\n}", "func printTrace(msg string) {\n\tif Trace {\n\t\tfmt.Println(fmt.Sprintf(\"TRACE %s: %s\", getFormattedTime(), msg))\n\t}\n}", "func printTrace(msg string) {\n\tif Trace {\n\t\tfmt.Println(fmt.Sprintf(\"TRACE %s: %s\", getFormattedTime(), msg))\n\t}\n}", "func (z *ZapLogger) Trace(args ...interface{}) {\n\tz.Debug(args)\n}", "func Trace(trace interface{}) {\n\tif fmt.Sprintf(\"%v\", trace) == \"[]\" {\n\t\tLog(Ltrace, fmt.Sprintf(formats[\"empty\"], trace))\n\t} else {\n\t\tLog(Ltrace, fmt.Sprintf(formats[\"trace\"], trace))\n\t}\n}", "func (l *Log) Trace(args ...interface{}) {\n\tl.logger.Trace(args...)\n}", "func (l Mylog) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) {\n\telapsed := time.Now().Sub(begin)\n\ts, _ := 
json.Marshal(&ctx)\n\tl.Info(ctx, string(s))\n\tif err != nil {\n\t\tsql, rows := fc()\n\t\tl.ServiceLog.Error(ctx, utils.FileWithLineNum(), err, float64(elapsed.Nanoseconds())/1e6, rows, sql)\n\t} else {\n\t\tsql, rows := fc()\n\t\tl.ServiceLog.Info(utils.FileWithLineNum(), float64(elapsed.Nanoseconds())/1e6, rows, sql)\n\t}\n}", "func (t *tracer) Trace(msg ...interface{}) Tracer {\n\tif !t.IsNull() && t.enabled {\n\t\tmessage := t.TraceMessage(msg...)\n\t\tif message != \"\" {\n\t\t\tlogrus.Tracef(message)\n\t\t}\n\t}\n\treturn t\n}", "func (l *logger) Trace(b []byte) ([]byte, error) {\n\tlog.Tracef(\"%s\", b)\n\treturn []byte(\"\"), nil\n}", "func (l *Logger) Tracef(format string, a ...interface{}) {\n\tif l.LoggerLevel >= TraceLevel {\n\t\tl.log(l.color.Blue(\"TRACE: \"), format, a...)\n\t}\n}", "func Trace(args ...interface{}) {\n\tLog(logrus.TraceLevel, args...)\n}", "func (log *Log) Tracef(format string, a ...interface{}) {\n\tlog.printf(TRACE, format, a...)\n}", "func (z *ZapLogWrapper) Trace(args ...interface{}) {\n\tz.l.Trace(args...)\n}", "func (logger *ColorLogger) Trace(format string, args ...interface{}) {\n\tif !logger.Verbose {\n\t\treturn\n\t}\n\tlogger.log(LOG_LEVEL_ALL, \"blue\", format, args...)\n}", "func Trace(v ...interface{}) {\n\tif level <= LevelTrace {\n\t\tTorbitLogger.Printf(\"[T] %v\\n\", v)\n\t}\n}", "func (l Logger) Tracef(template string, args ...interface{}) {\n\tif l.Level <= log.TraceLevel {\n\t\tl.logger.Printf(\"[TRACE] \"+template, args...)\n\t}\n}", "func Trace(v ...interface{}) {\n\tWithLevel(LevelTrace, v...)\n}", "func (l *Logger) Trace(v ...interface{}) { l.lprint(TRACE, v...) }", "func Tracef(format string, params ...interface{}){\n log.Tracef(format, params)\n}", "func (l *Logger) Tracef(format string, v ...interface{}) { l.lprintf(TRACE, format, v...) 
}", "func (l *Logger) Trace(a ...interface{}) {\n\tif l.Level() >= Trace {\n\t\tl.logTrace.Print(a...)\n\t}\n}", "func (l *Logger) Trace(err errors.Error) {\n\tl.logTrace.Printf(string(err.JSON()))\n}", "func Trace(v ...interface{}) *Logger {\n\tif logger.IsTrace() {\n\t\tlogger.printf(LevelTraceName, noFormat, v...)\n\t}\n\treturn logger\n}", "func Trace(f interface{}, v ...interface{}) {\n\tlogs.Trace(f, v...)\n}", "func (l Logger) Tracef(template string, args ...interface{}) {\n\tif l.Level <= log.TraceLevel {\n\t\tout := l.TraceColor.Sprint(\"TRACE: \")\n\t\tout += fmt.Sprintf(template, args...)\n\t\tout = checkEnding(out)\n\t\tfmt.Fprint(l.TraceOut, out)\n\t}\n}", "func (log *Log) Tracef(format string, args ...interface{}) {\n\tmsg := fmt.Sprintf(format, args...)\n\tlog.Trace(msg)\n}", "func Tracef(format string, v ...interface{}) {\n\tjasonLog.output(2, levelTrace, format, v...)\n}", "func Trace(args ...interface{}) {\n\tif glog.V(trace) {\n\t\tglog.InfoDepth(1, \"TRACE: \"+fmt.Sprint(args...)) // 1 == depth in the stack of the caller\n\t}\n}", "func (mr *MockFieldsMockRecorder) Trace(ctx, format interface{}, args ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{ctx, format}, args...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Trace\", reflect.TypeOf((*MockFields)(nil).Trace), varargs...)\n}", "func (l *AppLogger) Trace(tag string, message ...interface{}) {\n\tl.logging.SetFormatter(&logrus.JSONFormatter{})\n\tk := getAppFields(l.reqId, tag, l.userId)\n\tl.logging.WithFields(k).Trace(message...)\n}", "func Trace(args ...interface{}) {\n\tLogger.Trace(args...)\n}", "func (l *zapLog) Trace(args ...interface{}) {\n\tif l.logger.Core().Enabled(zapcore.DebugLevel) {\n\t\tl.logger.Debug(fmt.Sprint(args...))\n\t}\n}", "func (l *NamedLogger) Tracef(format string, params ...interface{}) {\n\tformat, params = l.convert(format, params)\n\tl.Logger.Tracef(format, params...)\n}", "func (c *Client) Tracef(format 
string, v ...interface{}) {\n\tlog.Printf(\"[TRACE] %s\", fmt.Sprintf(format, v...))\n}", "func (mr *MockMessagesMockRecorder) Trace(ctx, format interface{}, args ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{ctx, format}, args...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Trace\", reflect.TypeOf((*MockMessages)(nil).Trace), varargs...)\n}", "func Tracef(logKey LogKey, format string, args ...interface{}) {\n\tlogTo(context.TODO(), LevelTrace, logKey, format, args...)\n}", "func (a CompatLoggerAdapter) Tracef(format string, v ...interface{}) {\n\ta.Printf(\"TRACE: \"+format, v...)\n}", "func (lgr *lager) Tracef(msg string, v ...interface{}) {\n\tlgr.logf(Trace, msg, v...)\n}", "func (l *Logger) Trace(log ...interface{}) {\n\tl.instance.Trace(log...)\n}", "func (l *Log) Tracef(format string, args ...interface{}) {\n\tl.logger.Tracef(format, args...)\n}", "func (s *StanLogger) Tracef(format string, v ...interface{}) {\n\ts.executeLogCall(func(logger Logger, format string, v ...interface{}) {\n\t\tif s.trace {\n\t\t\tlogger.Tracef(format, v...)\n\t\t}\n\t}, format, v...)\n}", "func TestTrace(t *testing.T) {\n\tvar data = []byte(`Log this!`)\n\tapolog.Trace(data)\n}", "func Tracef(format string, args ...interface{}) {\n\tif glog.V(trace) {\n\t\tglog.InfoDepth(1, fmt.Sprintf(\"TRACE: \"+format, args...)) // 1 == depth in the stack of the caller\n\t}\n}", "func (v *MultiLogger) Trace(args ...interface{}) {\n\tv.Self().Log(logrus.TraceLevel, args...)\n}", "func (w *LogrusWrapper) Tracef(format string, args ...interface{}) {\n\tw.Log.Tracef(format, args...)\n}", "func (l *Logger) Tracef(format string, log ...interface{}) {\n\tl.instance.Tracef(format, log...)\n}", "func (l Logger) Tracew(msg string, fields log.Fields) {\n\tif l.Level <= log.TraceLevel {\n\t\tout := l.TraceColor.Sprint(\"TRACE: \")\n\t\tout += fmt.Sprint(msg, handlFields(fields))\n\t\tout = checkEnding(out)\n\t\tfmt.Fprint(l.TraceOut, 
out)\n\t}\n}", "func (e *Entry) Tracef(format string, v ...interface{}) {\n\tif LogLevelTrace >= e.logger.Level {\n\t\te.logger.printf(LogLevelTrace, e.Context, format, v...)\n\t}\n}", "func Tracef(format string, v ...interface{}) {\n\tWithLevelf(LevelTrace, format, v...)\n}", "func (r *Record) Tracef(format string, args ...interface{}) {\n\tr.Logf(TraceLevel, format, args...)\n}", "func (l *NamedLogger) Trace(v ...interface{}) {\n\tvar k []interface{}\n\tk = append(k, fmt.Sprintf(\"(%v)\", l.Name))\n\tk = append(k, v...)\n\tl.Logger.Trace(k...)\n}", "func (c *Client) tracef(format string, args ...interface{}) {\n\tif c.tracelog != nil {\n\t\tc.tracelog.Printf(format, args...)\n\t}\n}", "func (l *jsonLogger) TraceContext(ctx context.Context, message interface{}, params ...interface{}) {\n\tl.jsonLogParser.parse(ctx, l.jsonLogParser.log.Trace(), \"\", params...).Msgf(\"%s\", message)\n}", "func Tracef(format string, args ...interface{}) {\n\tl.Trace().Msgf(format, args...)\n}", "func (l *Logger) Tracef(format string, v ...interface{}) {\n\tif l.trace {\n\t\tl.logger.Printf(l.traceLabel+format, v...)\n\t}\n}", "func Tracef(format string, v ...interface{}) { std.lprintf(TRACE, format, v...) 
}", "func (f *TextFormatter) SetTrace(s string, code ansi.Code) *TextFormatter {\n\tf.trace = newTextFormatterTemplate(s, code, f.funcMap)\n\tf.trace.code = code\n\treturn f\n}", "func (logger *Logger) Tracef(format string, a ...any) {\n\tlogger.echo(nil, level.Trace, format, a...)\n}", "func (logger *Logger) Ftrace(w io.Writer, a ...any) {\n\tlogger.echo(w, level.Trace, formatPrint, a...)\n}", "func (l *Logger) Tracef(format string, v ...interface{}) {\n\tif l.Level() >= Trace {\n\t\tl.logTrace.Printf(format, v...)\n\t}\n}", "func (z *ZapLogWrapper) Tracef(format string, args ...interface{}) {\n\tz.l.Tracef(format, args...)\n}", "func (this *Logger) Tracef(format string, v ...interface{}) {\n\tif this.level <= TRACE {\n\t\tthis.write(this.color.Blue(), \"TRACE\", format, v...)\n\t}\n}", "func Tracef(format string, args ...interface{}) {\n\tLogger.Tracef(format, args...)\n}", "func (l Logger) Tracew(msg string, fields log.Fields) {\n\tif l.Level <= log.TraceLevel {\n\t\tl.logger.Printf(\"[TRACE] %s %s\", msg, handlFields(fields))\n\t}\n}", "func TraceF(format string, v ...interface{}) {\n\tif variable.EnableTraceLog {\n\t\tlog.Printf(\"[TRACE] \"+format, v...)\n\t}\n}", "func Trace(\n\tparent context.Context,\n\tdesc string) (ctx context.Context, report ReportFunc) {\n\t// If tracing is disabled, this is a no-op.\n\tif !*fEnabled {\n\t\tctx = parent\n\t\treport = func(err error) {}\n\t\treturn\n\t}\n\n\t// Is this context already being traced? If so, simply add a span.\n\tif parent.Value(traceStateKey) != nil {\n\t\tctx, report = StartSpan(parent, desc)\n\t\treturn\n\t}\n\n\t// Set up a new trace state.\n\tts := new(traceState)\n\tbaseReport := ts.CreateSpan(desc)\n\n\t// Log when finished.\n\treport = func(err error) {\n\t\tbaseReport(err)\n\t\tts.Log()\n\t}\n\n\t// Set up the context.\n\tctx = context.WithValue(parent, traceStateKey, ts)\n\n\treturn\n}" ]
[ "0.75850546", "0.7433395", "0.73427314", "0.73143286", "0.728935", "0.72199863", "0.72048634", "0.7191854", "0.71277946", "0.7099762", "0.7095311", "0.7072843", "0.706002", "0.7057078", "0.7019095", "0.6962309", "0.69594043", "0.6942132", "0.69262415", "0.6925229", "0.6915073", "0.69118375", "0.69070363", "0.6881893", "0.6864103", "0.6843906", "0.68381435", "0.68373513", "0.68189263", "0.67951286", "0.67907387", "0.67740065", "0.6763133", "0.67582184", "0.6753487", "0.6720937", "0.6720937", "0.6696661", "0.66836476", "0.66826594", "0.6682321", "0.667948", "0.6658497", "0.6656786", "0.6649798", "0.66261035", "0.6617108", "0.66124016", "0.6608974", "0.66019815", "0.6600299", "0.6589705", "0.6588918", "0.65876186", "0.65813243", "0.6578411", "0.65596706", "0.6547723", "0.65307796", "0.6519609", "0.6518859", "0.6518199", "0.6517114", "0.6508895", "0.6503307", "0.64965105", "0.6491884", "0.64862055", "0.6482288", "0.64802897", "0.64800274", "0.6467939", "0.6461567", "0.6457056", "0.6454077", "0.64524055", "0.6450773", "0.6438289", "0.64279914", "0.6425618", "0.64244336", "0.64202136", "0.6412643", "0.6404275", "0.6394056", "0.63868386", "0.63846725", "0.63817316", "0.63803285", "0.6378172", "0.63737816", "0.636386", "0.63632077", "0.63604295", "0.6341972", "0.63348013", "0.6333358", "0.63188887", "0.63110876", "0.6294272" ]
0.70474577
14
BenchmarkCryptopanIPv4 benchmarks annonymizing IPv4 addresses.
func BenchmarkCryptopanPoint(b *testing.B) { cpan, err := anonymizer.CryptoPan(testKey) if err != nil { b.Fatal("New(testKey) failed:", err) } b.ResetTimer() point := geopoint.Value(75071988303315493) for i := 0; i < b.N; i++ { _ = cpan.Anonymize(point) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func BenchmarkIPv4_inline(b *testing.B) {\n\tb.ReportAllocs()\n\tips := []ip4i{}\n\tfor i := 0; i < b.N; i++ {\n\t\tip := newip4i_v4(8, 8, 8, 8)\n\t\tips = ips[:0]\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tips = append(ips, ip)\n\t\t}\n\t}\n}", "func IPv4(a, b, c, d uint8) IP {\n\treturn IP{\n\t\tlo: 0xffff00000000 | uint64(a)<<24 | uint64(b)<<16 | uint64(c)<<8 | uint64(d),\n\t\tz: z4,\n\t}\n}", "func IpV4Address() string {\n\tblocks := []string{}\n\tfor i := 0; i < 4; i++ {\n\t\tnumber := seedAndReturnRandom(255)\n\t\tblocks = append(blocks, strconv.Itoa(number))\n\t}\n\n\treturn strings.Join(blocks, \".\")\n}", "func (d *DB) checkip(ip string) (iptype uint32, ipnum uint128.Uint128, ipindex uint32) {\n\tiptype = 0\n\tipnum = uint128.From64(0)\n\tipnumtmp := uint128.From64(0)\n\tipindex = 0\n\tipaddress := net.ParseIP(ip)\n\n\tif ipaddress != nil {\n\t\tv4 := ipaddress.To4()\n\n\t\tif v4 != nil {\n\t\t\tiptype = 4\n\t\t\tipnum = uint128.From64(uint64(binary.BigEndian.Uint32(v4)))\n\t\t} else {\n\t\t\tv6 := ipaddress.To16()\n\n\t\t\tif v6 != nil {\n\t\t\t\tiptype = 6\n\t\t\t\treverseBytes(v6)\n\t\t\t\tipnum = uint128.FromBytes(v6)\n\n\t\t\t\tif ipnum.Cmp(from_v4mapped) >= 0 && ipnum.Cmp(to_v4mapped) <= 0 {\n\t\t\t\t\t// ipv4-mapped ipv6 should treat as ipv4 and read ipv4 data section\n\t\t\t\t\tiptype = 4\n\t\t\t\t\tipnum = ipnum.Sub(from_v4mapped)\n\t\t\t\t} else if ipnum.Cmp(from_6to4) >= 0 && ipnum.Cmp(to_6to4) <= 0 {\n\t\t\t\t\t// 6to4 so need to remap to ipv4\n\t\t\t\t\tiptype = 4\n\t\t\t\t\tipnum = ipnum.Rsh(80)\n\t\t\t\t\tipnum = ipnum.And(last_32bits)\n\t\t\t\t} else if ipnum.Cmp(from_teredo) >= 0 && ipnum.Cmp(to_teredo) <= 0 {\n\t\t\t\t\t// Teredo so need to remap to ipv4\n\t\t\t\t\tiptype = 4\n\t\t\t\t\tipnum = uint128.Uint128{^ipnum.Lo, ^ipnum.Hi}\n\t\t\t\t\tipnum = ipnum.And(last_32bits)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif iptype == 4 {\n\t\tif d.meta.ipv4indexed {\n\t\t\tipnumtmp = ipnum.Rsh(16)\n\t\t\tipnumtmp = ipnumtmp.Lsh(3)\n\t\t\tipindex = 
uint32(ipnumtmp.Add(uint128.From64(uint64(d.meta.ipv4indexbaseaddr))).Lo)\n\t\t}\n\t} else if iptype == 6 {\n\t\tif d.meta.ipv6indexed {\n\t\t\tipnumtmp = ipnum.Rsh(112)\n\t\t\tipnumtmp = ipnumtmp.Lsh(3)\n\t\t\tipindex = uint32(ipnumtmp.Add(uint128.From64(uint64(d.meta.ipv6indexbaseaddr))).Lo)\n\t\t}\n\t}\n\treturn\n}", "func IPv4(opts ...options.OptionFunc) string {\n\treturn singleFakeData(IPV4Tag, func() interface{} {\n\t\topt := options.BuildOptions(opts)\n\t\ti := Internet{fakerOption: *opt}\n\t\treturn i.ipv4()\n\t}, opts...).(string)\n}", "func network4(addr uint32, prefix uint) uint32 {\n\treturn addr & netmask(prefix)\n}", "func ipv4only(addr IPAddr) bool {\n\treturn supportsIPv4 && addr.IP.To4() != nil\n}", "func indexAsIPv4(i uint32, baseSlashEight int) string {\n\tip := make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, uint32(i)+uint32(baseSlashEight*16777216))\n\treturn ip.String()\n}", "func inAddrV4(ip netip.Addr) (uint32, error) {\n\tif !ip.Is4() {\n\t\treturn 0, fmt.Errorf(\"%s is not IPv4\", ip)\n\t}\n\tv4 := ip.As4()\n\treturn endian.Uint32(v4[:]), nil\n}", "func ParseIPv4AndCIDR(data string) []*net.IPNet {\n\tfmt.Println(\"data: \", data)\n\tvar reIPv4 = regexp.MustCompile(`(((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])+)(\\/(3[0-2]|[1-2][0-9]|[0-9]))?`)\n\t//var reIPv4 = regexp.MustCompile(`(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/(3[0-2]|[1-2][0-9]|[0-9]))`)\n\tscanner := bufio.NewScanner(strings.NewReader(data))\n\n\taddrs := make([]*net.IPNet, 0)\n\tfor scanner.Scan() {\n\t\tx := reIPv4.FindString(scanner.Text())\n\t\tfmt.Println(\"in ParseIPv4AndCIDR, x: \", x)\n\t\tif !strings.Contains(x, \"/\") {\n\t\t\tif !strings.Contains(x, \":\") {\n\t\t\t\tx = x + \"/32\"\n\t\t\t} else {\n\t\t\t\tx = x + \"/128\"\n\t\t\t}\n\t\t}\n\t\tif addr, cidr, e := net.ParseCIDR(x); e == nil {\n\t\t\t//if !ipv4.IsRFC4193(addr) && 
!ipv4.IsLoopback(addr) && !ipv4.IsBogonIP(addr) {\n\t\t\tif !IsLoopback(addr) && !IsBogonIP(addr) {\n\t\t\t\taddrs = append(addrs, cidr)\n\t\t\t}\n\t\t}\n\t}\n\treturn addrs\n}", "func extractIPv4(ptr string) string {\n\ts := strings.Replace(ptr, \".in-addr.arpa\", \"\", 1)\n\twords := strings.Split(s, \".\")\n\tfor i, j := 0, len(words)-1; i < j; i, j = i+1, j-1 {\n\t\twords[i], words[j] = words[j], words[i]\n\t}\n\treturn strings.Join(words, \".\")\n}", "func decodeips(ips []string) {\n\n\tvar match bool\n\n\tif verbose && host != \"\" {\n\t\tfmt.Printf(\"Received %v addresses from %s\\n\", len(ips), host)\n\t}\n\n\tfor _, ip := range ips {\n\n\t\tnetip := net.ParseIP(ip)\n\t\tnetip = netip.To4()\n\t\tif netip == nil {\n\t\t\tcontinue\n\t\t}\n\t\tcrcAddr := crc16(netip)\n\t\tmatch = false\n\n\t\tfor _, ipport := range ips {\n\n\t\t\tnetipport := net.ParseIP(ipport)\n\t\t\tnetipport = netipport.To4()\n\t\t\tif netipport == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif (netipport[0] == byte(crcAddr>>8)) && (netipport[1] == byte(crcAddr&0xff)) {\n\t\t\t\ttheport := (uint16(netipport[2]) << 8) + uint16(netipport[3])\n\t\t\t\tfmt.Printf(\"realip: %s\\tport: %v\\tencodedip: %s\\n\", ip, theport, ipport)\n\t\t\t\tif verbose {\n\t\t\t\t\tfmt.Printf(\"crcAddr: 0x%x encoded bytes: 0x%x 0x%x 0x%x 0x%x\\n\", crcAddr, netipport[0], netipport[1], netipport[2], netipport[3])\n\t\t\t\t}\n\t\t\t\tmatch = true\n\t\t\t}\n\t\t}\n\t\tif match == false && verbose {\n\t\t\tfmt.Printf(\"No match found for ip: %s\\n\", ip)\n\t\t}\n\t}\n\n}", "func IPv4ClassfulNetwork(address net.IP) *net.IPNet {\n\tif address.To4() != nil {\n\t\tvar newIP net.IP\n\t\tvar newMask net.IPMask\n\t\tswitch {\n\t\tcase uint8(address[0]) < 128:\n\t\t\tnewIP = net.IPv4(uint8(address[0]), 0, 0, 0)\n\t\t\tnewMask = net.IPv4Mask(255, 0, 0, 0)\n\t\tcase uint8(address[0]) < 192:\n\t\t\tnewIP = net.IPv4(uint8(address[0]), uint8(address[1]), 0, 0)\n\t\t\tnewMask = net.IPv4Mask(255, 255, 0, 0)\n\t\tcase uint8(address[0]) < 
224:\n\t\t\tnewIP = net.IPv4(uint8(address[0]), uint8(address[1]), uint8(address[2]), 0)\n\t\t\tnewMask = net.IPv4Mask(255, 255, 255, 0)\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t\treturn &net.IPNet{IP: newIP, Mask: newMask}\n\t}\n\treturn nil\n}", "func encodeip(ip, port string) {\n\n\tvar netport int\n\tif port != \"\" {\n\t\tnetport, _ = strconv.Atoi(port)\n\t}\n\n\tnetip := net.ParseIP(ip)\n\tnetip = netip.To4()\n\tif netip == nil {\n\t\tfmt.Printf(\"error changing ip: %s to 4 bytes storage\\n\", ip)\n\t\tos.Exit(0)\n\t}\n\n\tcrcAddr := crc16(netip)\n\n\tbs := make([]byte, 4)\n\tbs[0] = byte(crcAddr >> 8)\n\tbs[1] = byte(crcAddr & 0xff)\n\tbs[2] = byte(netport >> 8)\n\tbs[3] = byte(netport & 0xff)\n\n\tencodedip := net.IPv4(bs[0], bs[1], bs[2], bs[3])\n\tif x := encodedip.To4(); x == nil {\n\t\tfmt.Printf(\"Error checking encoded ip to real ip\\n\")\n\t\tfmt.Printf(\"Real ip: \\t%s\\n\", ip)\n\t\tfmt.Printf(\"Encoded ip:\\t%v.%v.%v.%v\\n\", bs[0], bs[1], bs[2], bs[3])\n\t} else {\n\n\t\tfmt.Printf(\"crcAddr:\\t%#v\\n\", crcAddr)\n\t\tfmt.Printf(\"Real ip: \\t%s\\n\", ip)\n\t\tfmt.Printf(\"Encoded ip:\\t%s\\n\", encodedip.String())\n\t}\n}", "func PublicIpv4EqualFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EqualFold(s.C(FieldPublicIpv4), v))\n\t})\n}", "func parseIPv4(s string) (ip IP, ok bool) {\n\tvar ip4 [4]byte\n\n\tfor i := 0; i < 4; i++ {\n\t\tvar (\n\t\t\tj int\n\t\t\tacc uint16\n\t\t)\n\t\t// Parse one byte of digits. Bail if we overflow, stop at\n\t\t// first non-digit.\n\t\t//\n\t\t// As of Go 1.15, don't try to factor this digit reading into\n\t\t// a helper function. 
Its complexity is slightly too high for\n\t\t// inlining, which ends up costing +50% in parse time.\n\t\tfor j = 0; j < len(s); j++ {\n\t\t\tif s[j] < '0' || s[j] > '9' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tacc = (acc * 10) + uint16(s[j]-'0')\n\t\t\tif acc > 255 {\n\t\t\t\treturn IP{}, false\n\t\t\t}\n\t\t}\n\t\t// There must be at least 1 digit per quad.\n\t\tif j == 0 {\n\t\t\treturn IP{}, false\n\t\t}\n\n\t\tip4[i] = uint8(acc)\n\n\t\t// Non-final byte must be followed by a dot\n\t\tif i < 3 {\n\t\t\tif len(s) == j || s[j] != '.' {\n\t\t\t\treturn IP{}, false\n\t\t\t}\n\t\t\tj++\n\t\t}\n\n\t\t// Advance to the next set of digits.\n\t\ts = s[j:]\n\t}\n\tif len(s) != 0 {\n\t\treturn IP{}, false\n\t}\n\n\treturn IPv4(ip4[0], ip4[1], ip4[2], ip4[3]), true\n}", "func (i Internet) Ipv4() string {\n\tips := make([]string, 0, 4)\n\n\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(1, 255)))\n\tfor j := 0; j < 3; j++ {\n\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(0, 255)))\n\t}\n\n\treturn strings.Join(ips, \".\")\n}", "func TestIPv4Routes(t *testing.T) {\n\tctx := Setup(t)\n\tdefer ctx.Teardown()\n\n\tconst (\n\t\t// first subnet\n\t\tmsName1 = \"microservice1\"\n\t\tsubnet1 = \"10.0.0.0/24\"\n\t\ttap1IP = \"10.0.0.1\"\n\t\tlinuxTap1IP = \"10.0.0.2\"\n\t\ttap1Label = \"tap-1\"\n\n\t\t// second subnet\n\t\tmsName2 = \"microservice2\"\n\t\tsubnet2 = \"20.0.0.0/24\"\n\t\ttap2IP = \"20.0.0.1\"\n\t\tlinuxTap2IP = \"20.0.0.2\"\n\t\ttap2Label = \"tap-2\"\n\n\t\tsuffix = \"/24\"\n\t)\n\n\t// TAP interface for the first subnet\n\tvppTap1 := &vpp_interfaces.Interface{\n\t\tName: tap1Label,\n\t\tType: vpp_interfaces.Interface_TAP,\n\t\tEnabled: true,\n\t\tIpAddresses: []string{tap1IP + suffix},\n\t\tLink: &vpp_interfaces.Interface_Tap{\n\t\t\tTap: &vpp_interfaces.TapLink{\n\t\t\t\tVersion: 2,\n\t\t\t\tToMicroservice: MsNamePrefix + msName1,\n\t\t\t},\n\t\t},\n\t}\n\tlinuxTap1 := &linux_interfaces.Interface{\n\t\tName: tap1Label,\n\t\tType: 
linux_interfaces.Interface_TAP_TO_VPP,\n\t\tEnabled: true,\n\t\tIpAddresses: []string{linuxTap1IP + suffix},\n\t\tLink: &linux_interfaces.Interface_Tap{\n\t\t\tTap: &linux_interfaces.TapLink{\n\t\t\t\tVppTapIfName: tap1Label,\n\t\t\t},\n\t\t},\n\t\tNamespace: &linux_namespace.NetNamespace{\n\t\t\tType: linux_namespace.NetNamespace_MICROSERVICE,\n\t\t\tReference: MsNamePrefix + msName1,\n\t\t},\n\t}\n\n\t// TAP interfaces for the second subnet\n\tvppTap2 := &vpp_interfaces.Interface{\n\t\tName: tap2Label,\n\t\tType: vpp_interfaces.Interface_TAP,\n\t\tEnabled: true,\n\t\tIpAddresses: []string{tap2IP + suffix},\n\t\tLink: &vpp_interfaces.Interface_Tap{\n\t\t\tTap: &vpp_interfaces.TapLink{\n\t\t\t\tVersion: 2,\n\t\t\t\tToMicroservice: MsNamePrefix + msName2,\n\t\t\t},\n\t\t},\n\t}\n\tlinuxTap2 := &linux_interfaces.Interface{\n\t\tName: tap2Label,\n\t\tType: linux_interfaces.Interface_TAP_TO_VPP,\n\t\tEnabled: true,\n\t\tIpAddresses: []string{linuxTap2IP + suffix},\n\t\tLink: &linux_interfaces.Interface_Tap{\n\t\t\tTap: &linux_interfaces.TapLink{\n\t\t\t\tVppTapIfName: tap2Label,\n\t\t\t},\n\t\t},\n\t\tNamespace: &linux_namespace.NetNamespace{\n\t\t\tType: linux_namespace.NetNamespace_MICROSERVICE,\n\t\t\tReference: MsNamePrefix + msName2,\n\t\t},\n\t}\n\n\t// Routes\n\tsubnet1LinuxRoute := &linux_l3.Route{\n\t\tOutgoingInterface: tap1Label,\n\t\tScope: linux_l3.Route_GLOBAL,\n\t\tDstNetwork: subnet2,\n\t\tGwAddr: tap1IP,\n\t}\n\tsubnet2LinuxRoute := &linux_l3.Route{\n\t\tOutgoingInterface: tap2Label,\n\t\tScope: linux_l3.Route_GLOBAL,\n\t\tDstNetwork: subnet1,\n\t\tGwAddr: tap2IP,\n\t}\n\tsubnet2LinuxLinkRoute := &linux_l3.Route{\n\t\tOutgoingInterface: tap2Label,\n\t\tScope: linux_l3.Route_LINK,\n\t\tDstNetwork: subnet1,\n\t}\n\n\tctx.StartMicroservice(msName1)\n\tctx.StartMicroservice(msName2)\n\n\t// configure everything in one resync\n\terr := ctx.GenericClient().ResyncConfig(\n\t\tvppTap1, linuxTap1,\n\t\tvppTap2, linuxTap2,\n\t\tsubnet1LinuxRoute, 
subnet2LinuxRoute,\n\t)\n\tctx.Expect(err).ToNot(HaveOccurred())\n\n\tctx.Eventually(ctx.GetValueStateClb(vppTap1)).Should(Equal(kvscheduler.ValueState_CONFIGURED))\n\tctx.Expect(ctx.GetValueState(linuxTap1)).To(Equal(kvscheduler.ValueState_CONFIGURED))\n\tctx.Expect(ctx.GetValueState(vppTap2)).To(Equal(kvscheduler.ValueState_CONFIGURED))\n\tctx.Expect(ctx.GetValueState(linuxTap2)).To(Equal(kvscheduler.ValueState_CONFIGURED))\n\tctx.Expect(ctx.GetValueState(subnet1LinuxRoute)).To(Equal(kvscheduler.ValueState_CONFIGURED))\n\tctx.Expect(ctx.GetValueState(subnet2LinuxRoute)).To(Equal(kvscheduler.ValueState_CONFIGURED))\n\n\tctx.Expect(ctx.GetRunningMicroservice(msName1).Ping(\"20.0.0.2\")).To(Succeed())\n\tctx.Expect(ctx.GetRunningMicroservice(msName2).Ping(\"10.0.0.2\")).To(Succeed())\n\n\t// keep the current number of routes before the update\n\tnumLinuxRoutes := ctx.NumValues(&linux_l3.Route{}, kvs.SBView)\n\n\t// reconfigure subnet 1 route as link local\n\terr = ctx.GenericClient().ChangeRequest().Update(\n\t\tsubnet2LinuxLinkRoute,\n\t).Send(context.Background())\n\tctx.Expect(err).ToNot(HaveOccurred())\n\n\tctx.Expect(ctx.GetRunningMicroservice(msName1).Ping(\"20.0.0.2\")).NotTo(Succeed())\n\tctx.Expect(ctx.GetRunningMicroservice(msName2).Ping(\"10.0.0.2\")).NotTo(Succeed())\n\n\t// route count should be unchanged\n\tctx.Expect(ctx.NumValues(&linux_l3.Route{}, kvs.SBView)).To(Equal(numLinuxRoutes))\n}", "func parseIPv4(s string) (ip ipOctets, cc int) {\n\tip = make(ipOctets, net.IPv4len)\n\n\tfor i := 0; i < net.IPv4len; i++ {\n\t\tip[i] = make([]ipOctet, 0)\n\t}\n\n\tvar bb [2]uint16 // octet bounds: 0 - lo, 1 - hi\n\n\ti := 0 // octet idx\n\tk := 0 // bound idx: 0 - lo, 1 - hi\n\nloop:\n\tfor i < net.IPv4len {\n\t\t// Decimal number.\n\t\tn, c, ok := dtoi(s)\n\t\tif !ok || n > 0xFF {\n\t\t\treturn nil, cc\n\t\t}\n\n\t\t// Save bound.\n\t\tbb[k] = uint16(n)\n\n\t\t// Stop at max of string.\n\t\ts = s[c:]\n\t\tcc += c\n\t\tif len(s) == 0 {\n\t\t\tip.push(i, 
bb[0], bb[1])\n\t\t\ti++\n\t\t\tbreak\n\t\t}\n\n\t\t// Otherwise must be followed by dot, colon or dp.\n\t\tswitch s[0] {\n\t\tcase '.':\n\t\t\tfallthrough\n\t\tcase ',':\n\t\t\tip.push(i, bb[0], bb[1])\n\t\t\tbb[1] = 0\n\t\t\tk = 0\n\t\tcase '-':\n\t\t\tif k == 1 {\n\t\t\t\t// To many dashes in one octet.\n\t\t\t\treturn nil, cc\n\t\t\t}\n\t\t\tk++\n\t\tdefault:\n\t\t\tip.push(i, bb[0], bb[1])\n\t\t\ti++\n\t\t\tbreak loop\n\t\t}\n\n\t\tif s[0] == '.' {\n\t\t\ti++\n\t\t}\n\n\t\ts = s[1:]\n\t\tcc++\n\t}\n\n\tif i < net.IPv4len {\n\t\t// Missing ip2octets.\n\t\treturn nil, cc\n\t}\n\n\treturn ip, cc\n}", "func TestIPNetSerialization(t *testing.T) {\n\tt.Parallel()\n\n\ttestCases := []struct {\n\t\tname string\n\t\tipNet *net.IPNet\n\t\terr error\n\t}{\n\t\t{\n\t\t\tname: \"ipv4 without mask\",\n\t\t\tipNet: &net.IPNet{\n\t\t\t\tIP: net.ParseIP(\"172.217.6.46\"),\n\t\t\t\tMask: net.IPv4Mask(0x00, 0x00, 0x00, 0x00),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ipv4 with default mask\",\n\t\t\tipNet: &net.IPNet{\n\t\t\t\tIP: net.ParseIP(\"172.217.6.46\"),\n\t\t\t\tMask: defaultIPv4Mask,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ipv4 with non-default mask\",\n\t\t\tipNet: &net.IPNet{\n\t\t\t\tIP: net.ParseIP(\"172.217.6.46\"),\n\t\t\t\tMask: net.IPv4Mask(0xff, 0xff, 0x00, 0x00),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ipv6 without mask\",\n\t\t\tipNet: &net.IPNet{\n\t\t\t\tIP: net.ParseIP(\"2001:db8:a0b:12f0::1\"),\n\t\t\t\tMask: net.IPMask(make([]byte, net.IPv6len)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ipv6 with default mask\",\n\t\t\tipNet: &net.IPNet{\n\t\t\t\tIP: net.ParseIP(\"2001:db8:a0b:12f0::1\"),\n\t\t\t\tMask: defaultIPv6Mask,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ipv6 with non-default mask\",\n\t\t\tipNet: &net.IPNet{\n\t\t\t\tIP: net.ParseIP(\"2001:db8:a0b:12f0::1\"),\n\t\t\t\tMask: net.IPMask([]byte{\n\t\t\t\t\t0xff, 0xff, 0x00, 0x00, 0x00, 0xff,\n\t\t\t\t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n\t\t\t\t\t0x00, 0x00, 0x00, 
0x00,\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, testCase := range testCases {\n\t\ttestCase := testCase\n\t\tsuccess := t.Run(testCase.name, func(t *testing.T) {\n\t\t\t// Serialize the IP network and deserialize it back.\n\t\t\t// We'll do this to ensure we are properly serializing\n\t\t\t// and deserializing them.\n\t\t\tvar b bytes.Buffer\n\t\t\terr := encodeIPNet(&b, testCase.ipNet)\n\t\t\tif testCase.err != nil && err != testCase.err {\n\t\t\t\tt.Fatalf(\"encoding IP network %v expected \"+\n\t\t\t\t\t\"error \\\"%v\\\", got \\\"%v\\\"\",\n\t\t\t\t\ttestCase.ipNet, testCase.err, err)\n\t\t\t}\n\t\t\tipNet, err := decodeIPNet(&b)\n\t\t\tif testCase.err != nil && err != testCase.err {\n\t\t\t\tt.Fatalf(\"decoding IP network %v expected \"+\n\t\t\t\t\t\"error \\\"%v\\\", got \\\"%v\\\"\",\n\t\t\t\t\ttestCase.ipNet, testCase.err, err)\n\t\t\t}\n\n\t\t\t// If the test did not expect a result, i.e., an invalid\n\t\t\t// IP network, then we can exit now.\n\t\t\tif testCase.err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Otherwise, ensure the result is what we expect.\n\t\t\tif !ipNet.IP.Equal(testCase.ipNet.IP) {\n\t\t\t\tt.Fatalf(\"expected IP %v, got %v\",\n\t\t\t\t\ttestCase.ipNet.IP, ipNet.IP)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(ipNet.Mask, testCase.ipNet.Mask) {\n\t\t\t\tt.Fatalf(\"expected mask %#v, got %#v\",\n\t\t\t\t\ttestCase.ipNet.Mask, ipNet.Mask)\n\t\t\t}\n\t\t})\n\t\tif !success {\n\t\t\treturn\n\t\t}\n\t}\n}", "func IsValidIP4(ipAddress string) bool {\n\tipAddress = strings.Trim(ipAddress, \" \")\n\tif !regexp.MustCompile(`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$`).\n\t\tMatchString(ipAddress) {\n\t\treturn false\n\t}\n\treturn true\n}", "func BenchmarkFNV1a(b *testing.B) {\n\tstr := \"[email protected]\"\n\tbytes := []byte(str)\n\th := fnv.New32a()\n\tfor i := 0; i < b.N; i++ {\n\t\th.Reset()\n\t\th.Write(bytes)\n\t\t_ = h.Sum([]byte{})\n\t}\n}", "func RandomIPv4() string 
{\n\tfirst := rand.Int31n(256)\n\tsecond := rand.Int31n(256)\n\tthird := rand.Int31n(256)\n\tfourth := rand.Int31n(256)\n\tstr := fmt.Sprintf(\"%d.%d.%d.%d\", first, second, third, fourth)\n\treturn str\n}", "func extractUnicastIPv4Addrs(addrs []net.Addr) []string {\n\tvar ips []string\n\n\tfor _, a := range addrs {\n\t\tvar ip net.IP\n\n\t\tswitch a := a.(type) {\n\t\tcase *net.IPNet:\n\t\t\tip = a.IP\n\t\tcase *net.IPAddr:\n\t\t\tip = a.IP\n\t\t}\n\n\t\tif ip == nil || len(ip.To4()) == 0 {\n\t\t\t// Windows dataplane doesn't support IPv6 yet.\n\t\t\tcontinue\n\t\t}\n\t\tif ip.IsLoopback() {\n\t\t\t// Skip 127.0.0.1.\n\t\t\tcontinue\n\t\t}\n\t\tips = append(ips, ip.String()+\"/32\")\n\t}\n\n\treturn ips\n}", "func isIPv4(s []byte) bool {\n\tfor _, v := range s[4:] {\n\t\tif v != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func inet_aton(ipnr net.IP) int64 {\n\tbits := strings.Split(ipnr.String(), \".\")\n\n\tb0, _ := strconv.Atoi(bits[0])\n\tb1, _ := strconv.Atoi(bits[1])\n\tb2, _ := strconv.Atoi(bits[2])\n\tb3, _ := strconv.Atoi(bits[3])\n\n\tvar sum int64\n\n\tsum += int64(b0) << 24\n\tsum += int64(b1) << 16\n\tsum += int64(b2) << 8\n\tsum += int64(b3)\n\n\treturn sum\n}", "func PublicIpv4ContainsFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldPublicIpv4), v))\n\t})\n}", "func Ipv4EqualFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EqualFold(s.C(FieldIpv4), v))\n\t})\n}", "func Ipv4(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldIpv4), v))\n\t})\n}", "func IPv4(str string) bool {\n\tip := net.ParseIP(str)\n\treturn ip != nil && strings.Contains(str, \".\")\n}", "func (internet Internet) IPv4(v reflect.Value) (interface{}, error) {\n\treturn internet.ipv4(), nil\n}", "func extractIPv4(reverseName string) (string, error) {\n\t// reverse the segments and then combine 
them\n\tsegments := ReverseArray(strings.Split(reverseName, \".\"))\n\n\tip := net.ParseIP(strings.Join(segments, \".\")).To4()\n\tif ip == nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse IPv4 reverse name: %q\", reverseName)\n\t}\n\treturn ip.String(), nil\n}", "func IncrementIPv4(ip net.IP, inc int) net.IP {\n\tip = ip.To4()\n\tv := binary.BigEndian.Uint32(ip)\n\tif v >= uint32(0) {\n\t\tv = v + uint32(inc)\n\t} else {\n\t\tv = v - uint32(-inc)\n\t}\n\tip = make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, v)\n\treturn ip\n}", "func (ip IP) As4() [4]byte {\n\tif ip.z == z4 || ip.Is4in6() {\n\t\tvar ret [4]byte\n\t\tbinary.BigEndian.PutUint32(ret[:], uint32(ip.lo))\n\t\treturn ret\n\t}\n\tif ip.z == z0 {\n\t\tpanic(\"As4 called on IP zero value\")\n\t}\n\tpanic(\"As4 called on IPv6 address\")\n}", "func ipv6EllipsisIPv4(s string) (n int, ok bool) {\n\tsplit := strings.Split(s, \"::\")\n\tleft := strings.Split(split[0], \":\")\n\tnleft := len(left)\n\tif split[0] == \"\" {\n\t\tnleft = 0\n\t}\n\tnoipv4 := strings.TrimSuffix(strings.TrimRight(split[1], \".0123456789\"), \":\")\n\tright := strings.Split(noipv4, \":\")\n\tnright := len(right)\n\tif noipv4 == \"\" {\n\t\tnright = 0\n\t}\n\n\tif nleft+nright <= 5 {\n\t\treturn len(s), true\n\t}\n\tif nleft+nright <= 7 {\n\t\treturn len(split[0]) + 2 + len(noipv4), true\n\t}\n\tright = right[:7-len(left)]\n\trightlen := len(strings.Join(right, \":\"))\n\treturn len(split[0]) + 2 + rightlen, true\n}", "func stringIPv4(n uint32) string {\n\tip := make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, n)\n\treturn ip.String()\n}", "func Test_RandAddressAt(t *testing.T) {\n\tt.Parallel()\n\n\tbase := swarm.MustParseHexAddress(\"ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c\")\n\tb0 := base.Bytes()\n\thw0 := []byte{b0[0], b0[1], 0, 0} // highest words of base address\n\thw0int := binary.BigEndian.Uint32(hw0)\n\n\tfor bitsInCommon := 0; bitsInCommon < 30; bitsInCommon++ {\n\t\taddr := swarm.RandAddressAt(t, 
base, bitsInCommon)\n\t\tassertNotZeroAddress(t, addr)\n\n\t\tb1 := addr.Bytes()\n\t\thw1 := []byte{b1[0], b1[1], 0, 0} // highest words of 1\n\t\thw1int := binary.BigEndian.Uint32(hw1)\n\n\t\t//bb0 is the bit mask to AND with hw0 and hw1\n\t\tbb0 := uint32(0)\n\t\tfor i := 0; i < bitsInCommon; i++ {\n\t\t\tbb0 |= (1 << (31 - i))\n\t\t}\n\n\t\tandhw0 := hw0int & bb0\n\t\tandhw1 := hw1int & bb0\n\n\t\t// the result of the AND with both highest words of b0 and b1 should be equal\n\t\tif andhw0 != andhw1 {\n\t\t\tt.Fatalf(\"hw0 %08b hw1 %08b mask %08b &0 %08b &1 %08b\", hw0int, hw1int, bb0, andhw0, andhw1)\n\t\t}\n\t}\n}", "func FilterIPV4(ips []net.IP) []string {\n\tvar ret = make([]string, 0)\n\tfor _, ip := range ips {\n\t\tif ip.To4() != nil {\n\t\t\tret = append(ret, ip.String())\n\t\t}\n\t}\n\treturn ret\n}", "func localIPv4s() ([]string, error) {\n\tvar ips []string\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn ips, err\n\t}\n\n\tfor _, a := range addrs {\n\t\tif ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() && ipnet.IP.To4() != nil {\n\t\t\tips = append(ips, ipnet.IP.String())\n\t\t}\n\t}\n\n\treturn ips, nil\n}", "func (internet *Internet) IPv4Address() string {\n\tvar parts []string\n\tfor i := 0; i < 4; i++ {\n\t\tparts = append(parts, fmt.Sprintf(\"%d\", internet.faker.random.Intn(253)+2))\n\t}\n\treturn strings.Join(parts, \".\")\n}", "func decodeIPv4ToNetIP(ip uint32) net.IP {\n\toBigInt := big.NewInt(0)\n\toBigInt = oBigInt.SetUint64(uint64(ip))\n\treturn IntToIPv4(oBigInt)\n}", "func (ip IP) v4(i uint8) uint8 {\n\treturn uint8(ip.lo >> ((3 - i) * 8))\n}", "func PublicIpv4Contains(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldPublicIpv4), v))\n\t})\n}", "func (i Internet) LocalIpv4() string {\n\tips := make([]string, 0, 4)\n\tips = append(ips, i.Faker.RandomStringElement([]string{\"10\", \"172\", \"192\"}))\n\n\tif ips[0] == \"10\" {\n\t\tfor j := 0; j 
< 3; j++ {\n\t\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(0, 255)))\n\t\t}\n\t}\n\n\tif ips[0] == \"172\" {\n\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(16, 31)))\n\n\t\tfor j := 0; j < 2; j++ {\n\t\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(0, 255)))\n\t\t}\n\t}\n\n\tif ips[0] == \"192\" {\n\t\tips = append(ips, \"168\")\n\n\t\tfor j := 0; j < 2; j++ {\n\t\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(0, 255)))\n\t\t}\n\t}\n\n\treturn strings.Join(ips, \".\")\n}", "func Benchmark_Ctx_IPs(b *testing.B) {\n\tapp := New()\n\tc := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(c)\n\tc.Fasthttp.Request.Header.Set(HeaderXForwardedFor, \"127.0.0.1, 127.0.0.1, 127.0.0.1\")\n\tvar res []string\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tres = c.IPs()\n\t}\n\tutils.AssertEqual(b, []string{\"127.0.0.1\", \"127.0.0.1\", \"127.0.0.1\"}, res)\n}", "func splitRange4(addr uint32, prefix uint, lo, hi uint32, cidrs *[]*net.IPNet) error {\n\tif prefix > 32 {\n\t\treturn fmt.Errorf(\"Invalid mask size: %d\", prefix)\n\t}\n\n\tbc := broadcast4(addr, prefix)\n\tif (lo < addr) || (hi > bc) {\n\t\treturn fmt.Errorf(\"%d, %d out of range for network %d/%d, broadcast %d\", lo, hi, addr, prefix, bc)\n\t}\n\n\tif (lo == addr) && (hi == bc) {\n\t\tcidr := net.IPNet{IP: uint32ToIPV4(addr), Mask: net.CIDRMask(int(prefix), 8*net.IPv4len)}\n\t\t*cidrs = append(*cidrs, &cidr)\n\t\treturn nil\n\t}\n\n\tprefix++\n\tlowerHalf := addr\n\tupperHalf := setBit(addr, prefix, 1)\n\tif hi < upperHalf {\n\t\treturn splitRange4(lowerHalf, prefix, lo, hi, cidrs)\n\t} else if lo >= upperHalf {\n\t\treturn splitRange4(upperHalf, prefix, lo, hi, cidrs)\n\t} else {\n\t\terr := splitRange4(lowerHalf, prefix, lo, broadcast4(lowerHalf, prefix), cidrs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn splitRange4(upperHalf, prefix, upperHalf, hi, cidrs)\n\t}\n}", "func (ds *DataStore) assignPodIPv4AddressUnsafe(k8sPod 
*k8sapi.K8SPodInfo) (string, int, error) {\n\tpodKey := PodKey{\n\t\tname: k8sPod.Name,\n\t\tnamespace: k8sPod.Namespace,\n\t\tcontainer: k8sPod.Container,\n\t}\n\tcurTime := time.Now()\n\tfor _, nic := range ds.nicIPPools {\n\t\tif (k8sPod.IP == \"\") && (len(nic.IPv4Addresses) == nic.AssignedIPv4Addresses) {\n\t\t\t// skip this NIC, since it has no available IP addresses\n\t\t\tklog.V(2).Infof(\"AssignPodIPv4Address: Skip NIC %s that does not have available addresses\", nic.ID)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, addr := range nic.IPv4Addresses {\n\t\t\tif k8sPod.IP == addr.Address {\n\t\t\t\t// After L-IPAM restart and built IP warm-pool, it needs to take the existing running pod IP out of the pool.\n\t\t\t\tif !addr.Assigned {\n\t\t\t\t\tincrementAssignedCount(ds, nic, addr)\n\t\t\t\t}\n\t\t\t\tklog.V(1).Infof(\"AssignPodIPv4Address: Reassign IP %v to pod (name %s, namespace %s)\",\n\t\t\t\t\taddr.Address, k8sPod.Name, k8sPod.Namespace)\n\t\t\t\tds.podsIP[podKey] = PodIPInfo{IP: addr.Address, DeviceNumber: nic.DeviceNumber}\n\t\t\t\treturn addr.Address, nic.DeviceNumber, nil\n\t\t\t}\n\t\t\tif !addr.Assigned && k8sPod.IP == \"\" && curTime.Sub(addr.UnassignedTime) > addressCoolingPeriod {\n\t\t\t\t// This is triggered by a pod's Add Network command from CNI plugin\n\t\t\t\tincrementAssignedCount(ds, nic, addr)\n\t\t\t\tklog.V(1).Infof(\"AssignPodIPv4Address: Assign IP %v to pod (name %s, namespace %s container %s)\",\n\t\t\t\t\taddr.Address, k8sPod.Name, k8sPod.Namespace, k8sPod.Container)\n\t\t\t\tds.podsIP[podKey] = PodIPInfo{IP: addr.Address, DeviceNumber: nic.DeviceNumber}\n\t\t\t\treturn addr.Address, nic.DeviceNumber, nil\n\t\t\t}\n\t\t}\n\t}\n\tklog.Errorf(\"DataStore has no available IP addresses\")\n\treturn \"\", 0, errors.New(\"assignPodIPv4AddressUnsafe: no available IP addresses\")\n}", "func commonPrefixLen(a, b net.IP) (cpl int) {\n\tif a4 := a.To4(); a4 != nil {\n\t\ta = a4\n\t}\n\tif b4 := b.To4(); b4 != nil {\n\t\tb = b4\n\t}\n\tif len(a) != 
len(b) {\n\t\treturn 0\n\t}\n\t// If IPv6, only up to the prefix (first 64 bits)\n\tif len(a) > 8 {\n\t\ta = a[:8]\n\t\tb = b[:8]\n\t}\n\tfor len(a) > 0 {\n\t\tif a[0] == b[0] {\n\t\t\tcpl += 8\n\t\t\ta = a[1:]\n\t\t\tb = b[1:]\n\t\t\tcontinue\n\t\t}\n\t\tbits := 8\n\t\tab, bb := a[0], b[0]\n\t\tfor {\n\t\t\tab >>= 1\n\t\t\tbb >>= 1\n\t\t\tbits--\n\t\t\tif ab == bb {\n\t\t\t\tcpl += bits\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func isIPv4(ip net.IP) bool {\n\treturn ip.To4() != nil && strings.Count(ip.String(), \":\") < 2\n}", "func ip(ipnet *net.IPNet, offset uint32) []expr.Any {\n\t// TODO: make it work with ipv6\n\t// TODO: remote the bitwise operation if there's not mask\n\tif ipnet == nil {\n\t\treturn nil\n\t}\n\n\tif ipnet.IP.To4() == nil {\n\t\treturn nil\n\t}\n\n\t// [ payload load 4b @ network header + 12 => reg 1 ]\n\t// [ bitwise reg 1 = (reg=1 & YYY ) ^ 0x00000000 ]\n\t// [ cmp eq reg 1 XXX ]\n\treturn []expr.Any{\n\t\t&expr.Payload{\n\t\t\tOperationType: expr.PayloadLoad,\n\t\t\tDestRegister: 1,\n\t\t\tBase: expr.PayloadBaseNetworkHeader,\n\t\t\tLen: 4,\n\t\t\tOffset: offset,\n\t\t},\n\t\t&expr.Bitwise{\n\t\t\tSourceRegister: 1,\n\t\t\tDestRegister: 1,\n\t\t\tLen: 4,\n\t\t\tMask: ipnet.Mask,\n\t\t\tXor: []byte{0x0, 0x0, 0x0, 0x0},\n\t\t},\n\t\t&expr.Cmp{\n\t\t\tOp: expr.CmpOpEq,\n\t\t\tRegister: 1,\n\t\t\tData: ipnet.IP.To4(),\n\t\t},\n\t}\n}", "func netNtoA(sum uint32) string {\n\tip := make(net.IP, net.IPv4len)\n\tip[0] = byte((sum >> 24) & 0xFF)\n\tip[1] = byte((sum >> 16) & 0xFF)\n\tip[2] = byte((sum >> 8) & 0xFF)\n\tip[3] = byte(sum & 0xFF)\n\treturn ip.String()\n}", "func main() {\n\tif len(os.Args) != 2 {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s host\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\n\taddr, err := net.ResolveIPAddr(\"ip\", os.Args[1])\n\tif err != nil {\n\t\tfmt.Println(\"Resolution error: \", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tconn, err := net.DialIP(\"ip4:icmp\", addr, addr)\n\tcheckError(err)\n\n\tvar msg 
[512]byte\n\tmsg[0] = 8 // echo\n\n\tmsg[1] = 0\n\n\tmsg[2] = 0\n\tmsg[3] = 0\n\n\tmsg[4] = 0\n\tmsg[5] = 13\n\n\tmsg[6] = 0\n\tmsg[7] = 37\n\n\tlen := 8\n\n\tcheck := checkSum(msg[0:len])\n\tmsg[2] = byte(check >> 8)\n\tmsg[3] = byte(check & 255)\n\t_, err = conn.Write(msg[0:len])\n\tcheckError(err)\n\t_, err = conn.Read(msg[0:])\n\tcheckError(err)\n\n\tfmt.Println(\"got response\")\n\n\tif msg[5] == 13 {\n\t\tfmt.Println(\"indentifier matches\")\n\t}\n\n\tif msg[7] == 37 {\n\t\tfmt.Println(\"sequence matches\")\n\t}\n\n\tos.Exit(0)\n}", "func broadcast4(addr uint32, prefix uint) uint32 {\n\treturn addr | ^netmask(prefix)\n}", "func sendPacket(sourceIP string, destinationIP string, size int, message string, appID int, chanID int, icmpType layers.ICMPv4TypeCode) []byte {\n\n\tvar payloadSize int\n\tif size < 28 {\n\t\t//Unable to create smaller packets.\n\t\tpayloadSize = 0\n\t} else {\n\t\tpayloadSize = size - 28\n\t}\n\n\t//Convert IP to 4bit representation\n\tsrcIP := net.ParseIP(sourceIP).To4()\n\tdstIP := net.ParseIP(destinationIP).To4()\n\n\t//IP Layer\n\tip := layers.IPv4{\n\t\tSrcIP: srcIP,\n\t\tDstIP: dstIP,\n\t\tVersion: 4,\n\t\tTTL: 64,\n\t\tProtocol: layers.IPProtocolICMPv4,\n\t}\n\n\ticmp := layers.ICMPv4{\n\t\tTypeCode: icmpType,\n\t}\n\n\topts := gopacket.SerializeOptions{\n\t\tFixLengths: true,\n\t\tComputeChecksums: true,\n\t}\n\n\tipHeaderBuf := gopacket.NewSerializeBuffer()\n\n\terr := ip.SerializeTo(ipHeaderBuf, opts)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t//Set \"Don't Fragment\"-Flag in Header\n\tipHeader, err := ipv4.ParseHeader(ipHeaderBuf.Bytes())\n\tipHeader.Flags |= ipv4.DontFragment\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tpayloadBuf := gopacket.NewSerializeBuffer()\n\n\t//Influence the payload size\n\tpayload := gopacket.Payload(generatePayload(payloadSize, \",\"+strconv.Itoa(appID)+\",\"+strconv.Itoa(chanID)+\",\"+message+\",\"))\n\terr = gopacket.SerializeLayers(payloadBuf, opts, &icmp, payload)\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\n\t//Send packet\n\tvar packetConn net.PacketConn\n\tvar rawConn *ipv4.RawConn\n\n\tpacketConn, err = net.ListenPacket(\"ip4:icmp\", srcIP.String())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\trawConn, err = ipv4.NewRawConn(packetConn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = rawConn.WriteTo(ipHeader, payloadBuf.Bytes(), nil)\n\n\treturn append(ipHeaderBuf.Bytes(), payloadBuf.Bytes()...)\n}", "func ipv4toCinaddr(ip net.IP) C.in_addr_t {\n\t//TODO: pass a pointer of struct in_addr instead of uint32 to backend.\n\t// don't use LittleEndian.\n\tip = ip.To4()\n\tif ip == nil {\n\t\treturn 0\n\t}\n\treturn C.in_addr_t(binary.LittleEndian.Uint32(ip))\n}", "func IsIPv4(dots string) bool {\n\tip := net.ParseIP(dots)\n\tif ip == nil {\n\t\treturn false\n\t}\n\treturn ip.To4() != nil\n}", "func byteReverseIP4(ip net.IP) (revip revIP4) {\n\n\tfor j := 0; j < len(ip); j++ {\n\t\trevip.Byte[len(ip)-j-1] = ip[j]\n\t\trevip.String = fmt.Sprintf(\"%d.%s\", ip[j], revip.String)\n\t}\n\n\trevip.String = strings.TrimRight(revip.String, \".\")\n\n\treturn\n}", "func id_generate_4(ip string, port string) string {\n\tips := strings.Split(ip, \".\")\n\n\tif len(ips) != 4 {\n\t\tfmt.Println(DHT_PREFIX+\"Malformed IPv4\")\n\t\treturn \"0\"\n\t}\n\n\tid := \"\"\n\n\t// fill 0's if not an three digits number in each part of IPs\n\tfor _,v := range ips {\n\t\tid = id + strings.Repeat(\"0\", 3-len(v)) + v\n\t}\n\n\treturn id+port\n}", "func EnsureIPv4(ipOrHost string) (string, error) {\n\tip := net.ParseIP(ipOrHost)\n\tif ip != nil {\n\t\tif ip.To4() == nil {\n\t\t\treturn \"\", fmt.Errorf(\"%s is IPv6 address\", ipOrHost)\n\t\t}\n\t\treturn ipOrHost, nil\n\t}\n\taddrs, err := net.LookupHost(ipOrHost)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tips := make([]string, 0)\n\tfor _, addr := range addrs {\n\t\tif ip := net.ParseIP(addr); ip != nil {\n\t\t\tif ip.To4() == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tips = append(ips, addr)\n\t\t}\n\t}\n\tif len(ips) 
== 0 {\n\t\treturn \"\", errors.New(\"no IPv4 address found\")\n\t}\n\trand.Seed(time.Now().UnixNano())\n\treturn ips[rand.Intn(len(ips))], nil\n}", "func Ping4(addr string) Node {\n\treturn &ping4{addr: addr}\n}", "func mainFunction(start, end string, count int) ([]string, error) {\n\tallAddr := make([]string, count)\n\tsplitStartAddr := strings.Split(start, \".\")\n\ta, err := strconv.Atoi(splitStartAddr[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, err := strconv.Atoi(splitStartAddr[1])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc, err := strconv.Atoi(splitStartAddr[2])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td, err := strconv.Atoi(splitStartAddr[3])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i:=0; i<count; i++ {\n\t\tif d > 255 {\n\t\t\td = 0\n\t\t\tc++\n\t\t}\n\t\tif c > 255 {\n\t\t\tc = 0\n\t\t\tb++\n\t\t}\n\t\tif b > 255 {\n\t\t\tb = 0\n\t\t\ta++\n\t\t}\n\t\tif a > 255 {\n\t\t\treturn nil, errors.New(\"ip address is not exist\")\n\t\t}\n\n\t\ttemp := make([]string, 4)\n\t\ttemp[0] = strconv.Itoa(a)\n\t\ttemp[1] = strconv.Itoa(b)\n\t\ttemp[2] = strconv.Itoa(c)\n\t\ttemp[3] = strconv.Itoa(d)\n\n\t\tallAddr[i] = strings.Join(temp, \".\")\n\t\td++\n\t}\n\tcount--\n\tfmt.Println(\"test: \")\n\tfmt.Println(allAddr)\n\tif allAddr[count] != end {\n\t\treturn nil, errors.New(\"fail\")\n\t}\n\n\treturn allAddr, nil\n}", "func pad4(n uint16) int {\n\treturn -int(n) & 3\n}", "func mask4(n uint8) uint32 {\n\treturn ^uint32(0) << (32 - n)\n}", "func isIPv4(s string) bool {\n\tip := netutils.ParseIPSloppy(s)\n\treturn ip != nil && strings.Contains(s, \".\")\n}", "func ipv4ToUInt32(ip net.IP) uint32 {\n\treturn binary.BigEndian.Uint32(ip)\n}", "func Test_validIPAddress(t *testing.T) {\n\tcases := []struct {\n\t\tname, input, expected string\n\t}{\n\t\t{\"x1\", \"172.16.254.1\", \"IPv4\"},\n\t\t{\"x2\", \"2001:0db8:85a3:0:0:8A2E:0370:7334\", \"IPv6\"},\n\t\t{\"x3\", \"256.256.256.256\", \"Neither\"},\n\t}\n\n\tfor _, tt := range cases 
{\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif output := validIPAddress(tt.input); output != tt.expected {\n\t\t\t\tt.Errorf(\"validIPAddress(%s)=%s, expected=%s\", tt.input, output, tt.expected)\n\t\t\t}\n\t\t})\n\t}\n}", "func GetIPv4(c *fluent.GRIBIClient, wantACK fluent.ProgrammingResult, t testing.TB, _ ...TestOpt) {\n\tops := []func(){\n\t\tfunc() {\n\t\t\tc.Modify().AddEntry(t,\n\t\t\t\tfluent.NextHopEntry().\n\t\t\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\t\t\tWithIndex(1).\n\t\t\t\t\tWithIPAddress(\"1.1.1.1\"))\n\t\t},\n\t\tfunc() {\n\t\t\tc.Modify().AddEntry(t,\n\t\t\t\tfluent.NextHopGroupEntry().\n\t\t\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\t\t\tWithID(1).\n\t\t\t\t\tAddNextHop(1, 1))\n\t\t},\n\t\tfunc() {\n\t\t\tc.Modify().AddEntry(t,\n\t\t\t\tfluent.IPv4Entry().\n\t\t\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\t\t\tWithNextHopGroup(1).\n\t\t\t\t\tWithPrefix(\"42.42.42.42/32\"))\n\t\t},\n\t}\n\n\tres := doModifyOps(c, t, ops, wantACK, false)\n\n\tchk.HasResult(t, res,\n\t\tfluent.OperationResult().\n\t\t\tWithNextHopOperation(1).\n\t\t\tWithOperationType(constants.Add).\n\t\t\tWithProgrammingResult(wantACK).\n\t\t\tAsResult(),\n\t\tchk.IgnoreOperationID(),\n\t)\n\n\tchk.HasResult(t, res,\n\t\tfluent.OperationResult().\n\t\t\tWithNextHopGroupOperation(1).\n\t\t\tWithOperationType(constants.Add).\n\t\t\tWithProgrammingResult(wantACK).\n\t\t\tAsResult(),\n\t\tchk.IgnoreOperationID(),\n\t)\n\n\tchk.HasResult(t, res,\n\t\tfluent.OperationResult().\n\t\t\tWithIPv4Operation(\"42.42.42.42/32\").\n\t\t\tWithOperationType(constants.Add).\n\t\t\tWithProgrammingResult(wantACK).\n\t\t\tAsResult(),\n\t\tchk.IgnoreOperationID(),\n\t)\n\n\tctx := context.Background()\n\tc.Start(ctx, t)\n\tdefer c.Stop(t)\n\tgr, err := c.Get().\n\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\tWithAFT(fluent.IPv4).\n\t\tSend()\n\n\tif err != nil {\n\t\tt.Fatalf(\"got unexpected error from 
get, got: %v\", err)\n\t}\n\n\tchk.GetResponseHasEntries(t, gr,\n\t\tfluent.IPv4Entry().\n\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\tWithNextHopGroup(1).\n\t\t\tWithPrefix(\"42.42.42.42/32\"),\n\t)\n}", "func PublicIpv4(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldPublicIpv4), v))\n\t})\n}", "func swizzle(tls *libc.TLS, in uint32, limit uint32) uint32 { /* speedtest1.c:286:10: */\n\tvar out uint32 = uint32(0)\n\tfor limit != 0 {\n\t\tout = ((out << 1) | (in & uint32(1)))\n\t\tin >>= 1\n\t\tlimit >>= 1\n\t}\n\treturn out\n}", "func Ipv4ContainsFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldIpv4), v))\n\t})\n}", "func MakeV4PrefixFromNetwork(ip string) string {\n\tparts := strings.Split(ip, \".\")\n\treturn fmt.Sprintf(\"%s.%s.%s.\", parts[0], parts[1], parts[2])\n}", "func PublicIpv4GTE(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldPublicIpv4), v))\n\t})\n}", "func IPv4() (string, error) {\n\tconn, err := net.Dial(\"udp\", \"8.8.8.8:80\")\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Failed to determine your IP\")\n\t}\n\tlocalAddr := conn.LocalAddr().(*net.UDPAddr)\n\tmyIP := localAddr.IP.String()\n\tconn.Close()\n\treturn myIP, nil\n}", "func CheckIPv4Addr(addr string) error {\n\tif IsEmptyString(&addr) {\n\t\treturn errors.New(\"addr is empty\")\n\t}\n\n\tstrArray := strings.Split(addr, \":\")\n\n\tif len(strArray) != 2 {\n\t\treturn errors.New(\"Invalid addr:\" + addr)\n\t}\n\n\tif IsEmptyString(&strArray[0]) {\n\t\treturn errors.New(\"Invalid addr:\" + addr)\n\t}\n\n\tif IsEmptyString(&strArray[1]) {\n\t\treturn errors.New(\"Invalid addr:\" + addr)\n\t}\n\n\tvar error error\n\n\tipv4 := strArray[0]\n\terror = CheckIPv4(ipv4)\n\tif error != nil {\n\t\treturn error\n\t}\n\n\tvar port int64\n\tport, error = strconv.ParseInt(strArray[1], 
10, 64)\n\tif error != nil {\n\t\treturn error\n\t}\n\n\terror = CheckPort(port)\n\tif error != nil {\n\t\treturn error\n\t}\n\n\treturn nil\n}", "func getAddrDelay(addrs []ma.Multiaddr, tcpDelay time.Duration, quicDelay time.Duration,\n\toffset time.Duration) []network.AddrDelay {\n\n\tsort.Slice(addrs, func(i, j int) bool { return score(addrs[i]) < score(addrs[j]) })\n\n\t// If the first address is (QUIC, IPv6), make the second address (QUIC, IPv4).\n\thappyEyeballs := false\n\tif len(addrs) > 0 {\n\t\tif isQUICAddr(addrs[0]) && isProtocolAddr(addrs[0], ma.P_IP6) {\n\t\t\tfor i := 1; i < len(addrs); i++ {\n\t\t\t\tif isQUICAddr(addrs[i]) && isProtocolAddr(addrs[i], ma.P_IP4) {\n\t\t\t\t\t// make IPv4 address the second element\n\t\t\t\t\tif i > 1 {\n\t\t\t\t\t\ta := addrs[i]\n\t\t\t\t\t\tcopy(addrs[2:], addrs[1:i])\n\t\t\t\t\t\taddrs[1] = a\n\t\t\t\t\t}\n\t\t\t\t\thappyEyeballs = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tres := make([]network.AddrDelay, 0, len(addrs))\n\n\tvar totalTCPDelay time.Duration\n\tfor i, addr := range addrs {\n\t\tvar delay time.Duration\n\t\tswitch {\n\t\tcase isQUICAddr(addr):\n\t\t\t// For QUIC addresses we dial an IPv6 address, then after quicDelay an IPv4\n\t\t\t// address, then after quicDelay we dial rest of the addresses.\n\t\t\tif i == 1 {\n\t\t\t\tdelay = quicDelay\n\t\t\t}\n\t\t\tif i > 1 && happyEyeballs {\n\t\t\t\tdelay = 2 * quicDelay\n\t\t\t} else if i > 1 {\n\t\t\t\tdelay = quicDelay\n\t\t\t}\n\t\t\ttotalTCPDelay = delay + tcpDelay\n\t\tcase isProtocolAddr(addr, ma.P_TCP):\n\t\t\tdelay = totalTCPDelay\n\t\t}\n\t\tres = append(res, network.AddrDelay{Addr: addr, Delay: offset + delay})\n\t}\n\treturn res\n}", "func GetIPv4Chain(c *fluent.GRIBIClient, wantACK fluent.ProgrammingResult, t testing.TB, _ ...TestOpt) {\n\tops := []func(){\n\t\tfunc() 
{\n\t\t\tc.Modify().AddEntry(t,\n\t\t\t\tfluent.NextHopEntry().\n\t\t\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\t\t\tWithIndex(1).\n\t\t\t\t\tWithIPAddress(\"1.1.1.1\"))\n\t\t},\n\t\tfunc() {\n\t\t\tc.Modify().AddEntry(t,\n\t\t\t\tfluent.NextHopGroupEntry().\n\t\t\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\t\t\tWithID(1).\n\t\t\t\t\tAddNextHop(1, 1))\n\t\t},\n\t\tfunc() {\n\t\t\tc.Modify().AddEntry(t,\n\t\t\t\tfluent.IPv4Entry().\n\t\t\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\t\t\tWithNextHopGroup(1).\n\t\t\t\t\tWithPrefix(\"42.42.42.42/32\"))\n\t\t},\n\t}\n\n\tres := doModifyOps(c, t, ops, wantACK, false)\n\n\tchk.HasResult(t, res,\n\t\tfluent.OperationResult().\n\t\t\tWithNextHopOperation(1).\n\t\t\tWithOperationType(constants.Add).\n\t\t\tWithProgrammingResult(wantACK).\n\t\t\tAsResult(),\n\t\tchk.IgnoreOperationID(),\n\t)\n\n\tchk.HasResult(t, res,\n\t\tfluent.OperationResult().\n\t\t\tWithNextHopGroupOperation(1).\n\t\t\tWithOperationType(constants.Add).\n\t\t\tWithProgrammingResult(wantACK).\n\t\t\tAsResult(),\n\t\tchk.IgnoreOperationID(),\n\t)\n\n\tchk.HasResult(t, res,\n\t\tfluent.OperationResult().\n\t\t\tWithIPv4Operation(\"42.42.42.42/32\").\n\t\t\tWithOperationType(constants.Add).\n\t\t\tWithProgrammingResult(wantACK).\n\t\t\tAsResult(),\n\t\tchk.IgnoreOperationID(),\n\t)\n\n\tctx := context.Background()\n\tc.Start(ctx, t)\n\tdefer c.Stop(t)\n\tgr, err := c.Get().\n\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\tWithAFT(fluent.AllAFTs).\n\t\tSend()\n\n\tif err != nil {\n\t\tt.Fatalf(\"got unexpected error from get, got: %v\", err)\n\t}\n\n\tchk.GetResponseHasEntries(t, gr,\n\t\tfluent.IPv4Entry().\n\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\tWithNextHopGroup(1).\n\t\t\tWithPrefix(\"42.42.42.42/32\"),\n\t\tfluent.NextHopGroupEntry().\n\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\tWithID(1).\n\t\t\tAddNextHop(1, 
1),\n\t\tfluent.NextHopEntry().\n\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\tWithIndex(1).\n\t\t\tWithIPAddress(\"1.1.1.1\"),\n\t)\n}", "func GetIPv4Addr(ifAdds map[string]string, ipAdds *map[string]string) {\n\ttempAdds := *ipAdds\n\tfor k, v := range ifAdds {\n\t\tif runtime.GOOS == \"linux\" {\n\t\t\tif (strings.HasPrefix(k, \"local\") || strings.HasPrefix(k, \"en\") || strings.HasPrefix(k, \"eth\")) && strings.HasSuffix(k, \"_0\") {\n\t\t\t\ttempAdds[\"eth_ipv4\"] = v\n\t\t\t} else if strings.HasPrefix(k, \"wireless\") && strings.HasSuffix(k, \"_0\") {\n\t\t\t\ttempAdds[\"wireless_ipv4\"] = v\n\t\t\t}\n\t\t} else {\n\t\t\tif (strings.HasPrefix(k, \"local\") || strings.HasPrefix(k, \"en\") || strings.HasPrefix(k, \"eth\")) && strings.HasSuffix(k, \"_1\") {\n\t\t\t\ttempAdds[\"eth_ipv4\"] = v\n\t\t\t} else if strings.HasPrefix(k, \"wireless\") && strings.HasSuffix(k, \"_1\") {\n\t\t\t\ttempAdds[\"wireless_ipv4\"] = v\n\t\t\t}\n\t\t}\n\n\t}\n}", "func IsIPv4(s string) bool {\n\tvar p [IPv4len]byte\n\tfor i := 0; i < IPv4len; i++ {\n\t\tif len(s) == 0 {\n\t\t\t// Missing octets.\n\t\t\treturn false\n\t\t}\n\t\tif i > 0 {\n\t\t\tif s[0] != '.' 
{\n\t\t\t\treturn false\n\t\t\t}\n\t\t\ts = s[1:]\n\t\t}\n\t\tn, c, ok := dtoi(s)\n\t\tif !ok || n > 0xFF {\n\t\t\treturn false\n\t\t}\n\t\ts = s[c:]\n\t\tp[i] = byte(n)\n\t}\n\tif len(s) != 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func CalculateChecksum(p *packet.Packet) {\n\tif p.Ether.EtherType == packet.SwapBytesUint16(common.IPV4Number) {\n\t\tpIPv4 := p.GetIPv4()\n\t\tpIPv4.HdrChecksum = packet.SwapBytesUint16(packet.CalculateIPv4Checksum(pIPv4))\n\n\t\tif pIPv4.NextProtoID == common.UDPNumber {\n\t\t\tpUDP := p.GetUDPForIPv4()\n\t\t\tpUDP.DgramCksum = packet.SwapBytesUint16(packet.CalculateIPv4UDPChecksum(pIPv4, pUDP, p.Data))\n\t\t} else if pIPv4.NextProtoID == common.TCPNumber {\n\t\t\tpTCP := p.GetTCPForIPv4()\n\t\t\tpTCP.Cksum = packet.SwapBytesUint16(packet.CalculateIPv4TCPChecksum(pIPv4, pTCP, p.Data))\n\t\t} else {\n\t\t\tprintln(\"Unknown IPv4 protocol number\", pIPv4.NextProtoID)\n\t\t\tprintln(\"TEST FAILED\")\n\t\t}\n\t} else if packet.SwapBytesUint16(p.Ether.EtherType) == common.IPV6Number {\n\t\tpIPv6 := p.GetIPv6()\n\t\tif pIPv6.Proto == common.UDPNumber {\n\t\t\tpUDP := p.GetUDPForIPv6()\n\t\t\tpUDP.DgramCksum = packet.SwapBytesUint16(packet.CalculateIPv6UDPChecksum(pIPv6, pUDP, p.Data))\n\t\t} else if pIPv6.Proto == common.TCPNumber {\n\t\t\tpTCP := p.GetTCPForIPv6()\n\t\t\tpTCP.Cksum = packet.SwapBytesUint16(packet.CalculateIPv6TCPChecksum(pIPv6, pTCP, p.Data))\n\t\t} else {\n\t\t\tprintln(\"Unknown IPv6 protocol number\", pIPv6.Proto)\n\t\t\tprintln(\"TEST FAILED\")\n\t\t}\n\t} else {\n\t\tprintln(\"Unknown packet EtherType\", p.Ether.EtherType)\n\t\tprintln(\"TEST FAILED\")\n\t}\n}", "func IPv4(name string) (string, error) {\n\ti, err := net.InterfaceByName(name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\taddrs, err := i.Addrs()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, a := range addrs {\n\t\tif ipn, ok := a.(*net.IPNet); ok {\n\t\t\tif ipn.IP.To4() != nil {\n\t\t\t\treturn ipn.IP.String(), 
nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"no IPv4 found for interface: %q\", name)\n}", "func PublicIpv4EQ(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldPublicIpv4), v))\n\t})\n}", "func (client *MockClient) generatePodIPv4() string {\n\tclient.ipCounter++\n\treturn fmt.Sprintf(\"1.1.%d.%d\", client.ipCounter/256, client.ipCounter%256)\n}", "func isIPv4(fl FieldLevel) bool {\n\tip := net.ParseIP(fl.Field().String())\n\n\treturn ip != nil && ip.To4() != nil\n}", "func ipv4Touint32(ip net.IP) uint32 {\n\tif len(ip) == 16 {\n\t\treturn binary.BigEndian.Uint32(ip[12:16])\n\t}\n\treturn binary.BigEndian.Uint32(ip)\n}", "func inet_ntoa(ipnr int64) net.IP {\n\tvar bytes [4]byte\n\tbytes[0] = byte(ipnr & 0xFF)\n\tbytes[1] = byte((ipnr >> 8) & 0xFF)\n\tbytes[2] = byte((ipnr >> 16) & 0xFF)\n\tbytes[3] = byte((ipnr >> 24) & 0xFF)\n\n\treturn net.IPv4(bytes[3], bytes[2], bytes[1], bytes[0])\n}", "func Ipv4Contains(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldIpv4), v))\n\t})\n}", "func IPv4FragmentHash(h header.IPv4) uint32 {\n\tx := uint32(h.ID())<<16 | uint32(h.Protocol())\n\tt := h.SourceAddress().As4()\n\ty := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24\n\tt = h.DestinationAddress().As4()\n\tz := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24\n\treturn Hash3Words(x, y, z, hashIV)\n}", "func main() {\n\tvalidIPAddress(\"1.0.1.\")\n}", "func BenchmarkAdler32(b *testing.B) {\n\tstr := \"[email protected]\"\n\tbytes := []byte(str)\n\tfor i := 0; i < b.N; i++ {\n\t\t_ = adler32.Checksum(bytes)\n\t}\n}", "func pad4(n int) int {\n\treturn n + ((4 - n) & 3)\n}", "func Test(t *testing.T) {\n\tC4A := make([]byte, 524288/8)\n\tfor i := range C4A {\n\t\tC4A[i] = byte(i)\n\t}\n\n\texamples := []struct {\n\t\tKey []byte\n\t\tNonce []byte\n\t\tData []byte\n\t\tPlainText []byte\n\t\tCipherText 
[]byte\n\t\tTagLen int\n\t}{\n\t\t{ // C.1\n\t\t\t[]byte{0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f},\n\t\t\t[]byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16},\n\t\t\t[]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07},\n\t\t\t[]byte{0x20, 0x21, 0x22, 0x23},\n\t\t\t[]byte{0x71, 0x62, 0x01, 0x5b, 0x4d, 0xac, 0x25, 0x5d},\n\t\t\t4,\n\t\t},\n\t\t{ // C.2\n\t\t\t[]byte{0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f},\n\t\t\t[]byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17},\n\t\t\t[]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},\n\t\t\t[]byte{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f},\n\t\t\t[]byte{0xd2, 0xa1, 0xf0, 0xe0, 0x51, 0xea, 0x5f, 0x62, 0x08, 0x1a, 0x77, 0x92, 0x07, 0x3d, 0x59, 0x3d, 0x1f, 0xc6, 0x4f, 0xbf, 0xac, 0xcd},\n\t\t\t6,\n\t\t},\n\t\t{ // C.3\n\t\t\t[]byte{0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f},\n\t\t\t[]byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b},\n\t\t\t[]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},\n\t\t\t[]byte{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37},\n\n\t\t\t[]byte{0xe3, 0xb2, 0x01, 0xa9, 0xf5, 0xb7, 0x1a, 0x7a, 0x9b, 0x1c, 0xea, 0xec, 0xcd, 0x97, 0xe7, 0x0b, 0x61, 0x76, 0xaa, 0xd9, 0xa4, 0x42, 0x8a, 0xa5, 0x48, 0x43, 0x92, 0xfb, 0xc1, 0xb0, 0x99, 0x51},\n\t\t\t8,\n\t\t},\n\t\t{ // C.4\n\t\t\t[]byte{0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f},\n\t\t\t[]byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c},\n\t\t\tC4A,\n\t\t\t[]byte{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 
0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f},\n\n\t\t\t[]byte{0x69, 0x91, 0x5d, 0xad, 0x1e, 0x84, 0xc6, 0x37, 0x6a, 0x68, 0xc2, 0x96, 0x7e, 0x4d, 0xab, 0x61, 0x5a, 0xe0, 0xfd, 0x1f, 0xae, 0xc4, 0x4c, 0xc4, 0x84, 0x82, 0x85, 0x29, 0x46, 0x3c, 0xcf, 0x72, 0xb4, 0xac, 0x6b, 0xec, 0x93, 0xe8, 0x59, 0x8e, 0x7f, 0x0d, 0xad, 0xbc, 0xea, 0x5b},\n\t\t\t14,\n\t\t},\n\t}\n\n\tfor _, ex := range examples {\n\t\tc, err := aes.NewCipher(ex.Key)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tccm, err := NewCCMWithNonceAndTagSizes(c, len(ex.Nonce), ex.TagLen)\n\t\tif err != nil {\n\t\t\tt.Log(err)\n\t\t}\n\n\t\tCipherText := ccm.Seal(nil, ex.Nonce, ex.PlainText, ex.Data)\n\n\t\tif !bytes.Equal(ex.CipherText, CipherText) {\n\t\t\tt.Log(err)\n\t\t}\n\n\t\tPlainText, err := ccm.Open(nil, ex.Nonce, ex.CipherText, ex.Data)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif !bytes.Equal(ex.PlainText, PlainText) {\n\t\t\tt.Log(err)\n\t\t}\n\t}\n}", "func PublicIpv4HasSuffix(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldPublicIpv4), v))\n\t})\n}", "func isIP4AddrResolvable(fl FieldLevel) bool {\n\tif !isIPv4(fl) {\n\t\treturn false\n\t}\n\n\t_, err := net.ResolveIPAddr(\"ip4\", fl.Field().String())\n\n\treturn err == nil\n}", "func ipFrom16Match(ip IP, a [16]byte) IP {\n\tif ip.Is6() {\n\t\treturn IPv6Raw(a) // doesn't unwrap\n\t}\n\treturn IPFrom16(a)\n}", "func uint32ToIPV4(addr uint32) net.IP {\n\tip := make([]byte, net.IPv4len)\n\tbinary.BigEndian.PutUint32(ip, addr)\n\treturn ip\n}", "func isCIDRv4(fl FieldLevel) bool {\n\tip, _, err := net.ParseCIDR(fl.Field().String())\n\n\treturn err == nil && ip.To4() != nil\n}", "func isPureIPv4Address(ipString string) bool {\n\tfor i := 0; i < len(ipString); i++ {\n\t\tswitch ipString[i] {\n\t\tcase '.':\n\t\t\treturn true\n\t\tcase ':':\n\t\t\treturn 
false\n\t\t}\n\t}\n\treturn false\n}", "func InetNtoA(sum uint32) string {\n\tip := make(net.IP, net.IPv4len)\n\tip[0] = byte((sum >> 24) & 0xFF)\n\tip[1] = byte((sum >> 16) & 0xFF)\n\tip[2] = byte((sum >> 8) & 0xFF)\n\tip[3] = byte(sum & 0xFF)\n\treturn ip.String()\n}", "func IsIPv4(ip net.IP) bool {\n\treturn (len(ip) == net.IPv4len) || (len(ip) == net.IPv6len &&\n\t\tip[0] == 0x00 &&\n\t\tip[1] == 0x00 &&\n\t\tip[2] == 0x00 &&\n\t\tip[3] == 0x00 &&\n\t\tip[4] == 0x00 &&\n\t\tip[5] == 0x00 &&\n\t\tip[6] == 0x00 &&\n\t\tip[7] == 0x00 &&\n\t\tip[8] == 0x00 &&\n\t\tip[9] == 0x00 &&\n\t\tip[10] == 0xff &&\n\t\tip[11] == 0xff)\n}", "func Test4Send(t *testing.T) {\n\tt.Parallel()\n\t//TestListener.TestListener()\n\taddrs, err := net.LookupHost(\"www.google.com\")\n\tif err != nil {\n\t\tt.Log(\"Test4Send: LookupHost err.Error(): \", err.Error())\n\t\tt.Fail()\n\t}\n\n\ttest, err := newProbe(\"\", addrs[0], 2003, 80)\n\ttest.Timeout = 3 * time.Second\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Fatal error: \", err.Error())\n\t\tt.Fatalf(\"Fatal error: %s\", err.Error())\n\t}\n\n\terr = test.Send()\n\tif err != nil {\n\t\tt.Log(\"Test4Send: test failed to run\")\n\t\tt.Fail()\n\t}\n\tif test.Result.Raw != nil {\n\t\tt.Log(\"test.Result != nil, expected == nil\")\n\t\tt.Fail()\n\t}\n\n}", "func ipSpan(ipnet *net.IPNet) (minIP, maxIP uint32) {\n\tminIP = binary.BigEndian.Uint32(ipnet.IP.To4()) + 1\n\tmaxIP = minIP + (^binary.BigEndian.Uint32(ipnet.Mask)) - 2\n\treturn minIP, maxIP\n}" ]
[ "0.6648894", "0.6117457", "0.5954083", "0.56230605", "0.5603974", "0.5562972", "0.5555366", "0.5479856", "0.5454979", "0.5426456", "0.5424627", "0.5422614", "0.5397122", "0.5388935", "0.53806365", "0.5374257", "0.53436697", "0.5332875", "0.5310846", "0.52789986", "0.5262892", "0.5196923", "0.51791334", "0.5175691", "0.5153823", "0.5151748", "0.5097216", "0.5086516", "0.50784266", "0.5078402", "0.50739026", "0.50691724", "0.506576", "0.50590354", "0.5057168", "0.50514215", "0.5039639", "0.50076926", "0.49984765", "0.49858633", "0.49780807", "0.49755755", "0.4968437", "0.49616435", "0.49607173", "0.49548584", "0.4938768", "0.48869458", "0.4883283", "0.48711404", "0.48623526", "0.4848261", "0.48466688", "0.4838842", "0.48383033", "0.48359495", "0.48356715", "0.48317087", "0.48267674", "0.48240682", "0.48224047", "0.48163953", "0.48126948", "0.48079485", "0.479595", "0.47878528", "0.4786536", "0.47751364", "0.47697482", "0.4764119", "0.4756433", "0.47503695", "0.47499302", "0.47477153", "0.47446483", "0.47411886", "0.47398323", "0.4739035", "0.47358063", "0.47304192", "0.47245574", "0.47245216", "0.47203317", "0.47094685", "0.47039464", "0.47035754", "0.46982542", "0.4694112", "0.46937802", "0.46923393", "0.46907997", "0.4689982", "0.46870035", "0.46824935", "0.46739894", "0.46715423", "0.46690935", "0.46658522", "0.4664619", "0.46639755", "0.4661137" ]
0.0
-1
package function to get a shared PSRInstance
func psrInstance() (*PSRTracker, error) { if sharedInstance == nil { return nil, fmt.Errorf("Missing psrInstance singleton") } return sharedInstance, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *provider) getInstance(ctn *container) (reflect.Value, error) {\n\tif p.scope == Singleton {\n\t\tvar err error\n\t\tp.once.Do(func() {\n\t\t\tp.instance, err = p.call(ctn)\n\t\t})\n\t\treturn p.instance, err\n\t} else {\n\t\treturn p.call(ctn)\n\t}\n}", "func Get() *Service { return singleton }", "func (c *serviceClass) getSingleton(ns, name string) (*kapi.Service, error) {\n\treturn c.rk.clientset.Core().Services(ns).Get(name, meta.GetOptions{})\n}", "func (c *replicaSetClass) getSingleton(ns, name string) (*appsv1.ReplicaSet, error) {\n\treturn c.rk.clientset.Apps().ReplicaSets(ns).Get(name, metav1.GetOptions{})\n}", "func (this *DefaultHandler) GetInstance(xesRedis redo.XesRedisBase) (instance string) {\n\tconf := this.getKeyInfo(xesRedis)\n\tRedisConfMap := core.GetRedisConf()\n\n\tif xesRedis.GetCtx() != nil {\n\t\tif val := xesRedis.GetCtx().Value(\"CacheRemember\"); val != nil && cast.ToBool(val) == true {\n\t\t\treturn this.getShardingKeyInConf(xesRedis, \"localredis.num\", \"localredis\")\n\t\t}\n\t}\n\tif xesRedis.GetKey() == xesRedis.GetKeyName() {\n\t\treturn \"cache\"\n\t}\n\n\t//回放的时候是否有指定的redis连接\n\tusePika := false\n\tif xesRedis.GetCtx() != nil {\n\t\tif IS_PLAYBACK := xesRedis.GetCtx().Value(\"IS_PLAYBACK\"); IS_PLAYBACK != nil {\n\t\t\tif val, ok := IS_PLAYBACK.(string); ok {\n\t\t\t\tif val == \"1\" {\n\t\t\t\t\tusePika = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif _, ok := conf[\"playbackconnection\"]; ok && usePika {\n\t\tlogger.Dx(xesRedis.GetCtx(), \"[getInstance]\", \"usepika IS_PLAYBACK:%s,keyInfo:%v\", xesRedis.GetCtx().Value(\"IS_PLAYBACK\"), conf)\n\t\txesRedis.SetInstanceIP(confutil.GetConf(\"Redis\", cast.ToString(conf[\"playbackconnection\"])))\n\t\tinstance = cast.ToString(conf[\"playbackconnection\"])\n\t\tif instance == \"playbackpika\" {\n\t\t\tinstance = this.getFNVShardingKeyInConf(xesRedis, \"playbackpika.num\", instance)\n\t\t}\n\t\treturn\n\t}\n\tif sharding, ok := conf[\"sharding\"].(string); ok && sharding != 
\"\" {\n\t\treturn this.getShardingKeyInConf(xesRedis, \"shareding.num\", \"shareding\")\n\t}\n\t//If there is already an available link, reuse the original link.\n\tif connection, ok := conf[\"connection\"]; ok {\n\t\tif conn, ok := connection.(string); ok {\n\t\t\txesRedis.SetInstanceIP(RedisConfMap[conn])\n\t\t\treturn conn\n\t\t} else {\n\t\t\txesRedis.SetInstanceIP(\"\")\n\t\t\treturn \"\"\n\t\t}\n\t}\n\t//set redis server address into redis client.\n\txesRedis.SetInstanceIP(RedisConfMap[\"cache\"])\n\treturn \"cache\"\n}", "func (c *replicaSetClass) getSingleton(ns, name string) (*kext.ReplicaSet, error) {\n\treturn c.rk.clientset.Extensions().ReplicaSets(ns).Get(name, meta.GetOptions{})\n}", "func asSingleton(provider internalProvider) *singletonWrapper {\n\treturn &singletonWrapper{internalProvider: provider}\n}", "func getInstance() *Instance {\n\tstdOnce.Do(func() {\n\t\tstd, _ = NewEx(&Config{\n\t\t\tDisableAutoSaveHistory: true,\n\t\t})\n\t})\n\treturn std\n}", "func (px *Paxos) getInstance(seq int) Instance {\n px.mu.Lock()\n defer px.mu.Unlock()\n\n if _, ok := px.Instances[seq]; !ok {\n px.Instances[seq] = Instance { NullProposal(), \n NullProposal(), \n nil, \n Pending }\n if seq > px.maxSeq {\n px.maxSeq = seq\n }\n }\n\n px.freeMemory()\n\n return px.Instances[seq]\n}", "func (source SourceConfig) getInstance() sources.Source {\n\tswitch source.Type {\n\tcase SOURCE_BIRDWATCHER:\n\t\treturn birdwatcher.NewBirdwatcher(source.Birdwatcher)\n\t}\n\n\treturn nil\n}", "func SessHelperInst() *SessHelper {\n\tgSessHelperOnce.Do(func() {\n\t\tgSessHelper = &SessHelper{}\n\t\tgSessHelper.SetRedis(dbs.RedisInst().Client(\"default\"))\n\t\txlog.Info(\"init => SessHelper : ok\")\n\t})\n\treturn gSessHelper\n}", "func GetInstance() *GlobalContainer {\n\treturn globalContainerInstance\n}", "func GetInstance() *OddSingleton {\n\tvar singleton *OddSingleton\n\t(&lck).Lock()\n\tdefer (&lck).Unlock()\n\n\tif instances[nextInstance] == nil 
{\n\t\tinstances[nextInstance] = &OddSingleton{serialNumber: nextInstance + 1}\n\t}\n\tsingleton = instances[nextInstance]\n\tnextInstance = (nextInstance + 1) % 2\n\treturn singleton\n}", "func NewShared() *Minerva {\n\treturn &Minerva{shared: sharedMinerva}\n}", "func GetInstance(channelID string, config api.Config) (api.Client, error) {\n\tif channelID == \"\" {\n\t\treturn nil, errors.New(errors.GeneralError, \"Channel is required\")\n\t}\n\tvar c *clientImpl\n\tc.initializeCache()\n\tclientMutex.RLock()\n\tc = cachedClient[channelID] //client from cache\n\tclientMutex.RUnlock()\n\n\tif c != nil {\n\t\treturn c, nil\n\t}\n\n\tclientMutex.Lock()\n\tdefer clientMutex.Unlock()\n\n\tc = &clientImpl{selectionService: NewSelectionService(config), config: config}\n\terr := c.initialize(config.GetConfigBytes())\n\tif err != nil {\n\t\tlogger.Errorf(\"Error initializing client: %s\\n\", err)\n\t\treturn nil, errors.Wrap(errors.GeneralError, err, \"error initializing fabric client\")\n\t}\n\n\tif c.client == nil {\n\t\tlogger.Errorf(\"Error: SDK client is nil!!!\\n\")\n\t\treturn nil, errors.New(errors.GeneralError, \"SDK client is nil\")\n\t}\n\t//put client into cache\n\tcachedClient[channelID] = c\n\treturn c, nil\n}", "func (s *Server) GetInstance() *http.Server { return s.instance }", "func newNSLock(isDistXL bool) *nsLockMap {\n\tnsMutex := nsLockMap{\n\t\tisDistXL: isDistXL,\n\t}\n\tif isDistXL {\n\t\treturn &nsMutex\n\t}\n\tnsMutex.lockMap = make(map[nsParam]*nsLock)\n\treturn &nsMutex\n}", "func init() {\n\tinstance = GetInstance()\n}", "func GetInstance() *redis.Client {\n\tonce.Do(func() {\n\t\tRedisClient = redis.NewClient(&redis.Options{\n\t\t\tAddr: redisServerUrl,\n\t\t\tPassword: \"\",\n\t\t\tDB: 0,\n\t\t})\n\t\tpong, err := RedisClient.Ping().Result()\n\t\tif err != nil {\n\t\t\tfmt.Println(pong, err)\n\t\t}\n\t})\n\treturn RedisClient\n}", "func SharedSlaveContext() PocketSlaveContext {\n return getSingletonSlaveContext()\n}", "func Get() Interface 
{\n\tif singleton == nil {\n\t\tsingleton = new(impl)\n\t}\n\treturn singleton\n}", "func getInstance() *Instance {\n\tapiBaseURL := os.Getenv(\"DOCKER_HUB_API_URL\")\n\treg := os.Getenv(\"DOCKER_REGISTRY_URL\")\n\n\tif apiBaseURL != \"\" && reg != \"\" {\n\t\treturn &Instance{\n\t\t\tAPIHubBaseURL: apiBaseURL,\n\t\t\tRegistryInfo: &registry.IndexInfo{\n\t\t\t\tName: reg,\n\t\t\t\tMirrors: nil,\n\t\t\t\tSecure: true,\n\t\t\t\tOfficial: false,\n\t\t\t},\n\t\t}\n\t}\n\n\treturn &hub\n}", "func init() {\n\tShCache = &ShareCache{\n\t\tLRPC: &LeaderRpcAddr{\n\t\t\tAddr: \"\",\n\t\t\tPort: \"\",\n\t\t},\n\t}\n}", "func New() SharedMap {\n\tsm := sharedMap{\n\t\tm: make(map[string]interface{}),\n\t\tc: make(chan command),\n\t}\n\tgo sm.run()\n\treturn sm\n}", "func GetSingletonInstance() *singleton {\n\tonce.Do(func() {\n\t\tsingletonInstance = &singleton{\n\t\t\tid: time.Now().UnixNano(),\n\t\t}\n\t})\n\n\treturn singletonInstance\n}", "func Instance() *Cache {\n\tif client == nil {\n\t\tclient = new()\n\t}\n\treturn client\n}", "func GetInstance3() *singleton {\n\tif instance == nil {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\n\t\tif instance == nil {\n\t\t\tinstance = &singleton{}\n\t\t}\n\t}\n\treturn instance\n}", "func (m *Import) namespace(req *sdk.GetReq) Namespace {\n\tif global, ok := m.Root.(Namespace); ok {\n\t\treturn global\n\t}\n\n\t// Look for it in the cache of executions\n\tm.namespaceLock.RLock()\n\tns, ok := m.namespaceMap[req.ExecId]\n\tm.namespaceLock.RUnlock()\n\tif ok {\n\t\treturn ns\n\t}\n\n\tnsFunc, ok := m.Root.(NamespaceCreator)\n\tif !ok {\n\t\tpanic(\"Root must be NamespaceCreator if not Namespace\")\n\t}\n\n\t// Not found, we have to create it\n\tm.namespaceLock.Lock()\n\tdefer m.namespaceLock.Unlock()\n\n\t// If it was created while we didn't have the lock, return it\n\tns, ok = m.namespaceMap[req.ExecId]\n\tif ok {\n\t\treturn ns\n\t}\n\n\t// Init if we have to\n\tif m.namespaceMap == nil {\n\t\tm.namespaceMap = 
make(map[uint64]Namespace)\n\t}\n\n\t// Create it\n\tns = nsFunc.Namespace()\n\tm.namespaceMap[req.ExecId] = ns\n\n\t// Create the expiration function\n\ttime.AfterFunc(time.Until(req.ExecDeadline), func() {\n\t\tm.invalidateNamespace(req.ExecId)\n\t})\n\n\treturn ns\n}", "func (i InputCheckPasswordSRP) construct() InputCheckPasswordSRPClass { return &i }", "func toInstance(syncInstance *pb.SyncInstance) (instance *scpb.MicroServiceInstance) {\n\tinstance = &scpb.MicroServiceInstance{}\n\tif syncInstance.PluginName == PluginName && len(syncInstance.Expansions) > 0 {\n\t\tmatches := pb.Expansions(syncInstance.Expansions).Find(expansionDatasource, map[string]string{})\n\t\tif len(matches) > 0 {\n\t\t\terr := proto.Unmarshal(matches[0].Bytes, instance)\n\t\t\tif err == nil {\n\t\t\t\tinstance.InstanceId = syncInstance.InstanceId\n\t\t\t\tinstance.ServiceId = syncInstance.ServiceId\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Errorf(err, \"proto unmarshal %s instance, instanceID = %s, kind = %v, content = %v failed\",\n\t\t\t\tPluginName, instance.InstanceId, matches[0].Kind, matches[0].Bytes)\n\n\t\t}\n\t}\n\tinstance.InstanceId = syncInstance.InstanceId\n\tinstance.ServiceId = syncInstance.ServiceId\n\tinstance.Endpoints = make([]string, 0, len(syncInstance.Endpoints))\n\tinstance.HostName = syncInstance.HostName\n\tinstance.Version = syncInstance.Version\n\tinstance.Status = pb.SyncInstance_Status_name[int32(syncInstance.Status)]\n\n\tfor _, ep := range syncInstance.Endpoints {\n\t\taddr, err := url.Parse(ep)\n\t\tif err != nil {\n\t\t\tlog.Errorf(err, \"parse sc instance endpoint failed: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tendpoint := \"\"\n\t\tswitch addr.Scheme {\n\t\tcase \"http\":\n\t\t\tendpoint = strings.Replace(ep, \"http://\", \"rest://\", 1)\n\t\tcase \"https\":\n\t\t\tendpoint = strings.Replace(ep, \"https://\", \"rest://\", 1) + \"?sslEnabled=true\"\n\t\tcase \"rest\", \"highway\":\n\t\t\tendpoint = ep\n\t\t}\n\t\tinstance.Endpoints = 
append(instance.Endpoints, endpoint)\n\t}\n\n\tif syncInstance.HealthCheck != nil && syncInstance.HealthCheck.Mode != pb.HealthCheck_UNKNOWN {\n\t\tinstance.HealthCheck = &scpb.HealthCheck{\n\t\t\tMode: pb.HealthCheck_Modes_name[int32(syncInstance.HealthCheck.Mode)],\n\t\t\tPort: syncInstance.HealthCheck.Port,\n\t\t\tInterval: syncInstance.HealthCheck.Interval,\n\t\t\tTimes: syncInstance.HealthCheck.Times,\n\t\t\tUrl: syncInstance.HealthCheck.Url,\n\t\t}\n\t}\n\treturn\n}", "func getInstance() *KeyGen {\n\tonce.Do(\n\t\tfunc() {\n\t\t\tkeygen = new(KeyGen)\n\t\t\tkeygen.random = rand.New(rand.NewSource(time.Now().Unix()))\n\n\t\t})\n\treturn keygen\n}", "func newPrimary() *proxy {\n\tvar (\n\t\tp = &proxy{}\n\t\ttracker = mock.NewStatsTracker()\n\t\tsmap = newSmap()\n\t)\n\n\tp.owner.smap = newSmapOwner(cmn.GCO.Get())\n\tp.si = meta.NewSnode(\"primary\", apc.Proxy, meta.NetInfo{}, meta.NetInfo{}, meta.NetInfo{})\n\n\tsmap.addProxy(p.si)\n\tsmap.Primary = p.si\n\tp.owner.smap.put(smap)\n\n\tconfig := cmn.GCO.BeginUpdate()\n\tconfig.ConfigDir = \"/tmp/ais-tests\"\n\tconfig.Periodic.RetrySyncTime = cos.Duration(time.Millisecond * 100)\n\tconfig.Keepalive.Proxy.Name = \"heartbeat\"\n\tconfig.Keepalive.Proxy.Interval = cos.Duration(3 * time.Second)\n\tconfig.Timeout.CplaneOperation = cos.Duration(2 * time.Second)\n\tconfig.Timeout.MaxKeepalive = cos.Duration(4 * time.Second)\n\tconfig.Client.Timeout = cos.Duration(10 * time.Second)\n\tconfig.Client.TimeoutLong = cos.Duration(10 * time.Second)\n\tconfig.Cksum.Type = cos.ChecksumXXHash\n\tcmn.GCO.CommitUpdate(config)\n\tcmn.GCO.SetInitialGconfPath(\"/tmp/ais-tests/ais.config\")\n\n\tp.client.data = &http.Client{}\n\tp.client.control = &http.Client{}\n\tp.keepalive = newPalive(p, tracker, atomic.NewBool(true))\n\n\to := newBMDOwnerPrx(config)\n\to.put(newBucketMD())\n\tp.owner.bmd = o\n\n\te := newEtlMDOwnerPrx(config)\n\te.put(newEtlMD())\n\tp.owner.etl = e\n\n\tp.gmm = memsys.PageMM()\n\treturn p\n}", "func (r 
*HierarchyConfigReconciler) getSingleton(ctx context.Context, nm string) (*api.HierarchyConfiguration, error) {\n\tnnm := types.NamespacedName{Namespace: nm, Name: api.Singleton}\n\tinst := &api.HierarchyConfiguration{}\n\tif err := r.Get(ctx, nnm, inst); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// It doesn't exist - initialize it to a sane initial value.\n\t\tinst.ObjectMeta.Name = api.Singleton\n\t\tinst.ObjectMeta.Namespace = nm\n\t}\n\n\treturn inst, nil\n}", "func getHTTPClientSingleton() (*http.Client, error) {\n\tcerts, err := rootcerts.LoadSystemCAs()\n\tif err != nil {\n\t\tLogger.Errorf(\"Could not load System root CA files. Reason: %v\", err)\n\t\treturn nil, fmt.Errorf(\"Could not load System root CA files. Reason: %v\", err)\n\t}\n\n\tconfig := &tls.Config{\n\t\tRootCAs: certs,\n\t}\n\n\ttr := &http.Transport{TLSClientConfig: config}\n\thttpClient := &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: time.Second * ClientHttpTimeoutInSeconds,\n\t}\n\n\treturn httpClient, nil\n}", "func newInstance(moduleName, name string, priv interface{}) (*BaseInstance, error) {\n\tm, found := modules[moduleName]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"No such module: %s\", moduleName)\n\t}\n\n\tif _, exists := m.instance[name]; exists {\n\t\treturn nil, fmt.Errorf(\"%s already exists in %s\", name, moduleName)\n\t}\n\n\tbi := &BaseInstance{name: name, module: m, subinstance: false}\n\n\tringName := fmt.Sprintf(\"input-%s\", name)\n\tbi.input = dpdk.RingCreate(ringName, m.ringParam.Count, m.ringParam.SocketId, dpdk.RING_F_SC_DEQ)\n\tif bi.input == nil {\n\t\treturn nil, fmt.Errorf(\"Input ring creation faild for %s.\\n\", name)\n\t}\n\n\tif m.ringParam.SecondaryInput {\n\t\tringName := fmt.Sprintf(\"input2-%s\", name)\n\t\tbi.input2 = dpdk.RingCreate(ringName, m.ringParam.Count, m.ringParam.SocketId, dpdk.RING_F_SC_DEQ)\n\t\tif bi.input2 == nil {\n\t\t\treturn nil, fmt.Errorf(\"Second input ring creation failed for %s\", 
name)\n\t\t}\n\t}\n\n\tbi.rules = newRules()\n\n\tif m.moduleType == TypeInterface || m.moduleType == TypeRIF {\n\t\tbi.counter = NewCounter()\n\t}\n\n\tinstance, err := m.factory(bi, priv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Creating module '%s' with name '%s' failed: %v\\n\", moduleName, name, err)\n\t}\n\tbi.instance = instance\n\n\t// Set rule observer, if the module complies to RulesNotify.\n\tif rn, ok := instance.(RulesNotify); ok {\n\t\tbi.rules.setRulesNotify(rn)\n\t}\n\n\tm.instance[name] = bi\n\n\treturn bi, nil\n}", "func (d *driver) getSVC() (*s3.S3, error) {\n\tif s3Service != nil {\n\t\treturn s3Service, nil\n\t}\n\n\tcfg, err := clusterconfig.GetAWSConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsess, err := session.NewSession(&aws.Config{\n\t\tCredentials: credentials.NewStaticCredentials(cfg.Storage.S3.AccessKey, cfg.Storage.S3.SecretKey, \"\"),\n\t\tRegion: &cfg.Storage.S3.Region,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts3Service := s3.New(sess)\n\n\treturn s3Service, nil\n\n}", "func (config *Config) Singleton() (*Client, error) {\n\tif client, ok := singleton.Load(constant.ModuleRegistryEtcd, config.Name); ok && client != nil {\n\t\treturn client.(*Client), nil\n\t}\n\n\tclient, err := config.Build()\n\tif err != nil {\n\t\txlog.Jupiter().Error(\"build etcd client failed\", zap.Error(err))\n\t\treturn nil, err\n\t}\n\n\tsingleton.Store(constant.ModuleRegistryEtcd, config.Name, client)\n\n\treturn client, nil\n}", "func (pn *paxosNode) getInstance(key string) *paxosKeyData {\n\tpxi, ok := pn.instances[key]\n\tif !ok {\n\t\tpxi = &paxosKeyData{\n\t\t\tMyn: 0,\n\t\t\tNa: -1,\n\t\t\tNh: 0,\n\t\t\tVa: nil,\n\t\t\tmu: &sync.RWMutex{},\n\t\t\tCommittedVal: nil,\n\t\t\tstoreLock: &sync.RWMutex{},\n\t\t\tproposeLock: &sync.RWMutex{},\n\t\t}\n\t\tpn.instances[key] = pxi\n\t}\n\treturn pxi\n}", "func GetCryptoClient() base.CryptoClient {\n\n\t// 实例化方法1\n\t// 通过引用crypto so文件来获取crypto client\n\tcryptoClient, err := 
crypto_client.CreateCryptoClient(CryptoTypeConfig)\n\tif err != nil {\n\t\tlog.Error(\"load crypto client failed, %v\", err)\n\t}\n\treturn cryptoClient\n\n\t// 方法2\n\t// 直接引用代码, client目录中package 需由main 修改为 xchain\n\t//if cryptoClient == nil {\n\t//\tcryptoClient = xchain.GetInstance().(base.CryptoClient)\n\t//}\n\t//cryptoClient = xchain.GetInstance().(base.CryptoClient)\n\t//return cryptoClient\n}", "func newSharedSync(channel *Channel) sharedSync {\n\tshared := sharedSync{\n\t\ttransportCtx: channel.ctx,\n\t\ttransportLock: channel.transportLock,\n\t\tlegComplete: new(sync.WaitGroup),\n\t\trunSetup: new(sync.WaitGroup),\n\t\tsetupComplete: new(sync.WaitGroup),\n\t\trunLeg: new(sync.WaitGroup),\n\t}\n\n\tshared.runSetup.Add(1)\n\treturn shared\n}", "func GetInstance() *gin.RouterGroup {\n\treturn &server.RouterGroup\n}", "func newRPCServerService() (*rpcServerService, error) {\n return &rpcServerService{serviceMap: util.NewSyncMap()}, nil\n}", "func Service() *Settings {\n\tonce.Do(func() {\n\t\tinstance = &Settings{\n\t\t\tlock: sync.Mutex{},\n\t\t}\n\t\tinstance.Pirri.Version = \"4.0.0\"\n\t\tinstance.init()\n\t})\n\treturn instance\n}", "func EnsureSingleInstance() (*SingleInstance, error) {\n\tif singleInstance == nil && singleInstanceErr == nil {\n\t\tsingleInstanceLock.Lock()\n\t\tdefer singleInstanceLock.Unlock()\n\t\tif singleInstance == nil {\n\t\t\tsingleInstance, singleInstanceErr = newSingleInstance(resolveServerPath(false))\n\t\t}\n\t}\n\n\treturn singleInstance, singleInstanceErr\n}", "func GetInstance(address string, username string, password string) *RedfishClient {\n\tclient := RedfishClient{}\n\tclient.Protocol = \"https\"\n\tclient.CurrentAddress = address\n\tclient.Username = username\n\tclient.Password = password\n\treturn &client\n}", "func GetInstance(address string, username string, password string) *RedfishClient {\n\tclient := RedfishClient{}\n\tclient.Protocol = \"https\"\n\tclient.CurrentAddress = address\n\tclient.Username = 
username\n\tclient.Password = password\n\treturn &client\n}", "func Instance() *SynonymService {\n\treturn &synonymService\n}", "func NewSAMSARAAPI() SAMSARAAPI {\r\n samsaraAPIClient := new(SAMSARAAPI_IMPL)\r\n samsaraAPIClient.config = configuration_pkg.NewCONFIGURATION()\r\n\r\n return samsaraAPIClient\r\n}", "func Instance() *vestigo.Router {\n\tinfoMutex.RLock()\n\tdefer infoMutex.RUnlock()\n\treturn r\n}", "func SharedMain() *Main {\n\tonce.Do(func() {\n\t\tinstance = &Main{\n\t\t\tCallback: map[string]EventCallback{},\n\t\t}\n\t})\n\n\treturn instance\n}", "func (o *os) ensureSingleton() {\n\tif o.initialized == true {\n\t\treturn\n\t}\n\t//log.Println(\"Singleton not found. Fetching from GDNative...\")\n\tbase := gdnative.GetSingleton(\"_OS\")\n\to.SetBaseObject(base)\n\to.initialized = true\n}", "func newGoFactory() *GOFactory {\n\tgologger.SLogger.Println(\"Init Game Object Factory Singleton\")\n\tfOnce.Do(func() {\n\t\tgofactory = &GOFactory{\n\t\t\tGoCreator: make(map[string]ICreator),\n\t\t}\n\t})\n\treturn gofactory\n}", "func (p *Pod) GetInstance(fqn string) (*v1.Pod, error) {\n\to, err := p.GetFactory().Get(p.gvr.String(), fqn, true, labels.Everything())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar pod v1.Pod\n\terr = runtime.DefaultUnstructuredConverter.FromUnstructured(o.(*unstructured.Unstructured).Object, &pod)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pod, nil\n}", "func GetInstance() Singleton {\n\tif instance == nil {\n\t\tinstance = new(singleton)\n\t}\n\treturn instance\n}", "func newClient() *sts.STS {\n\tsess := session.Must(session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t}))\n\tconfig := aws.NewConfig()\n\tif debug {\n\t\tconfig.WithLogLevel(aws.LogDebugWithHTTPBody)\n\t}\n\treturn sts.New(sess, config)\n}", "func (c *client) Instance(bridge, instance string) Interface {\n\treturn &internalClient{\n\t\tsvc: c.services,\n\t\tbridge: bridge,\n\t\tinstance: 
instance,\n\t}\n}", "func getClient(c net.Conn, s *server) *Client {\n\tclient := clientPool.Get().(*Client)\n\tclient.Reset(c, s)\n\treturn client\n}", "func GetInstance(address common.Address) (*Servicekeyresolver, error) {\n\t_rpc := rpc.GetInstance()\n\tclient := _rpc.GetEthClient()\n\n\tinstance, err := NewServicekeyresolver(address, client)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\n\tif instance == nil {\n\t\terr := fmt.Errorf(\"Cannot get Servicekeyresolver Instance\")\n\t\treturn nil, err\n\t}\n\treturn instance, nil\n}", "func New(ctx context.Context, mgr manager.Manager, logger log.Logger) (*MemberReconciler, error) {\n\topts := options.GetSidecarOptions()\n\tkubeClient := kubernetes.NewForConfigOrDie(mgr.GetConfig())\n\tmember, err := identity.Retrieve(ctx, opts.Name, opts.Namespace, kubeClient)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get member\")\n\t}\n\tlogger.Info(ctx, \"Member loaded\", \"member\", member)\n\n\tc, err := client.New(mgr.GetConfig(), client.Options{\n\t\tScheme: mgr.GetScheme(),\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get dynamic client\")\n\t}\n\n\thost, err := network.FindFirstNonLocalIP()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get scylla address\")\n\t}\n\n\tauthToken, err := helpers.GetAgentAuthToken(ctx, kubeClient.CoreV1(), member.Cluster, member.Namespace)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get auth token\")\n\t}\n\n\tcfg := scyllaclient.DefaultConfig(authToken, host.String())\n\tif err := cfgutil.ParseYAML(&cfg, path.Join(naming.ScyllaClientConfigDirName, naming.ScyllaClientConfigFileName)); err != nil {\n\t\treturn nil, errors.Wrap(err, \"parse scylla agent config\")\n\t}\n\t// Auth token from secret is preferred.\n\tcfg.AuthToken = authToken\n\n\tscyllaClient, err := scyllaclient.NewClient(cfg, logger.Named(\"scylla_client\"))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"create scylla client\")\n\t}\n\tmc := 
&MemberReconciler{\n\t\tClient: c,\n\t\tkubeClient: kubeClient,\n\t\tmember: member,\n\t\tscheme: mgr.GetScheme(),\n\t\tscyllaClient: scyllaClient,\n\t\tlogger: logger,\n\t}\n\n\tif err = mc.onStartup(ctx); err != nil {\n\t\treturn nil, errors.Wrap(err, \"startup\")\n\t}\n\n\treturn mc, nil\n}", "func New() *Client {\n return &Client{&API{}}\n}", "func Instance() Service {\n\tif c == nil {\n\t\tc = &configService{\n\t\t\tdataMap: make(map[string]interface{}),\n\t\t}\n\t}\n\treturn c\n}", "func (tm *TestManager) GetInstance(moduleName string) TestApi.TestInterface {\n\tv := reflect.New(tm.ModuleMap[moduleName])\n\treturn v.Interface().(TestApi.TestInterface)\n}", "func newInstance(moduleName, name string, priv interface{}) (*BaseInstance, error) {\n\tfactory, found := instanceFactories[moduleName]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"Module '%s' doesn't exist.\\n\", moduleName)\n\t}\n\n\trp, ok := ringParams[moduleName]\n\tif !ok {\n\t\trp = defaultRingParam\n\t}\n\n\tbi := &BaseInstance{name: name}\n\n\tringName := fmt.Sprintf(\"input-%s\", name)\n\tbi.input = dpdk.RingCreate(ringName, rp.Count, rp.SocketId, dpdk.RING_F_SC_DEQ)\n\tif bi.input == nil {\n\t\treturn nil, fmt.Errorf(\"Input ring creation faild for %s.\\n\", name)\n\t}\n\n\tif rp.SecondaryInput {\n\t\tringName := fmt.Sprintf(\"input2-%s\", name)\n\t\tbi.input2 = dpdk.RingCreate(ringName, rp.Count, rp.SocketId, dpdk.RING_F_SC_DEQ)\n\t\tif bi.input2 == nil {\n\t\t\treturn nil, fmt.Errorf(\"Second input ring creation failed for %s\", name)\n\t\t}\n\t}\n\n\tbi.rules = newRules()\n\n\tinstance, err := factory(bi, priv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Creating module '%s' with name '%s' failed: %v\\n\", moduleName, name, err)\n\t}\n\tbi.instance = instance\n\n\treturn bi, nil\n}", "func New(ctx *gbimporter.PackedContext, filename string) types.ImporterFrom {\n\tif ctx == nil {\n\t\tc := gbimporter.PackContext(&build.Default)\n\t\tctx = &c\n\t}\n\tgSharedOnce.Do(func() {\n\t\tgShared = 
newPkgCache(ctx)\n\t\tgShared.BackgroundUpdater()\n\t})\n\treturn &sharedCache{gShared, ctx, filename}\n}", "func NewSharedWithChannelTeamInfo()(*SharedWithChannelTeamInfo) {\n m := &SharedWithChannelTeamInfo{\n TeamInfo: *NewTeamInfo(),\n }\n return m\n}", "func getFactory(flags *pflag.FlagSet) *clientcmd.Factory {\n\tfactory, err := getFactoryFromCluster()\n\tif err != nil {\n\t\tglog.Infof(\"Seems like we are not running in an OpenShift environment (%s), falling back to building a std factory...\", err)\n\t\tfactory = clientcmd.New(flags)\n\t}\n\n\treturn factory\n}", "func (s AuthSentCode) construct() AuthSentCodeClass { return &s }", "func (r *ReconcileZdyfapi) NewSc(m *zdyfv1alpha1.Zdyfapi) *v1.StorageClass {\n\trp := corev1.PersistentVolumeReclaimRetain\n\tmode := v1.VolumeBindingWaitForFirstConsumer\n\ttr := true\n\tsc := &v1.StorageClass{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"StorageClass\",\n\t\t\tAPIVersion: \"storage.k8s.io/v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: m.Spec.NameSCName,\n\t\t\tNamespace: m.Namespace,\n\t\t\tAnnotations: map[string]string{\"storage-class\": m.Name}, //sc 与 PVC\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(m, schema.GroupVersionKind{\n\t\t\t\t\tGroup: zdyfv1alpha1.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: zdyfv1alpha1.SchemeGroupVersion.Version,\n\t\t\t\t\tKind: \"Zdyfapi\",\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\tProvisioner: \"kubernetes.io/no-provisioner\", //no-provisioner\n\t\tParameters: map[string]string{\n\t\t\t//\"type\": m.Spec.DataSC,\n\t\t},\n\t\tAllowVolumeExpansion: &tr, //expansion the volume\n\t\tVolumeBindingMode: &mode,\n\t\tReclaimPolicy: &rp,\n\t}\n\treturn sc\n}", "func SingletonDubboClient() *Client {\n\tif dubboClient == nil {\n\t\tonceClient.Do(func() {\n\t\t\tdubboClient = NewDubboClient()\n\t\t})\n\t}\n\n\treturn dubboClient\n}", "func GetInstance() *WafServer {\n\tonceInit.Do(func() {\n\t\tserverInstance = 
newServerInstance()\n\t})\n\treturn serverInstance\n}", "func GetInstance() *Singleton {\n\tif instance == nil {\n\t\tinstance = new(Singleton)\n\t}\n\treturn instance\n}", "func GetInstance() Proxy {\n\tonce.Do(func() {\n\t\tinstance = &proxy{\n\t\t\tproxy: &apiconfigv1.Proxy{},\n\t\t\tlock: sync.Mutex{},\n\t\t}\n\t})\n\treturn instance\n}", "func spGetObject(obj unsafe.Pointer, path *C.char) unsafe.Pointer {\n\treturn C.sp_getobject(obj, path)\n}", "func (client *clientImpl) getInstanceCache() *internal.InstanceCache {\n\tclient.instanceCacheInit.Do(func() {\n\t\tif client.CacheDir == \"\" {\n\t\t\treturn\n\t\t}\n\t\tpath := filepath.Join(client.CacheDir, \"instances\")\n\t\tclient.instanceCache = internal.NewInstanceCache(local.NewFileSystem(path))\n\t})\n\treturn client.instanceCache\n}", "func newSgwCtrl(addr net.UDPAddr, dataPort int, recovery byte) (*SgwCtrl, error) {\n\tmyLog := log.WithFields(logrus.Fields{\n\t\t\"addr\": addr,\n\t\t\"recovery\": recovery,\n\t})\n\tmyLog.Info(\"A new SGW Ctrl has created\")\n\n\tabsSPgw, err := newAbsSPgw(addr, recovery, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgtpSessionRepo := newGtpSessionRepo()\n\tsgwCtrl := &SgwCtrl{absSPgw, gtpSessionRepo}\n\n\tsgwDataUDPAddr := net.UDPAddr{IP: addr.IP, Port: dataPort}\n\tsgwCtrl.pair, err = newSgwData(sgwDataUDPAddr, recovery, sgwCtrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo sgwCtrl.sgwCtrlReceiverRoutine()\n\tgo sgwCtrl.echoReceiver()\n\n\treturn sgwCtrl, nil\n}", "func Main() {\n\tcontainerdshim.Run(\"io.containerd.runsc.v1\", shim.New)\n}", "func GetInstance() NetworkUtil {\n\treturn networkUtilIns\n}", "func new() *Cache {\n\tlog.Info(infoRedisInit)\n\taddr := os.Getenv(envRedisAddr)\n\tif addr == \"\" {\n\t\tlog.Fatal(errRedisAddr)\n\t}\n\trc := redis.NewClient(&redis.Options{\n\t\tAddr: addr,\n\t\tPassword: os.Getenv(envRedisPass),\n\t})\n\n\tif _, err := rc.Ping().Result(); err != nil {\n\t\tlog.Fatal(errConnection)\n\t}\n\treturn &Cache{client: 
rc}\n}", "func OrganizationManagerInstance() (*OrganizationManager) {\n if organizationManager != nil {\n return organizationManager\n } else {\n // Get our database connection.\n dao := database.Instance()\n organizationManager = &OrganizationManager{dao}\n return organizationManager\n }\n}", "func SpawnInstance(user *twitter.User, twitterClient *twitter.Client, appConfig *AppConfig, tweetRefID *int64) error {\n\tvar count int\n\tif err := CountActiveSession(&count); err != nil {\n\t\treturn err\n\t}\n\n\tif count > appConfig.Scw.Limit {\n\t\treturn errors.New(\"Server limit reached\")\n\t}\n\n\ts, _ := scw.NewScalewayAPI(appConfig.Scw.AccessKey, appConfig.Scw.SecretKey, \"SCW-Twt\", appConfig.Scw.Region)\n\tpassword := NewPassword(6)\n\tserver := scw.ScalewayServerDefinition{\n\t\tName: user.ScreenName,\n\t\tImage: &appConfig.Scw.Image,\n\t\tOrganization: appConfig.Scw.AccessKey,\n\t\tCommercialType: appConfig.Scw.InstanceType,\n\t\tTags: []string{password},\n\t}\n\n\tid, err := s.PostServer(server)\n\n\tsession := &Session{\n\t\tUserID: user.ID,\n\t\tScreenName: user.ScreenName,\n\t\tServerID: id,\n\t\tState: \"starting\",\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := CreateSession(session); err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"twitter ID\": user.ID,\n\t\t\"server ID\": id,\n\t}).Info(\"Server created\")\n\tTweet(\n\t\ttwitterClient,\n\t\tfmt.Sprintf(\"@%s %s\", user.ScreenName, appConfig.Messages.StartingInProgress),\n\t\ttweetRefID,\n\t)\n\n\tif err := scw.StartServer(s, id, true); err != nil {\n\t\treturn err\n\n\t}\n\tlog.WithFields(logrus.Fields{\n\t\t\"twitter ID\": user.ID,\n\t\t\"server ID\": id,\n\t}).Info(\"Server started\")\n\n\tserver_info, _ := s.GetServer(id)\n\n\tnow := time.Now()\n\tsession.SessionEndAt = now.Add(time.Duration(appConfig.Scw.SessionTime * 60000000000))\n\tsession.State = \"running\"\n\n\tif err := UpdateSession(session); err != nil {\n\t\treturn 
err\n\t}\n\n\tTweet(\n\t\ttwitterClient,\n\t\tfmt.Sprintf(\"@%s %s\", user.ScreenName, appConfig.Messages.ServerReady),\n\t\ttweetRefID,\n\t)\n\n\tDM(\n\t\ttwitterClient,\n\t\tuser.ID,\n\t\tuser.ScreenName,\n\t\tfmt.Sprintf(\"%s %s ubuntu@%s\", appConfig.Messages.DmServerReady, password, server_info.PublicAddress.IP),\n\t)\n\treturn nil\n}", "func New() Go { return Go{} }", "func newRsrcmgr(t *testing.T, kvstoreURL string) *RsrcMgr {\n\t// memkv config\n\ts := kvapi.NewScheme()\n\tconfig := store.Config{Type: store.KVStoreTypeMemkv, Servers: strings.Split(kvstoreURL, \",\"), Codec: kvapi.NewJSONCodec(s)}\n\n\t// create a new memkv store\n\tkv, err := store.New(config)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create store with error: %v\", err)\n\t}\n\n\t// create a resource manager instance\n\trm, err := NewResourceMgr(kv)\n\tAssertOk(t, err, \"Resourcemgr init failed\")\n\n\treturn rm\n}", "func Instance() serving.Serving {\n\treturn instance\n}", "func WorkerPoolInstance() *WorkerPool {\n\treturn globalWorkerPool\n}", "func GetInstance() *Configuration {\r\n\tonce.Do(func() {\r\n\t\tfile, _ := os.Open(\"conf.json\")\r\n\t\tdefer file.Close()\r\n\t\tdecoder := json.NewDecoder(file)\r\n\t\tconfiguration := Configuration{}\r\n\t\terr := decoder.Decode(&configuration)\r\n\t\tif err != nil {\r\n\t\t\tfmt.Println(\"error:\", err)\r\n\t\t}\r\n\r\n\t\tvar dbDriverName string\r\n\t\tflag.StringVar(&dbDriverName, \"dbDriverName\", \"\", \"specify 'dbDriverName' to use. Defaults to specified in config.json.\")\r\n\r\n\t\tvar dbConnectionString string\r\n\t\tflag.StringVar(&dbConnectionString, \"dbConnectionString\", \"\", \"specify 'dbConnectionString' to use. Defaults to specified in config.json.\")\r\n\r\n\t\tvar jwtSecret string\r\n\t\tflag.StringVar(&jwtSecret, \"jwtSecret\", \"\", \"specify 'jwtSecret' to use. 
Defaults to specified in config.json.\")\r\n\r\n\t\tflag.Parse()\r\n\r\n\t\tif len(dbDriverName) > 0 {\r\n\t\t\tconfiguration.DbDriverName = dbDriverName\r\n\t\t}\r\n\r\n\t\tif len(dbConnectionString) > 0 {\r\n\t\t\tconfiguration.DbConnectionString = dbConnectionString\r\n\t\t}\r\n\r\n\t\tif len(jwtSecret) > 0 {\r\n\t\t\tconfiguration.JwtSecret = jwtSecret\r\n\t\t}\r\n\r\n\t\tinstantiated = &configuration\r\n\t})\r\n\r\n\treturn instantiated\r\n}", "func (c *client) createShared(org, team, path string, data map[string]interface{}) (*library.Secret, error) {\n\treturn c.create(fmt.Sprintf(\"%s/shared/%s/%s/%s\", c.config.Prefix, org, team, path), data)\n}", "func Singleton() *InstanceLogger {\n\treturn singleton\n}", "func NewInstance(projectID string, metaData map[string]string, secGroup []map[string]interface{}) openstack.InstanceClient {\n\treturn &Instance{\n\t\tprojectID: projectID,\n\t\tmetaData: metaData,\n\t\tsecGroup: secGroup,\n\t\tcreated: time.Now(),\n\t}\n}", "func (s *singleton) Shareable() bool {\n\treturn false\n}", "func newServices(\n\tsettingsFile string,\n\tswanAccess Access,\n\tswiftAccess swift.Access,\n\towidAccess owid.Access) *services {\n\tvar swiftStore swift.Store\n\tvar owidStore owid.Store\n\n\t// Use the file provided to get the SWIFT settings.\n\tswiftConfig := swift.NewConfig(settingsFile)\n\terr := swiftConfig.Validate()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Use the file provided to get the OWID settings.\n\towidConfig := owid.NewConfig(settingsFile)\n\terr = owidConfig.Validate()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Link to the SWIFT storage.\n\tswiftStore = swift.NewStore(swiftConfig)\n\n\t// Link to the OWID storage.\n\towidStore = owid.NewStore(owidConfig)\n\n\t// Get the default browser detector.\n\tb, err := swift.NewBrowserRegexes()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Create the swan configuration.\n\tc := newConfig(settingsFile)\n\n\t// Get the SWIFT access node for the SWAN network. 
Log any errors rather\n\t// than panic because it may be that a network has yet to be established\n\t// for SWAN in the storage tables.\n\tan, err := swiftStore.GetAccessNode(c.Network)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tlog.Printf(\"Has a '%s' network been created?\", c.Network)\n\t}\n\n\t// Return the services.\n\treturn &services{\n\t\tc,\n\t\tswift.NewServices(swiftConfig, swiftStore, swiftAccess, b),\n\t\towid.NewServices(owidConfig, owidStore, owidAccess),\n\t\tan,\n\t\tswanAccess}\n}", "func getNewsInstance() *News {\n\tonce.Do(func() {\n\t\tif newsInstance == nil {\n\t\t\tnewsInstance = &News{}\n\t\t}\n\t})\n\treturn newsInstance\n}", "func init() {\n\tsysImpl = &nacl.MojoNaClSystem{}\n}", "func instanceFromConfig(cfg core.Config, ch1 chan *core.Event, ch2 chan *core.Event, reg core.Registry) (core.Component, error) {\n\n\tmoduleName, ok := cfg[\"module\"].(string)\n\tif !ok {\n\t\tlog.Error(\"Missing 'module' (module name) from configuration\")\n\t\treturn nil, cli.NewExitError(\"Missing 'module' (module name) from configuration\", -3)\n\t}\n\n\tlog.Info(\"Loading \", moduleName)\n\n\tmodConstructor, ok := reg[moduleName]\n\tif !ok {\n\t\tlog.Error(\"Unknown module '\", moduleName, \"'\")\n\t\treturn nil, cli.NewExitError(\"Unknown module '\"+moduleName+\"'\", -4)\n\t}\n\n\tlog.Info(\"Loaded!\")\n\n\treturn modConstructor(ch1, ch2, cfg), nil\n}", "func (m *Mongo) Instance() (*mgo.Database, *mgo.Session) {\n\ts := m.Session.Copy()\n\treturn s.DB(m.Settings.Database), s\n}", "func newNs(name string) (*namespace, error) {\n\tns := &namespace{\n\t\tname: name,\n\t\tsetMap: make(map[string]string),\n\t\tpodMap: make(map[types.UID]*corev1.Pod),\n\t\tnpMap: make(map[string]*networkingv1.NetworkPolicy),\n\t\tipsMgr: ipsm.NewIpsetManager(),\n\t\tiptMgr: iptm.NewIptablesManager(),\n\t}\n\n\treturn ns, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileSiddhiProcess{client: mgr.GetClient(), scheme: 
mgr.GetScheme()}\n}", "func (wfm *Store) UnsafeGetInstance(name, encodedData string) (*Instance, error) {\n\n\tdef := wfm.UnsafeGetDefinition(name)\n\n\tif def == nil {\n\t\tpanic(fmt.Sprint(\"unknown workflow: \", name))\n\t}\n\n\tif encodedData == `\"\"` {\n\t\tencodedData = \"{}\"\n\t}\n\n\tvar wfDef *Definition\n\tif def.versionDef != nil {\n\t\t// in this case the workflow was versioned while running.\n\t\t// so we get the initial workflow from the list of versions in the version definition\n\t\twfDef = def.versionDef.getInitialDefinition()\n\t} else {\n\t\twfDef = def.workflowDef\n\t}\n\n\terr := serializer.Decode(encodedData, wfDef.defaultInstance.Handler)\n\n\treturn wfDef.defaultInstance, err\n}", "func GetDagInstance() DAGService {\n\tonce.Do(func() {\n\t\tparentContext = context.Background()\n\n\t\trouter, err := newNbsDagService()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tlogger.Info(\"dag service start to run......\")\n\t\tinstance = router\n\t})\n\n\treturn instance\n}", "func (s TestingSingleton) Singleton() Singleton {\n\treturn s.singleton\n}", "func getCRDClient(resController *ClusterWatcher, gvr schema.GroupVersionResource, resInfo *autoCreateKAMInfo) dynamic.ResourceInterface {\n\tvar intfNoNS = resController.plugin.dynamicClient.Resource(gvr)\n\tvar intf dynamic.ResourceInterface\n\tif resInfo.namespace != \"\" {\n\t\tintf = intfNoNS.Namespace(resInfo.namespace)\n\t} else {\n\t\tintf = intfNoNS\n\t}\n\treturn intf\n}" ]
[ "0.5939632", "0.58129483", "0.5690675", "0.56497866", "0.5634682", "0.5603417", "0.5504634", "0.5499801", "0.5396577", "0.5367739", "0.5262332", "0.5245395", "0.5219913", "0.52185434", "0.5160952", "0.51536685", "0.5138078", "0.5122033", "0.511309", "0.511022", "0.51099515", "0.51041716", "0.5103123", "0.51025605", "0.5097265", "0.5088111", "0.507383", "0.5065167", "0.5032591", "0.5024879", "0.4996433", "0.49943417", "0.49942273", "0.4985449", "0.49846503", "0.49709156", "0.4951217", "0.49463695", "0.49406755", "0.49351338", "0.49299735", "0.49204922", "0.49165842", "0.4913358", "0.49029243", "0.49029243", "0.49018025", "0.48730278", "0.48680452", "0.4865716", "0.48645508", "0.48617032", "0.4851665", "0.4832203", "0.48319688", "0.48299196", "0.48207557", "0.4802108", "0.4801748", "0.48001504", "0.4780863", "0.4769411", "0.4769046", "0.47655788", "0.4758456", "0.47537223", "0.47468123", "0.4745304", "0.47359547", "0.47334063", "0.47276822", "0.47230977", "0.4718737", "0.47100285", "0.47096756", "0.46958873", "0.46952927", "0.4690585", "0.46880165", "0.46771538", "0.46686882", "0.46605897", "0.46475714", "0.46457452", "0.46455655", "0.46405032", "0.46362123", "0.46147123", "0.46122545", "0.46091565", "0.46088102", "0.46065855", "0.46059144", "0.46039608", "0.46026564", "0.4602012", "0.4600586", "0.4588846", "0.45875955", "0.45848653" ]
0.70069283
0
BuildPSRTracker creates and initializes a new tracker instance
func BuildPSRTracker() (*PSRTracker, error) { psr := &PSRTracker{Requests: nil, requestByID: make(map[uint]*PrespecifiedRequest)} if err := psr.init(); err != nil { return nil, err } funcs = map[string]interface{}{ "value": value, "average": average, "median": median, "square": square, } sharedInstance = psr return psr, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewTracker(k8sClient kubernetes.Interface, customIF informer.SharedInformerFactory, k8sIF k8sinformers.SharedInformerFactory, c Config) (*Tracker, error) {\n\n\t//Each informer write to their own queue which are then merged\n\tinformerQueues := make(map[string]chan DeployMessage)\n\n\t//Create all required Informer (Also add additional ones here)\n\tkcdInformer := customIF.Custom().V1().KCDs()\n\tdeploymentInformer := k8sIF.Apps().V1().Deployments()\n\n\t//If you add an informer add a key here, the order of the keys will be the order they are emptied\n\tinformerList := []string{\"kcd\", \"deployment\"}\n\tfor _, informer := range informerList {\n\t\tinformerQueues[informer] = make(chan DeployMessage, 100)\n\t}\n\n\tdeploymentClient := k8sClient.AppsV1().Deployments(c.Namespace)\n\tpodClient := k8sClient.CoreV1().Pods(viper.GetString(\"tracker.namespace\"))\n\n\t//Set up message endpoint\n\tvar httpClient *http.Client\n\tvar rander *rand.Rand\n\tvar sqsClient *sqs.SQS\n\n\tswitch c.Endpointendpointtype {\n\tcase \"http\":\n\t\tif c.KubeDeployEndpoint != \"\" {\n\t\t\thttpClient = &http.Client{\n\t\t\t\tTimeout: time.Duration(5 * time.Second),\n\t\t\t}\n\t\t\trander = rand.New(rand.NewSource(time.Now().UnixNano()))\n\t\t}\n\tcase \"sqs\":\n\t\tsess := session.Must(session.NewSession(&aws.Config{\n\t\t\tRegion: aws.String(c.SQSregion),\n\t\t}))\n\t\tsqsClient = sqs.New(sess)\n\tdefault:\n\t\terr := errors.New(\"Unknown endpoint type given: \" + c.Endpointendpointtype)\n\t\treturn nil, err\n\t}\n\n\tt := &Tracker{\n\t\tdeploymentcInformer: deploymentInformer.Informer(),\n\t\tkcdcInformer: kcdInformer.Informer(),\n\n\t\tdeploymentClient: deploymentClient,\n\t\tpodClient: podClient,\n\n\t\tk8scSynced: deploymentInformer.Informer().HasSynced,\n\t\tkcdcSynced: kcdInformer.Informer().HasSynced,\n\n\t\tdeployMessageQueue: make(chan DeployMessage, 100),\n\n\t\tinformerQueues: informerQueues,\n\t\tinformerList: informerList,\n\n\t\tclusterName: c.Cluster,\n\t\tversion: 
c.Version,\n\t\tKubeDeployEndpointAPI: c.KubeDeployEndpoint,\n\t\tRobbieEndpointAPI: c.RobbieEndpoint,\n\t\tnamespace: c.Namespace,\n\t\tsqsregion: c.SQSregion,\n\t\tendpointendpointtype: c.Endpointendpointtype,\n\t\tkcdapp: c.KCDapp,\n\n\t\thttpClient: httpClient,\n\t\trand: rander,\n\t\tsqsClient: sqsClient,\n\t\tkcdStates: make(map[string]string),\n\t}\n\n\t//Add event handlers to Informers\n\tt.deploymentcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tUpdateFunc: t.trackDeployment,\n\t})\n\tt.kcdcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: t.trackAddKcd,\n\t\tUpdateFunc: t.trackKcd,\n\t})\n\treturn t, nil\n}", "func NewTracker(root string, ignore IgnoreHandler) *Tracker {\r\n\treturn &Tracker{\r\n\t\troot: root,\r\n\t\tignore: ignore,\r\n\t\tEvents: make(chan (Event)),\r\n\t\tErrors: make(chan (error)),\r\n\t}\r\n}", "func InitTracker(options ...func(*Tracker)) *Tracker {\n\tt := &Tracker{}\n\n\t// Set Defaults\n\tt.Platform = DEFAULT_PLATFORM\n\tt.Base64Encode = DEFAULT_BASE_64\n\n\t// Option parameters\n\tfor _, op := range options {\n\t\top(t)\n\t}\n\n\t// Check Emitter is not nil\n\tif t.Emitter == nil {\n\t\tpanic(\"FATAL: Emitter cannot be nil.\")\n\t}\n\n\treturn t\n}", "func New(jobsession string) (*Tracker, error) {\n\tsingularityPath, err := exec.LookPath(\"singularity\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"singularity command is not found\")\n\t}\n\treturn &Tracker{\n\t\tprocessTracker: simpletracker.New(jobsession),\n\t\tsingularityPath: singularityPath,\n\t}, nil\n}", "func (fn *selfCheckpointingDoFn) CreateTracker(rest offsetrange.Restriction) *sdf.LockRTracker {\n\treturn sdf.NewLockRTracker(offsetrange.NewTracker(rest))\n}", "func New(config Config) (spec.Tracker, error) {\n\tnewTracker := &tracker{\n\t\tConfig: config,\n\n\t\tID: id.MustNew(),\n\t\tType: ObjectType,\n\t}\n\n\tif newTracker.Log == nil {\n\t\treturn nil, maskAnyf(invalidConfigError, \"logger must not be empty\")\n\t}\n\tif 
newTracker.FactoryCollection == nil {\n\t\treturn nil, maskAnyf(invalidConfigError, \"factory collection must not be empty\")\n\t}\n\tif newTracker.StorageCollection == nil {\n\t\treturn nil, maskAnyf(invalidConfigError, \"storage collection must not be empty\")\n\t}\n\n\tnewTracker.Log.Register(newTracker.GetType())\n\n\treturn newTracker, nil\n}", "func NewTracker() *Tracker {\n\tt := &Tracker{\n\t\tinChan: make(chan *event.Event),\n\t\tqueryChan: make(chan QueryFunc),\n\t\ttotal: &counter{},\n\t\tincidentResolvers: make(map[string]chan *event.Incident),\n\t\thosts: make(map[string]*counter),\n\t\thostTimes: make(map[string]time.Time),\n\t\tservices: make(map[string]*counter),\n\t\tsubServices: make(map[string]*counter),\n\t}\n\n\treturn t\n}", "func NewTracker(w http.ResponseWriter, r *http.Request) *Tracker {\n\treturn &Tracker{\n\t\tRequest: NewRequestRecorder(r),\n\t\tResponseWriter: w,\n\t}\n}", "func NewReadCloserTracker(rc io.ReadCloser) io.ReadCloser {\n\tvar name string\n\t_, file, line, ok := runtime.Caller(1)\n\tif !ok {\n\t\tname = \"unknown\"\n\t} else {\n\t\tname = file + \":\" + strconv.Itoa(line)\n\t}\n\tOpened(name)\n\treturn &readCloserTracker{\n\t\tReadCloser: rc,\n\t\tname: name,\n\t\tisClosed: false,\n\t}\n}", "func NewTracker(flow string, nodes int) Tracker {\n\treturn &tracker{\n\t\tflow: flow,\n\t\tnodes: make(map[string]int, nodes),\n\t\tlocks: make(map[*Node]*sync.Mutex, nodes),\n\t}\n}", "func newGitTracks(c *FarosV1alpha1Client, namespace string) *gitTracks {\n\treturn &gitTracks{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func (c ClientFake) CreateTracker(name, trackerType, networkID, destinationURL, deeplinkURL, campaignID, segmentID, priceType string, priceValue float32, allowPublisherView bool, events []string, clickURLCustomParams []interface{}) (Tracker, error) {\n\treturn Tracker{}, nil\n}", "func (r *Prunner) Init(p cluster.Node) *atomic.Bool {\n\tr.Core = &CoreStats{}\n\tr.Core.init(p.Snode(), 
24)\n\tr.Core.statsTime = cmn.GCO.Get().Periodic.StatsTime.D()\n\tr.ctracker = make(copyTracker, 24)\n\n\tr.statsRunner.name = \"proxystats\"\n\tr.statsRunner.daemon = p\n\n\tr.statsRunner.stopCh = make(chan struct{}, 4)\n\tr.statsRunner.workCh = make(chan NamedVal64, 256)\n\n\tr.Core.initMetricClient(p.Snode(), &r.statsRunner)\n\n\treturn &r.statsRunner.startedUp\n}", "func TestCoverageReviewer_AddCoverageReview(t *testing.T) {\n\t/*\n\t\trp := mocks.ReviewProcess{}\n\t\trp.On(\"Start\", \"any\")\n\n\t\tcoverageReviewer{nil, nil, rp, 0.7}\n\t*/\n}", "func (fn *intRangeFn) CreateTracker(rest offsetrange.Restriction) *sdf.LockRTracker {\n\treturn sdf.NewLockRTracker(offsetrange.NewTracker(rest))\n}", "func makeTracker(ctx context.Context, name string) (context.Context, func(error)) {\n\tctx, err := tag.New(ctx, tag.Upsert(apiNameKey, name))\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatalf(\"cannot add tag %v=%v\", apiNameKey, name)\n\t}\n\n\t// It would have been nice to pull the latency (end-start) elapsed time\n\t// from Spans but this is hidden from us, so we have to call time.Now()\n\t// twice ourselves.\n\tctx, span := trace.StartSpan(ctx, name)\n\tstart := time.Now()\n\n\treturn ctx, func(err error) {\n\n\t\tstatus := \"ok\"\n\t\tif err != nil {\n\t\t\tif err == context.Canceled {\n\t\t\t\tstatus = \"canceled\"\n\t\t\t} else if err == context.DeadlineExceeded {\n\t\t\t\tstatus = \"timeout\"\n\t\t\t} else if derr, ok := err.(*docker.Error); ok {\n\t\t\t\tstatus = strconv.FormatInt(int64(derr.Status), 10)\n\t\t\t} else {\n\t\t\t\tstatus = \"error\"\n\t\t\t}\n\t\t}\n\n\t\tctx, err := tag.New(ctx, tag.Upsert(apiStatusKey, status))\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Fatalf(\"cannot add tag %v=%v\", apiStatusKey, status)\n\t\t}\n\n\t\tstats.Record(ctx, dockerLatencyMeasure.M(int64(time.Now().Sub(start)/time.Millisecond)))\n\t\tspan.End()\n\t}\n}", "func (t *Tracker) Start() (err error) {\r\n\tif t.watcher == nil {\r\n\t\tif t.watcher, err = 
fsnotify.NewWatcher(); err != nil {\r\n\t\t\treturn\r\n\t\t}\r\n\t\tif err = t.addDirs(t.root); err != nil {\r\n\t\t\tt.Close()\r\n\t\t\treturn\r\n\t\t}\r\n\t\tgo track(t)\r\n\t}\r\n\treturn\r\n}", "func newUploadTracker(checker Checker, uploader Uploader, isol *isolated.Isolated) *UploadTracker {\n\tisol.Files = make(map[string]isolated.File)\n\treturn &UploadTracker{\n\t\tchecker: checker,\n\t\tuploader: uploader,\n\t\tisol: isol,\n\t\tfileHashCache: make(map[string]hashResult),\n\t\tlOS: standardOS{},\n\t}\n}", "func NewStatusTracker() *StatusTracker {\n\tsuccesses := make([]bool, 10)\n\tfor i := range successes {\n\t\tsuccesses[i] = true\n\t}\n\n\treturn &StatusTracker{\n\t\t0,\n\t\tsuccesses,\n\t\tdefaultHalfOpenThreshold,\n\t\tdefaultOpenThreshold,\n\t\tClosed,\n\t\tsync.Mutex{},\n\t}\n}", "func New(args Arguments) *ConnectionTracker {\n\treturn &ConnectionTracker{\n\t\targs: args.WithDefaults(),\n\t\tcurrent: make(map[string]DestinationState),\n\t\tdown: make(map[string]DestinationState),\n\t}\n}", "func (s StatsGraph) construct() StatsGraphClass { return &s }", "func NewTracker() *Tracker {\n\treturn &Tracker{ID: 1}\n}", "func NewTracker(timeout time.Duration) *Tracker {\n\treturn &Tracker{\n\t\tlast: time.Now(),\n\t\ttimeout: timeout,\n\t}\n}", "func (psr *PSRTracker) Exec(ctx context.Context) error {\n\t//TODO: retrieve github updates of psr config file. 
For now, we'll just pull\n\t//PSR's as defined by psr.json file\n\tvar syncGroup sync.WaitGroup\n\tvar doneGroup sync.WaitGroup\n\tctx = context.WithValue(ctx, psrWaitGroupKey, &syncGroup)\n\terrorCh := make(chan error)\n\tdoneGroup.Add(1)\n\tgo func() {\n\t\tdefer doneGroup.Done()\n\t\tfor {\n\t\t\te := <-errorCh\n\t\t\tif e != nil {\n\t\t\t\tpsrLog.Error(\"Problem in PSR fetch: %v]\\n\", e)\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor i := 0; i < len(psr.Requests); i++ {\n\t\tp := psr.Requests[i]\n\t\tsyncGroup.Add(1)\n\t\tgo p.fetch(ctx, errorCh)\n\t}\n\tsyncGroup.Wait()\n\terrorCh <- nil\n\tdoneGroup.Wait()\n\tpsrLog.Info(\"PSR Tracker cycle complete\")\n\treturn nil\n}", "func newTracker(maxAge, evaluationInterval time.Duration, minimumPortScanned int) (t *Tracker) {\n\tt = &Tracker{\n\t\tportScanners: make(chan *TrackerEntry),\n\t\tminimumPortScanned: minimumPortScanned,\n\t\tmaxAge: maxAge,\n\t\tm: make(map[string]*TrackerEntry),\n\t}\n\tgo func() {\n\t\tfor now := range time.Tick(evaluationInterval) {\n\t\t\tt.l.Lock()\n\t\t\tfor k, v := range t.m {\n\t\t\t\tif now.After(v.expiry) {\n\t\t\t\t\tlog.Infof(\"removing %q because entry is expired\", k)\n\t\t\t\t\tdelete(t.m, k)\n\t\t\t\t}\n\t\t\t}\n\t\t\tt.l.Unlock()\n\t\t}\n\t}()\n\treturn\n}", "func (fn *TruncateFn) CreateTracker(rest offsetrange.Restriction) *sdf.LockRTracker {\n\tfn.Estimator = RangeEstimator{int64(10)}\n\ttracker, err := offsetrange.NewGrowableTracker(rest, &fn.Estimator)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn sdf.NewLockRTracker(tracker)\n}", "func (c *Collector) Init() error {\n\tif c.config.PushRefID.Valid {\n\t\tc.referenceID = c.config.PushRefID.String\n\t\tc.logger.WithField(\"referenceId\", c.referenceID).Debug(\"Cloud: directly pushing metrics without init\")\n\t\treturn nil\n\t}\n\n\tthresholds := make(map[string][]string)\n\n\tfor name, t := range c.thresholds {\n\t\tfor _, threshold := range t {\n\t\t\tthresholds[name] = append(thresholds[name], 
threshold.Source)\n\t\t}\n\t}\n\tmaxVUs := lib.GetMaxPossibleVUs(c.executionPlan)\n\n\ttestRun := &TestRun{\n\t\tName: c.config.Name.String,\n\t\tProjectID: c.config.ProjectID.Int64,\n\t\tVUsMax: int64(maxVUs),\n\t\tThresholds: thresholds,\n\t\tDuration: c.duration,\n\t}\n\n\tresponse, err := c.client.CreateTestRun(testRun)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.referenceID = response.ReferenceID\n\n\tif response.ConfigOverride != nil {\n\t\tc.logger.WithFields(logrus.Fields{\n\t\t\t\"override\": response.ConfigOverride,\n\t\t}).Debug(\"Cloud: overriding config options\")\n\t\tc.config = c.config.Apply(*response.ConfigOverride)\n\t}\n\n\tc.logger.WithFields(logrus.Fields{\n\t\t\"name\": c.config.Name,\n\t\t\"projectId\": c.config.ProjectID,\n\t\t\"duration\": c.duration,\n\t\t\"referenceId\": c.referenceID,\n\t}).Debug(\"Cloud: Initialized\")\n\treturn nil\n}", "func Start(label string) *Tracker {\n\treturn &Tracker{\n\t\tLabel: label,\n\t\tRun: time.Now(),\n\t}\n}", "func Setup(r *http.Request, params martini.Params) string {\n\tstates, err := getStatuses()\n\tif err != nil {\n\t\treturn \"{\\\"error\\\":\\\"\" + err.Error() + \"\\\"}\"\n\t}\n\n\tmsg := messages.Setup{\n\t\tOrg: r.URL.Query().Get(\"org\"),\n\t\tRepo: r.URL.Query().Get(\"repo\"),\n\t\tStates: states,\n\t\tConfig: config(),\n\t}\n\n\treturn Request(\"issue-tracker.setup\", msg)\n}", "func (s StatsGraphAsync) construct() StatsGraphClass { return &s }", "func (opcTimer *OPCTimer) AddTracker(tracker *Tracker) {\n\topcTimer.trackers.PushBack(tracker)\n}", "func NewTrackerController(service *goa.Service, db application.DB, scheduler *remoteworkitem.Scheduler) *TrackerController {\n\treturn &TrackerController{Controller: service.NewController(\"TrackerController\"), db: db, scheduler: scheduler}\n}", "func New(origin string, authCfg *config.Auth) (*IssueTracker, error) {\n\treturn &IssueTracker{origin, authCfg}, nil\n}", "func newTrackBuilder() *trackBuilder {\n\tt := &trackBuilder{}\n\tsetKit(t, 
\"windows\")\n\treturn t\n}", "func newPerfProcessor(vcs vcsinfo.VCS, config *sharedconfig.IngesterConfig, client *http.Client) (ingestion.Processor, error) {\n\treturn &perfProcessor{\n\t\tstore: ptracestore.Default,\n\t\tvcs: vcs,\n\t}, nil\n}", "func New(runner, tracker, hosted string) *Runner {\n\tn := &Runner{\n\t\ttcl: client.New(tracker, http.DefaultClient, client.JsonCodec),\n\t\tbase: hosted,\n\t\trunner: runner,\n\t\trpc: gorpc.NewServer(),\n\t\trq: rpc.NewRunnerQueue(),\n\t\tresp: make(chan rpc.Output),\n\t}\n\n\t//register the run service in the rpc\n\tif err := n.rpc.RegisterService(n.rq, \"\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\t//register the pinger\n\tif err := n.rpc.RegisterService(pinger.Pinger{}, \"\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\t//register ourselves in the rpc\n\tif err := n.rpc.RegisterService(n, \"\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\t//register the codec\n\tn.rpc.RegisterCodec(json.NewCodec(), \"application/json\")\n\n\t//start processing\n\tgo n.run()\n\n\treturn n\n}", "func (m *Tracker) Init(tag string, deepcopy bool) {\n\tm.kmap = make(map[common.Key]interface{})\n\tm.tag = tag\n\tm.deepcopy = deepcopy\n}", "func Tracker(e *Engine) func(ctx *gin.Context, p *plugin.LogFormatterParams) {\n\treturn func(ctx *gin.Context, p *plugin.LogFormatterParams) {\n\t\ttoken, _ := e.OAuth2.BearerAuth(ctx.Request)\n\t\tp.Token = token\n\t\tif tokenInfo, err := e.OAuth2.Manager.LoadAccessToken(token); err == nil {\n\t\t\tp.Domain = tokenInfo.GetDomain()\n\t\t\tp.UserID = tokenInfo.GetUserID()\n\t\t}\n\n\t\t// would not be block <-logWorkerPool\n\t\t// but <- p will\n\t\tjobChannel := <-logWorkerPool\n\t\tgo func(p *plugin.LogFormatterParams) {\n\t\t\tjobChannel <- p\n\t\t}(p)\n\t}\n}", "func MakeThreadTracker() *ThreadTracker {\n\treturn &ThreadTracker{\n\t\tactiveThreadsCounter: &sync.WaitGroup{},\n\t}\n}", "func psrInstance() (*PSRTracker, error) {\n\tif sharedInstance == nil {\n\t\treturn nil, fmt.Errorf(\"Missing psrInstance 
singleton\")\n\t}\n\treturn sharedInstance, nil\n}", "func NewTracker(ctx *pulumi.Context,\n\tname string, args *TrackerArgs, opts ...pulumi.ResourceOption) (*Tracker, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.TrackerName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'TrackerName'\")\n\t}\n\tvar resource Tracker\n\terr := ctx.RegisterResource(\"aws:location/tracker:Tracker\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewScheduleProcessor(ctxIn context.Context, credentialsProvider secret.SecretProvider) (ScheduleProcessor, error) {\n ctx, span := trace.StartSpan(ctxIn, \"NewScheduleProcessor\")\n defer span.End()\n\n backupRepository, err := repository.NewBackupRepository(ctx, credentialsProvider)\n if err != nil {\n return nil, err\n }\n\n jobRepository, err := repository.NewJobRepository(ctx, credentialsProvider)\n if err != nil {\n return nil, err\n }\n\n sourceMetadataRepository, err := repository.NewSourceMetadataRepository(ctx, credentialsProvider)\n if err != nil {\n return nil, err\n }\n\n sourceMetadataJobRepository, err := repository.NewSourceMetadataJobRepository(ctx, credentialsProvider)\n if err != nil {\n return nil, err\n }\n\n sourceTrashcanRepository, err := repository.NewSourceTrashcanRepository(ctx, credentialsProvider)\n if err != nil {\n return nil, err\n }\n\n return &defaultScheduleProcessor{\n backupRepository: backupRepository,\n jobRepository: jobRepository,\n sourceMetadataRepository: sourceMetadataRepository,\n sourceMetadataJobRepository: sourceMetadataJobRepository,\n sourceTrashcanRepository: sourceTrashcanRepository,\n }, nil\n}", "func (psr *PSRTracker) String() string {\n\treturn \"PSRTracker\"\n}", "func (c *TrackerController) Create(ctx *app.CreateTrackerContext) error {\n\tresult := application.Transactional(c.db, func(appl application.Application) error 
{\n\t\tt, err := appl.Trackers().Create(ctx.Context, ctx.Payload.URL, ctx.Payload.Type)\n\t\tif err != nil {\n\t\t\tswitch err := err.(type) {\n\t\t\tcase remoteworkitem.BadParameterError, remoteworkitem.ConversionError:\n\t\t\t\treturn goa.ErrBadRequest(err.Error())\n\t\t\tdefault:\n\t\t\t\treturn goa.ErrInternal(err.Error())\n\t\t\t}\n\t\t}\n\t\tctx.ResponseData.Header().Set(\"Location\", app.TrackerHref(t.ID))\n\t\treturn ctx.Created(t)\n\t})\n\tc.scheduler.ScheduleAllQueries()\n\treturn result\n}", "func (bpt *BplusTree) initTracker() {\n\tbpt.tracker.newNodes.Init(\"new\", false)\n\tbpt.tracker.updatedNodes.Init(\"updated\", false)\n\tbpt.tracker.origNodes.Init(\"orig\", true)\n\tbpt.tracker.deletedNodes.Init(\"deleted\", false)\n\tbpt.tracker.origRootKey = bpt.rootKey\n\tbpt.tracker.rootKeyUpdated = false\n}", "func newTrySet(key tryKey) (*trySet, error) {\n\tgoHead := getRepoHead(\"go\")\n\tif key.Repo != \"go\" && goHead == \"\" {\n\t\t// We don't know the go HEAD yet (but we will)\n\t\t// so don't create this trySet yet as we don't\n\t\t// know which Go revision to build against.\n\t\treturn nil, errHeadUnknown\n\t}\n\n\tbuilders := tryBuilders\n\tif key.Repo != \"go\" {\n\t\tbuilders = subTryBuilders\n\t}\n\n\tlog.Printf(\"Starting new trybot set for %v\", key)\n\tts := &trySet{\n\t\ttryKey: key,\n\t\ttryID: \"T\" + randHex(9),\n\t\ttrySetState: trySetState{\n\t\t\tremain: len(builders),\n\t\t\tbuilds: make([]*buildStatus, len(builders)),\n\t\t},\n\t}\n\n\tgo ts.notifyStarting()\n\tfor i, bconf := range builders {\n\t\tbrev := tryKeyToBuilderRev(bconf.Name, key)\n\t\tbs, err := newBuild(brev)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"can't create build for %q: %v\", brev, err)\n\t\t\tcontinue\n\t\t}\n\t\tbs.trySet = ts\n\t\tstatus[brev] = bs\n\t\tts.builds[i] = bs\n\t\tgo bs.start() // acquires statusMu itself, so in a goroutine\n\t\tgo ts.awaitTryBuild(i, bconf, bs)\n\t}\n\treturn ts, nil\n}", "func New(perfGit *perfgit.Git, tracker progress.Tracker, 
shortcutStore shortcut.Store, dfBuilder dataframe.DataFrameBuilder, paramsProvider regression.ParamsetProvider) *Requests {\n\tret := &Requests{\n\t\tperfGit: perfGit,\n\t\tshortcutStore: shortcutStore,\n\t\tdfBuilder: dfBuilder,\n\t\ttracker: tracker,\n\t\tparamsProvier: paramsProvider,\n\t}\n\treturn ret\n}", "func createPollingTracker(resp *http.Response) (pollingTracker, error) {\n\tvar pt pollingTracker\n\tswitch strings.ToUpper(resp.Request.Method) {\n\tcase http.MethodDelete:\n\t\tpt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}}\n\tcase http.MethodPatch:\n\t\tpt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}}\n\tcase http.MethodPost:\n\t\tpt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}}\n\tcase http.MethodPut:\n\t\tpt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}}\n\tdefault:\n\t\treturn nil, autorest.NewError(\"azure\", \"createPollingTracker\", \"unsupported HTTP method %s\", resp.Request.Method)\n\t}\n\tif err := pt.initializeState(); err != nil {\n\t\treturn pt, err\n\t}\n\t// this initializes the polling header values, we do this during creation in case the\n\t// initial response send us invalid values; this way the API call will return a non-nil\n\t// error (not doing this means the error shows up in Future.Done)\n\treturn pt, pt.updatePollingMethod()\n}", "func New() (*DockerTracker, error) {\n\tcli, err := client.NewEnvClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, errPing := cli.Ping(context.Background())\n\tif errPing != nil {\n\t\treturn nil, err\n\t}\n\treturn &DockerTracker{cli: cli}, nil\n}", "func NewForTest(t *testing.T) (context.Context, *pgxpool.Pool, *testutils.GitBuilder, []string, provider.Provider, *config.InstanceConfig) {\n\tctx := cipd_git.UseGitFinder(context.Background())\n\tctx, cancel := context.WithCancel(ctx)\n\n\t// Create a git repo for testing purposes.\n\tgb := testutils.GitInit(t, ctx)\n\thashes := 
[]string{}\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(2*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"bar.txt\", StartTime.Add(3*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(4*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(5*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"bar.txt\", StartTime.Add(6*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(7*time.Minute)))\n\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(8*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(9*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(10*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(11*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(12*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(13*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(14*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(15*time.Minute)))\n\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(16*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(17*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(18*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(19*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(20*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", 
StartTime.Add(21*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(22*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(23*time.Minute)))\n\n\t// Init our sql database.\n\tdb := sqltest.NewCockroachDBForTests(t, \"dbgit\")\n\n\t// Get tmp dir to use for repo checkout.\n\ttmpDir, err := ioutil.TempDir(\"\", \"git\")\n\trequire.NoError(t, err)\n\n\t// Create the cleanup function.\n\tt.Cleanup(func() {\n\t\tcancel()\n\t\terr = os.RemoveAll(tmpDir)\n\t\tassert.NoError(t, err)\n\t\tgb.Cleanup()\n\t})\n\n\tinstanceConfig := &config.InstanceConfig{\n\t\tGitRepoConfig: config.GitRepoConfig{\n\t\t\tURL: gb.Dir(),\n\t\t\tDir: filepath.Join(tmpDir, \"checkout\"),\n\t\t},\n\t}\n\tgp, err := git_checkout.New(ctx, instanceConfig)\n\trequire.NoError(t, err)\n\treturn ctx, db, gb, hashes, gp, instanceConfig\n}", "func NewOperationTracker(ctx context.Context, pid peer.ID) *OperationTracker {\n\treturn &OperationTracker{\n\t\tctx: ctx,\n\t\tpid: pid,\n\t\toperations: make(map[string]*Operation),\n\t}\n}", "func NewChainHeadTracker(t mockConstructorTestingTNewChainHeadTracker) *ChainHeadTracker {\n\tmock := &ChainHeadTracker{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (t *FakeObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {\n\treturn nil\n}", "func (o *EnvironmentPullRequestOptions) Create(env *jenkinsv1.Environment, prDir string,\n\tpullRequestDetails *gits.PullRequestDetails, filter *gits.PullRequestFilter, chartName string, autoMerge bool) (*gits.PullRequestInfo, error) {\n\tif prDir == \"\" {\n\t\ttempDir, err := ioutil.TempDir(\"\", \"create-pr\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tprDir = tempDir\n\t\tdefer os.RemoveAll(tempDir)\n\t}\n\n\tdir, base, upstreamRepo, forkURL, err := gits.ForkAndPullRepo(env.Spec.Source.URL, prDir, env.Spec.Source.Ref, 
pullRequestDetails.BranchName, o.GitProvider, o.Gitter, \"\")\n\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"pulling environment repo %s into %s\", env.Spec.Source.URL,\n\t\t\tprDir)\n\t}\n\n\terr = ModifyChartFiles(dir, pullRequestDetails, o.ModifyChartFn, chartName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlabels := make([]string, 0)\n\tlabels = append(labels, pullRequestDetails.Labels...)\n\tlabels = append(labels, o.Labels...)\n\tif autoMerge {\n\t\tlabels = append(labels, gits.LabelUpdatebot)\n\t}\n\tpullRequestDetails.Labels = labels\n\tprInfo, err := gits.PushRepoAndCreatePullRequest(dir, upstreamRepo, forkURL, base, pullRequestDetails, filter, true, pullRequestDetails.Message, true, false, o.Gitter, o.GitProvider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn prInfo, nil\n}", "func newReconciler(mgr manager.Manager) (reconcile.Reconciler, *reconcileGitTrackOpts) {\n\t// Create a restMapper (used by informer to look up resource kinds)\n\trestMapper, err := utils.NewRestMapper(mgr.GetConfig())\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to create rest mapper: %v\", err))\n\t}\n\n\tgvrs, err := farosflags.ParseIgnoredResources()\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to parse ignored resources: %v\", err))\n\t}\n\n\tapplier, err := farosclient.NewApplier(mgr.GetConfig(), farosclient.Options{})\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to create applier: %v\", err))\n\t}\n\n\trec := &ReconcileGitTrack{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tstore: gitstore.NewRepoStore(farosflags.RepositoryDir),\n\t\trestMapper: restMapper,\n\t\trecorder: mgr.GetEventRecorderFor(\"gittrack-controller\"),\n\t\tignoredGVRs: gvrs,\n\t\tlastUpdateTimes: make(map[string]time.Time),\n\t\tmutex: &sync.RWMutex{},\n\t\tapplier: applier,\n\t\tlog: rlogr.Log.WithName(\"gittrack-controller\"),\n\t\tgitTrackMode: farosflags.GitTrack,\n\t\tnamespace: farosflags.Namespace,\n\t\tclusterGitTrackMode: 
farosflags.ClusterGitTrack,\n\t}\n\topts := &reconcileGitTrackOpts{\n\t\tgitTrackMode: farosflags.GitTrack,\n\t\tclusterGitTrackMode: farosflags.ClusterGitTrack,\n\t}\n\treturn rec, opts\n}", "func New(checkoutDir string) staticanalysis.Analyzer {\n\treturn &analyzer{checkoutDir: checkoutDir}\n}", "func New(checkoutDir string) staticanalysis.Analyzer {\n\treturn &analyzer{checkoutDir: checkoutDir}\n}", "func createPullRequest(c *cli.Context) error {\n\tbranch := c.String(\"pr-branch\")\n\trepo := c.String(\"pr-repo\")\n\tbase := c.String(\"pr-base\")\n\tremote := c.String(\"pr-remote\")\n\tdirectory := c.String(\"out\")\n\tif repo == \"\" {\n\t\treturn errors.New(\"repo must be defined if create-pr is true\")\n\t}\n\tif branch == \"\" {\n\t\tbranch = c.String(\"name\") + \"-\" + uuid.NewString()[:6]\n\t}\n\tfmt.Printf(\"Creating a PR to repo %s with base %s and branch %s\\n\", repo, base, branch)\n\tr := &runner.CLIRunner{}\n\tg := git.NewCLIGit(git.CLIGitConfig{\n\t\tDirectory: directory,\n\t\tBranch: branch,\n\t\tRemote: remote,\n\t\tBase: base,\n\t}, r)\n\tscmClient, err := git.NewClient(git.SCMConfig{\n\t\tBranch: branch,\n\t\tBase: base,\n\t\tRepo: repo,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create scm client: %w\", err)\n\t}\n\treturn catalog.CreatePullRequest(scmClient, g, branch)\n}", "func init() {\n\tmb.Registry.MustAddMetricSet(\"psoft\", \"stat\", New)\n}", "func (s *RPC) Init(c context.Context, id string, state rpc.State) error {\n\tstepID, err := strconv.ParseInt(id, 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tworkflow, err := s.store.WorkflowLoad(stepID)\n\tif err != nil {\n\t\tlog.Error().Msgf(\"error: cannot find step with id %d: %s\", stepID, err)\n\t\treturn err\n\t}\n\n\tagent, err := s.getAgentFromContext(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tworkflow.AgentID = agent.ID\n\n\tcurrentPipeline, err := s.store.GetPipeline(workflow.PipelineID)\n\tif err != nil {\n\t\tlog.Error().Msgf(\"error: cannot find 
pipeline with id %d: %s\", workflow.PipelineID, err)\n\t\treturn err\n\t}\n\n\trepo, err := s.store.GetRepo(currentPipeline.RepoID)\n\tif err != nil {\n\t\tlog.Error().Msgf(\"error: cannot find repo with id %d: %s\", currentPipeline.RepoID, err)\n\t\treturn err\n\t}\n\n\tif currentPipeline.Status == model.StatusPending {\n\t\tif currentPipeline, err = pipeline.UpdateToStatusRunning(s.store, *currentPipeline, state.Started); err != nil {\n\t\t\tlog.Error().Msgf(\"error: init: cannot update build_id %d state: %s\", currentPipeline.ID, err)\n\t\t}\n\t}\n\n\ts.updateForgeStatus(c, repo, currentPipeline, workflow)\n\n\tdefer func() {\n\t\tcurrentPipeline.Workflows, _ = s.store.WorkflowGetTree(currentPipeline)\n\t\tmessage := pubsub.Message{\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"repo\": repo.FullName,\n\t\t\t\t\"private\": strconv.FormatBool(repo.IsSCMPrivate),\n\t\t\t},\n\t\t}\n\t\tmessage.Data, _ = json.Marshal(model.Event{\n\t\t\tRepo: *repo,\n\t\t\tPipeline: *currentPipeline,\n\t\t})\n\t\tif err := s.pubsub.Publish(c, \"topic/events\", message); err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"can not publish step list to\")\n\t\t}\n\t}()\n\n\tworkflow, err = pipeline.UpdateWorkflowToStatusStarted(s.store, *workflow, state)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.updateForgeStatus(c, repo, currentPipeline, workflow)\n\treturn nil\n}", "func New(id TrackingID, rs RouteSpecification) *Cargo {\n\titinerary := Itinerary{}\n\thistory := HandlingHistory{make([]HandlingEvent, 0)}\n\n\treturn &Cargo{\n\t\tTrackingID: id,\n\t\tOrigin: rs.Origin,\n\t\tRouteSpecification: rs,\n\t\tDelivery: DeriveDeliveryFrom(rs, itinerary, history),\n\t}\n}", "func (s AuthSentCode) construct() AuthSentCodeClass { return &s }", "func (p *Pulley) MetricsProcessor(contextOk config.ContextChecker, trackBuildTimes bool) {\n\t// Keep track of live SHAs -- we don't need separation per repository, as SHAs are pretty unique\n\t// map[commitSHA]shaState\n\tliveSHAs := 
make(liveSHAMap)\n\n\tp.WG.Add(1)\n\n\tgo func(updates <-chan interface{}) {\n\t\tdefer p.WG.Done()\n\n\t\tfor update := range updates {\n\t\t\tswitch up := update.(type) {\n\t\t\tcase events.PullUpdate:\n\t\t\t\t// When a PR is opened, its tracking starts.\n\t\t\t\tlog.Printf(\"updated pr: %d to commit: %s, action=%s\\n\", up.Number, up.SHA, up.Action)\n\n\t\t\t\tprocessPullUpdate(up, &liveSHAs, p.Metrics)\n\n\t\t\tcase events.BranchUpdate:\n\t\t\t\tlog.Printf(\"updated a branch to commit: %s (from %s)\", up.SHA, up.OldSHA)\n\n\t\t\t\tprocessBranchUpdate(up, &liveSHAs, p.Metrics)\n\n\t\t\tcase events.CommitUpdate:\n\t\t\t\t// track good, bad, overall\n\t\t\t\t// Find which PRs are the ones with the status as the HEAD\n\t\t\t\t// and use that\n\t\t\t\tlog.Printf(\"updated commit: %s context: %s status: %s\", up.SHA, up.Context, up.Status)\n\n\t\t\t\tprocessCommitUpdate(up, &liveSHAs, p.Metrics, contextOk, trackBuildTimes)\n\t\t\t}\n\t\t}\n\t}(p.Updates)\n}", "func (r *Repo) setup() error {\n\tvar err error\n\n\tsetupFuncs := []func() error{\n\t\tr.setupCvr,\n\t\tr.setupRefreshTime,\n\t\tr.setupLogDir,\n\t\tr.setupLogServer,\n\t\tr.setupCommentTrigger,\n\t\tr.setupLanguage,\n\t\tr.setupStages,\n\t\tr.setupWhitelist,\n\t\tr.setupEnvars,\n\t}\n\n\tfor _, setupFunc := range setupFuncs {\n\t\tif err = setupFunc(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tr.prConfig = pullRequestConfig{\n\t\tcvr: r.cvr,\n\t\tlogger: r.logger,\n\t\tcommentTrigger: r.CommentTrigger,\n\t\tpostOnFailure: r.PostOnFailure,\n\t\tpostOnSuccess: r.PostOnSuccess,\n\t\twhitelist: r.Whitelist,\n\t}\n\n\tr.logger.Debugf(\"control version repository: %#v\", r.cvr)\n\n\treturn nil\n}", "func PushRepoAndCreatePullRequest(dir string, upstreamRepo *GitRepository, forkRepo *GitRepository, base string, prDetails *PullRequestDetails, filter *PullRequestFilter, commit bool, commitMessage string, push bool, dryRun bool, gitter Gitter, provider GitProvider) (*PullRequestInfo, error) {\n\tuserAuth := 
provider.UserAuth()\n\tif commit {\n\t\terr := gitter.Add(dir, \"-A\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStack(err)\n\t\t}\n\t\tchanged, err := gitter.HasChanges(dir)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStack(err)\n\t\t}\n\t\tif !changed {\n\t\t\tlog.Logger().Warnf(\"No changes made to the source code in %s. Code must be up to date!\", dir)\n\t\t\treturn nil, nil\n\t\t}\n\t\tif commitMessage == \"\" {\n\t\t\tcommitMessage = prDetails.Message\n\t\t}\n\t\terr = gitter.CommitDir(dir, commitMessage)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStack(err)\n\t\t}\n\t}\n\n\theadPrefix := \"\"\n\n\tusername := upstreamRepo.Organisation\n\tcloneURL := upstreamRepo.CloneURL\n\tif forkRepo != nil {\n\t\tusername = forkRepo.Organisation\n\t\tcloneURL = forkRepo.CloneURL\n\t}\n\n\tif upstreamRepo.Organisation != username {\n\t\theadPrefix = username + \":\"\n\t}\n\n\tgha := &GitPullRequestArguments{\n\t\tGitRepository: upstreamRepo,\n\t\tTitle: prDetails.Title,\n\t\tBody: prDetails.Message,\n\t\tBase: base,\n\t\tLabels: prDetails.Labels,\n\t}\n\tvar existingPr *GitPullRequest\n\n\tforkPushURL, err := gitter.CreateAuthenticatedURL(cloneURL, &userAuth)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"creating push URL for %s\", cloneURL)\n\t}\n\n\tif filter != nil && push {\n\t\t// lets rebase an existing PR\n\t\texistingPrs, err := FilterOpenPullRequests(provider, upstreamRepo.Organisation, upstreamRepo.Name, *filter)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"finding existing PRs using filter %s on repo %s/%s\", filter.String(), upstreamRepo.Organisation, upstreamRepo.Name)\n\t\t}\n\n\t\tif len(existingPrs) > 1 {\n\t\t\tsort.SliceStable(existingPrs, func(i, j int) bool {\n\t\t\t\t// sort in descending order of PR numbers (assumes PRs numbers increment!)\n\t\t\t\treturn util.DereferenceInt(existingPrs[j].Number) < util.DereferenceInt(existingPrs[i].Number)\n\t\t\t})\n\t\t\tprs := make([]string, 0)\n\t\t\tfor _, pr := 
range existingPrs {\n\t\t\t\tprs = append(prs, pr.URL)\n\t\t\t}\n\t\t\tlog.Logger().Debugf(\"Found more than one PR %s using filter %s on repo %s/%s so rebasing latest PR %s\", strings.Join(prs, \", \"), filter.String(), upstreamRepo.Organisation, upstreamRepo.Name, existingPrs[:1][0].URL)\n\t\t\texistingPr = existingPrs[0]\n\t\t} else if len(existingPrs) == 1 {\n\t\t\texistingPr = existingPrs[0]\n\t\t}\n\t}\n\tremoteBranch := prDetails.BranchName\n\tif existingPr != nil {\n\t\tif util.DereferenceString(existingPr.HeadOwner) == username && existingPr.HeadRef != nil && existingPr.Number != nil {\n\t\t\tremote := \"origin\"\n\t\t\tif forkRepo != nil && forkRepo.Fork {\n\t\t\t\tremote = \"upstream\"\n\t\t\t}\n\t\t\tchangeBranch, err := gitter.Branch(dir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStack(err)\n\t\t\t}\n\t\t\tlocalBranchUUID, err := uuid.NewUUID()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"creating UUID for local branch\")\n\t\t\t}\n\t\t\t// We use this \"dummy\" local branch to pull into to avoid having to work with FETCH_HEAD as our local\n\t\t\t// representation of the remote branch. 
This is an oddity of the pull/%d/head remote.\n\t\t\tlocalBranch := localBranchUUID.String()\n\t\t\tremoteBranch = *existingPr.HeadRef\n\t\t\tfetchRefSpec := fmt.Sprintf(\"pull/%d/head:%s\", *existingPr.Number, localBranch)\n\t\t\terr = gitter.FetchBranch(dir, remote, fetchRefSpec)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"fetching %s for merge\", fetchRefSpec)\n\t\t\t}\n\n\t\t\terr = gitter.CreateBranchFrom(dir, prDetails.BranchName, localBranch)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"creating branch %s from %s\", prDetails.BranchName, fetchRefSpec)\n\t\t\t}\n\t\t\terr = gitter.Checkout(dir, prDetails.BranchName)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"checking out branch %s\", prDetails.BranchName)\n\t\t\t}\n\t\t\terr = gitter.MergeTheirs(dir, changeBranch)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"merging %s into %s\", changeBranch, fetchRefSpec)\n\t\t\t}\n\t\t\terr = gitter.RebaseTheirs(dir, fmt.Sprintf(localBranch), \"\", true)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStack(err)\n\t\t\t}\n\t\t\tchangedFiles, err := gitter.ListChangedFilesFromBranch(dir, localBranch)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"failed to list changed files\")\n\t\t\t}\n\t\t\tif changedFiles == \"\" {\n\t\t\t\tlog.Logger().Info(\"No file changes since the existing PR. 
Nothing to push.\")\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t} else {\n\t\t\t// We can only update an existing PR if the owner of that PR is this user, so we clear the existingPr\n\t\t\texistingPr = nil\n\t\t}\n\t}\n\tvar pr *GitPullRequest\n\tif !dryRun && existingPr != nil {\n\t\tgha.Head = headPrefix + remoteBranch\n\t\t// work out the minimal similar title\n\t\tif strings.HasPrefix(existingPr.Title, \"chore(deps): bump \") {\n\t\t\torigWords := strings.Split(existingPr.Title, \" \")\n\t\t\tnewWords := strings.Split(prDetails.Title, \" \")\n\t\t\tanswer := make([]string, 0)\n\t\t\tfor i, w := range newWords {\n\t\t\t\tif len(origWords) > i && origWords[i] == w {\n\t\t\t\t\tanswer = append(answer, w)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif answer[len(answer)-1] == \"bump\" {\n\t\t\t\t// if there are no similarities in the actual dependency, then add a generic form of words\n\t\t\t\tanswer = append(answer, \"dependency\", \"versions\")\n\t\t\t}\n\t\t\tif answer[len(answer)-1] == \"to\" || answer[len(answer)-1] == \"from\" {\n\t\t\t\t// remove trailing prepositions\n\t\t\t\tanswer = answer[:len(answer)-1]\n\t\t\t}\n\t\t\tgha.Title = strings.Join(answer, \" \")\n\t\t} else {\n\t\t\tgha.Title = prDetails.Title\n\t\t}\n\t\tgha.Body = fmt.Sprintf(\"%s\\n<hr />\\n\\n%s\", prDetails.Message, existingPr.Body)\n\t\tvar err error\n\t\tpr, err = provider.UpdatePullRequest(gha, *existingPr.Number)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"updating pull request %s\", existingPr.URL)\n\t\t}\n\t\tlog.Logger().Infof(\"Updated Pull Request: %s\", util.ColorInfo(pr.URL))\n\t}\n\tif dryRun {\n\t\tlog.Logger().Infof(\"Commit created but not pushed; would have updated pull request %s with %s and used commit message %s. 
Please manually delete %s when you are done\", util.ColorInfo(existingPr.URL), prDetails.String(), commitMessage, util.ColorInfo(dir))\n\t\treturn nil, nil\n\t} else if push {\n\t\terr := gitter.Push(dir, forkPushURL, true, fmt.Sprintf(\"%s:%s\", \"HEAD\", remoteBranch))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"pushing merged branch %s\", remoteBranch)\n\t\t}\n\t}\n\tif existingPr == nil {\n\t\tgha.Head = headPrefix + prDetails.BranchName\n\n\t\tpr, err = provider.CreatePullRequest(gha)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"creating pull request with arguments %v\", gha.String())\n\t\t}\n\t\tlog.Logger().Infof(\"Created Pull Request: %s\", util.ColorInfo(pr.URL))\n\t}\n\n\tprInfo := &PullRequestInfo{\n\t\tGitProvider: provider,\n\t\tPullRequest: pr,\n\t\tPullRequestArguments: gha,\n\t}\n\n\terr = addLabelsToPullRequest(prInfo, prDetails.Labels)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to add labels %+v to PR %s\", prDetails.Labels, pr.URL)\n\t}\n\n\treturn prInfo, nil\n}", "func Start(ctx context.Context, conf *bt_gitstore.BTConfig, repoURL string, includeBranches, excludeBranches []string, gitilesURL, gcsBucket, gcsPath string, interval time.Duration, ts oauth2.TokenSource) error {\n\tsklog.Infof(\"Initializing watcher for %s\", repoURL)\n\tgitStore, err := bt_gitstore.New(ctx, conf, repoURL)\n\tif err != nil {\n\t\treturn skerr.Wrapf(err, \"Error instantiating git store for %s.\", repoURL)\n\t}\n\tclient := httputils.DefaultClientConfig().WithTokenSource(ts).Client()\n\tgr := gitiles.NewRepo(gitilesURL, client)\n\ts, err := storage.NewClient(ctx)\n\tif err != nil {\n\t\treturn skerr.Wrapf(err, \"Failed to create storage client for %s.\", gcsBucket)\n\t}\n\tgcsClient := gcsclient.New(s, gcsBucket)\n\tp, err := pubsub.NewPublisher(ctx, conf, gitStore.RepoID, ts)\n\tif err != nil {\n\t\treturn skerr.Wrapf(err, \"Failed to create PubSub publisher for %s\", repoURL)\n\t}\n\tri, err := newRepoImpl(ctx, 
gitStore, gr, gcsClient, gcsPath, p, includeBranches, excludeBranches)\n\tif err != nil {\n\t\treturn skerr.Wrapf(err, \"Failed to create RepoImpl for %s; using gs://%s/%s.\", repoURL, gcsBucket, gcsPath)\n\t}\n\tsklog.Infof(\"Building Graph for %s...\", repoURL)\n\trepo, err := repograph.NewWithRepoImpl(ctx, ri)\n\tif err != nil {\n\t\treturn skerr.Wrapf(err, \"Failed to create repo graph for %s.\", repoURL)\n\t}\n\trepo.UpdateBranchInfo()\n\n\t// Start periodic ingestion.\n\tlvGitSync := metrics2.NewLiveness(\"last_successful_git_sync\", map[string]string{\"repo\": repoURL})\n\tcleanup.Repeat(interval, func(ctx context.Context) {\n\t\tdefer metrics2.FuncTimer().Stop()\n\t\t// Catch any panic and log relevant information to find the root cause.\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tsklog.Errorf(\"Panic updating %s: %s\\n%s\", repoURL, err, string(debug.Stack()))\n\t\t\t}\n\t\t}()\n\n\t\tsklog.Infof(\"Updating %s...\", repoURL)\n\t\tif err := repo.Update(ctx); err != nil {\n\t\t\tsklog.Errorf(\"Error updating %s: %s\", repoURL, err)\n\t\t} else {\n\t\t\tgotBranches, err := gitStore.GetBranches(ctx)\n\t\t\tif err != nil {\n\t\t\t\tsklog.Errorf(\"Successfully updated %s but failed to retrieve branch heads: %s\", repoURL, err)\n\t\t\t} else {\n\t\t\t\tsklog.Infof(\"Successfully updated %s\", repoURL)\n\t\t\t\tfor name, branch := range gotBranches {\n\t\t\t\t\tsklog.Debugf(\" %s@%s: %d, %s\", path.Base(repoURL), name, branch.Index, branch.Head)\n\t\t\t\t}\n\t\t\t}\n\t\t\tlvGitSync.Reset()\n\t\t}\n\t}, nil)\n\treturn nil\n}", "func New() (*SingleCrawler, error) {\n\n defer glog.Flush()\n\n var crawler SingleCrawler\n startURL := *UrlPtr\n maxp := *MaxpPtr\n maxc := *MaxcPtr\n maxt := *MaxtPtr\n Filename := *OutfilePtr\n NumWorkers := *NumwPtr\n\n // validate the user input URL and decide if it's okay to use\n if govalidator.IsURL(startURL) == false {\n glog.Error(\"The starting URL is invalid. 
Please enter a valid URL.\")\n return nil, errors.New(\"Bad starting URL.\")\n }\n if maxp < 0 || maxc < 0 || maxt < 0 {\n glog.Error(\"Please pass in values > = 0 for max constraints (max print, max pages, max time). Please pass > 0 for the number of workers.\")\n return nil, errors.New(\"Bad values for maxprint, maxpages, maxtime, or NumWorkers\")\n }\n if NumWorkers <= 0 || NumWorkers > MAX_WORKERS {\n glog.Error(\"Number of workes is invalid. Must be > 0, and less that MAX_WORKERS.\")\n return nil, errors.New(\"Bad value for NumWorkers\")\n }\n if len(Filename) >= 255 {\n glog.Error(\"Filename can't be larger than 255 characters. Trimming Filename.\")\n Filename = Filename[0:100]\n }\n\n\n crawler.MAX_PAGES = maxc\n crawler.PRINT_LIMIT = maxp\n crawler.NumPages = 0\n crawler.NumWorkers = NumWorkers\n crawler.MAX_TIME = time.Duration(maxt) * time.Second\n crawler.Sitemap = make( [] Page, crawler.MAX_PAGES)\n \n\n // Parse the URL - make sure it's ok to use\n domain, err := url.Parse(startURL)\n if err != nil {\n glog.Error(\"Error parsing domain of starting URL\")\n return nil, errors.New(\"Unable to parse domain of start URL.\")\n }\n err = DomainCheck( domain )\n if err != nil {\n glog.Error(\"Error parsing domain of starting URL\")\n return nil, err\n }\n crawler.Site = domain\n \n if Filename != \"\" {\n crawler.Filename = Filename\n } else {\n crawler.Filename = crawler.Site.Host + \".txt\"\n if len( crawler.Filename ) >= 255 {\n crawler.Filename = crawler.Filename[0:100]\n }\n }\n\n if err = IsOk( &crawler ); err!=nil{\n return nil, err\n }\n\n return &crawler, nil\n\n}", "func New() *Collector { return &Collector{} }", "func NewImportTracker() *ImportTracker {\n\treturn &ImportTracker{\n\t\tmake(map[string]string),\n\t\tmake(map[string]string),\n\t\tmake(map[string]bool),\n\t}\n}", "func (t *Task) createPVProgressCR() error {\n\tpvcMap := t.getPVCNamespaceMap()\n\tlabels := t.Owner.GetCorrelationLabels()\n\tfor bothNs, vols := range pvcMap {\n\t\tns := 
getSourceNs(bothNs)\n\t\tfor _, vol := range vols {\n\t\t\tdvmp := migapi.DirectVolumeMigrationProgress{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: getMD5Hash(t.Owner.Name + vol.Name + ns),\n\t\t\t\t\tLabels: labels,\n\t\t\t\t\tNamespace: migapi.OpenshiftMigrationNamespace,\n\t\t\t\t},\n\t\t\t\tSpec: migapi.DirectVolumeMigrationProgressSpec{\n\t\t\t\t\tClusterRef: t.Owner.Spec.SrcMigClusterRef,\n\t\t\t\t\tPodNamespace: ns,\n\t\t\t\t\tPodSelector: GetRsyncPodSelector(vol.Name),\n\t\t\t\t},\n\t\t\t}\n\t\t\t// make sure existing CRs that don't have required fields are deleted\n\t\t\terr := t.deleteInvalidPVProgressCR(&dvmp)\n\t\t\tif err != nil {\n\t\t\t\treturn liberr.Wrap(err)\n\t\t\t}\n\t\t\tmigapi.SetOwnerReference(t.Owner, t.Owner, &dvmp)\n\t\t\tt.Log.Info(\"Creating DVMP on host MigCluster to track Rsync Pod completion on MigCluster\",\n\t\t\t\t\"dvmp\", path.Join(dvmp.Namespace, dvmp.Name),\n\t\t\t\t\"srcNamespace\", dvmp.Spec.PodNamespace,\n\t\t\t\t\"selector\", dvmp.Spec.PodSelector,\n\t\t\t\t\"migCluster\", path.Join(t.Owner.Spec.SrcMigClusterRef.Namespace,\n\t\t\t\t\tt.Owner.Spec.SrcMigClusterRef.Name))\n\t\t\terr = t.Client.Create(context.TODO(), &dvmp)\n\t\t\tif k8serror.IsAlreadyExists(err) {\n\t\t\t\tt.Log.Info(\"DVMP already exists on destination cluster\",\n\t\t\t\t\t\"dvmp\", path.Join(dvmp.Namespace, dvmp.Name))\n\t\t\t} else if err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Log.Info(\"Rsync client progress CR created\", \"dvmp\", path.Join(dvmp.Name, \"namespace\", dvmp.Namespace))\n\t\t}\n\n\t}\n\treturn nil\n}", "func (i InputCheckPasswordSRP) construct() InputCheckPasswordSRPClass { return &i }", "func Init(repo *config.RepoConfig, opr *operator.Operator) *Watcher {\n\twatcher := &Watcher{\n\t\trepo: repo,\n\t\topr: opr,\n\t\tcheckPoints: make(map[string]map[string]checkPoint),\n\t}\n\tif repo.WatchFiles == nil {\n\t\treturn watcher\n\t}\n\n\tgo func() 
{\n\t\tutil.Error(watcher.composeJobs())\n\t\tutil.Error(watcher.initCheckPoints())\n\t\tutil.Println(\"init complete\", watcher.checkPoints)\n\t\tgo watcher.polling()\n\t}()\n\n\treturn watcher\n}", "func NewRefCountTracker(round uint32, et *EligibilityTracker, expectedSize int) *RefCountTracker {\n\treturn &RefCountTracker{\n\t\tround: round,\n\t\teTracker: et,\n\t\texpectedSize: expectedSize,\n\t\ttable: make(map[any]*item, inboxCapacity),\n\t}\n}", "func Init(repo *config.RepoConfig, opr *operator.Operator) Cherry {\n\tc := cherry{\n\t\towner: repo.Owner,\n\t\trepo: repo.Repo,\n\t\tready: false,\n\t\trule: repo.Rule,\n\t\trelease: repo.Release,\n\t\ttypeLabel: repo.TypeLabel,\n\t\tignoreLabel: repo.IgnoreLabel,\n\t\tdryrun: repo.Dryrun,\n\t\tforkedRepoCollaborators: make(map[string]struct{}),\n\t\tcollaboratorInvitation: make(map[string]time.Time),\n\t\topr: opr,\n\t\tcfg: repo,\n\t}\n\tgo c.runLoadCollaborators()\n\treturn &c\n}", "func (PingCIMunger) Initialize(config *github_util.Config) error { return nil }", "func newRepoImpl(ctx context.Context, gs gitstore.GitStore, repo *gitiles.Repo, gcsClient gcs.GCSClient, gcsPath string, p *pubsub.Publisher, includeBranches, excludeBranches []string) (repograph.RepoImpl, error) {\n\tindexCommits, err := gs.RangeByTime(ctx, vcsinfo.MinTime, vcsinfo.MaxTime, gitstore.ALL_BRANCHES)\n\tif err != nil {\n\t\treturn nil, skerr.Wrapf(err, \"Failed loading IndexCommits from GitStore.\")\n\t}\n\tvar commits []*vcsinfo.LongCommit\n\tif len(indexCommits) > 0 {\n\t\thashes := make([]string, 0, len(indexCommits))\n\t\tfor _, c := range indexCommits {\n\t\t\thashes = append(hashes, c.Hash)\n\t\t}\n\t\tcommits, err = gs.Get(ctx, hashes)\n\t\tif err != nil {\n\t\t\treturn nil, skerr.Wrapf(err, \"Failed loading LongCommits from GitStore.\")\n\t\t}\n\t}\n\tgb, err := gs.GetBranches(ctx)\n\tif err != nil {\n\t\treturn nil, skerr.Wrapf(err, \"Failed loading branches from GitStore.\")\n\t}\n\tbranches := make([]*git.Branch, 0, 
len(gb))\n\tfor name, branch := range gb {\n\t\tbranches = append(branches, &git.Branch{\n\t\t\tName: name,\n\t\t\tHead: branch.Head,\n\t\t})\n\t}\n\tcommitsMap := make(map[string]*vcsinfo.LongCommit, len(commits))\n\tfor _, c := range commits {\n\t\tcommitsMap[c.Hash] = c\n\t}\n\tsklog.Infof(\"Repo %s has %d commits and %d branches.\", repo.URL(), len(commits), len(branches))\n\tfor _, b := range branches {\n\t\tsklog.Infof(\" branch %s @ %s\", b.Name, b.Head)\n\t}\n\treturn &repoImpl{\n\t\tMemCacheRepoImpl: repograph.NewMemCacheRepoImpl(commitsMap, branches),\n\t\tgcsClient: gcsClient,\n\t\tgcsPath: gcsPath,\n\t\tgitiles: repo,\n\t\tgitstore: gs,\n\t\tpubsub: p,\n\t\tincludeBranches: includeBranches,\n\t\texcludeBranches: excludeBranches,\n\t}, nil\n}", "func (this *Worker) init(manager *Manager, kind, filename string, seq uint16) {\n this.manager = manager\n this.kind = kind\n this.filename = filename\n this.seq = seq\n this.CreatedAt = time.Now()\n\n this.Logger = this.manager.Logger\n}", "func msgtrackerStart(t *testing.T, expectedCnt int) (mt msgtracker) {\n\tmt = msgtracker{t: t}\n\tmt.msgs = make([]*Message, expectedCnt)\n\treturn mt\n}", "func (t *TaskChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n //初始化snowflake的IdWorker\n\t//iw, _ = NewIdWorker(1)\n\t//if err!= nil {\n\t//\tfmt.Println(err) UTC\n\t//}\n\treturn shim.Success(nil)\n}", "func (ts *Tester) Build() error {\n\t// no-op\n\treturn nil\n}", "func newProcBase(name, bin, serviceAddr string, loggers []Logger) *procBase {\n\tlog.Infof(\"%s has addr %s\", name, serviceAddr)\n\treturn &procBase{\n\t\tname: name,\n\t\tbin: bin,\n\t\tserviceAddr: serviceAddr,\n\t\tloggers: loggers,\n\t}\n}", "func Build() {\n\tLogger = zap.New(zapcore.NewTee(corelist...))\n}", "func (ut *utilizationTracker) Start() error {\n\tut.Lock()\n\tdefer ut.Unlock()\n\n\tif ut.started {\n\t\treturn fmt.Errorf(\"Attempted to use UtilizationTracker.Start() when the tracker was already started\")\n\t}\n\n\tif 
ut.stopped {\n\t\treturn fmt.Errorf(\"Attempted to use UtilizationTracker.Start() after the tracker was stopped\")\n\t}\n\n\t// Initialize the worker expvar\n\texpvars.SetWorkerStats(\n\t\tut.workerName,\n\t\t&expvars.WorkerStats{\n\t\t\tUtilization: 0,\n\t\t},\n\t)\n\n\t// Start the ticker\n\terr := ut.utilizationStats.Start(ut.pollingFunc, ut.statsUpdateFunc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tut.started = true\n\n\treturn nil\n}", "func (s MessagesSponsoredMessages) construct() MessagesSponsoredMessagesClass { return &s }", "func buildReportingCore(streamName string, enc zapcore.EncoderConfig, bufSize int64, flushInterval time.Duration) (zapcore.Core, io.Closer, error) {\n\tw, err := writer.NewKinesisWriter(streamName)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tbuf, closer := writer.Buffer(zapcore.AddSync(w), int(bufSize), flushInterval)\n\n\tcore := zapcore.NewCore(\n\t\tzapcore.NewJSONEncoder(enc),\n\t\tbuf,\n\t\tzapcore.InfoLevel,\n\t)\n\n\treturn core, closer, nil\n}", "func NewUsageTracker() *UsageTracker {\n\treturn &UsageTracker{\n\t\tusage: make(map[string]*UsageRecord),\n\t}\n}", "func (core *coreService) Track(ctx context.Context, start time.Time, method string, size int64, success bool) {\n\tif core.apiStats == nil {\n\t\treturn\n\t}\n\telapsed := time.Since(start)\n\tcore.apiStats.ReportCall(nodestats.APIReport{\n\t\tMethod: method,\n\t\tHandlingTime: elapsed,\n\t\tSuccess: success,\n\t}, size)\n}", "func NewIPFixGen(ipfix *PluginIPFixClient, initJson *fastjson.RawMessage) (*IPFixGen, error) {\n\n\tinit := IPFixGenParams{TemplateRate: DefaultIPFixTemplateRate, DataRate: DefaultIPFixDataRate, AutoStart: true}\n\terr := ipfix.Tctx.UnmarshalValidateDisallowUnknownFields(*initJson, &init)\n\tif err != nil {\n\t\tipfix.stats.invalidJson++\n\t\treturn nil, err\n\t}\n\n\t// validate fields as well, not only outside Json.\n\tvalidator := ipfix.Tctx.GetJSONValidator()\n\tfor i := range init.Fields {\n\t\terr = 
validator.Struct(init.Fields[i])\n\t\tif err != nil {\n\t\t\tipfix.stats.invalidJson++\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif _, ok := ipfix.generatorsMap[init.Name]; ok {\n\t\tipfix.stats.duplicateGenName++\n\t\treturn nil, fmt.Errorf(\"duplicate generator name %s\", init.Name)\n\t}\n\n\tif _, ok := ipfix.templateIDSet[init.TemplateID]; ok {\n\t\tipfix.stats.duplicateTemplateID++\n\t\treturn nil, fmt.Errorf(\"duplicate template ID %d\", init.TemplateID)\n\t}\n\n\tif init.TemplateID <= 0xFF {\n\t\tipfix.stats.invalidTemplateID++\n\t\treturn nil, fmt.Errorf(\"invalid template ID %d\", init.TemplateID)\n\t}\n\n\tif init.OptionsTemplate && (init.ScopeCount == 0) {\n\t\tipfix.stats.invalidScopeCount++\n\t\treturn nil, fmt.Errorf(\"invalid scope count %d\", init.ScopeCount)\n\t}\n\n\to := new(IPFixGen)\n\to.ipfixPlug = ipfix\n\to.OnCreate()\n\n\to.name = init.Name\n\to.enabled = init.AutoStart\n\to.templateID = init.TemplateID\n\to.templateRate = init.TemplateRate\n\to.dataRate = init.DataRate\n\to.recordsNum = init.RecordsNum\n\to.optionsTemplate = init.OptionsTemplate\n\to.scopeCount = init.ScopeCount\n\to.fields = init.Fields\n\n\t// Create Engine Manager\n\tif init.Engines != nil {\n\t\to.engineMgr, err = engines.NewEngineManager(o.ipfixPlug.Tctx, init.Engines)\n\t\tif err != nil {\n\t\t\to.ipfixPlug.stats.failedBuildingEngineMgr++\n\t\t\treturn nil, fmt.Errorf(\"could not create engine manager: %w\", err)\n\t\t}\n\t\to.engineMap = o.engineMgr.GetEngineMap()\n\t}\n\n\tvar clientsGen *ClientsGen\n\t// For auto-triggered devices, if clients generator is configured - create a ClientsGen objects based\n\t// on the current device config.\n\tif ipfix.autoTriggered {\n\t\ttrgDeviceInfo := o.ipfixPlug.trgDeviceInfo\n\t\tif trgDeviceInfo.clientsGenParams != nil {\n\t\t\tclientsGen, err = NewClientsGen(ipfix, trgDeviceInfo.clientsGenParams)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tpanic(\"Failed to create 
NewClientsGen\")\n\t\t\t}\n\t\t}\n\t}\n\n\to.fieldNames = make(map[string]bool, len(o.fields))\n\t// Build Template Fields and Data Buffer.\n\tfor i, field := range o.fields {\n\t\t// If clients generator was created, replace the corresponding field engine definitions.\n\t\tif (clientsGen != nil) && (field.Name == clientsGen.GetClientIpv4FieldName()) {\n\t\t\to.fields[i] = clientsGen.GetField()\n\t\t\to.engineMap[field.Name] = clientsGen.GetEngine()\n\t\t}\n\n\t\tif o.ipfixPlug.ver == 9 && field.isEnterprise() {\n\t\t\to.ipfixPlug.stats.enterpriseFieldv9++\n\t\t\treturn nil, fmt.Errorf(\"NetFlow version 9 does not support enterprise field %s\", field.Name)\n\t\t}\n\t\tif o.ipfixPlug.ver == 9 && field.isVariableLength() {\n\t\t\to.ipfixPlug.stats.variableLengthFieldv9++\n\t\t\treturn nil, fmt.Errorf(\"NetFlow version 9 does not support var len field %s\", field.Name)\n\t\t}\n\t\to.templateFields = append(o.templateFields, field.getIPFixField())\n\t\tif !field.isVariableLength() && (len(field.Data) != int(field.Length)) {\n\t\t\tipfix.stats.dataIncorrectLength++\n\t\t\treturn nil, fmt.Errorf(\"Field %s data size differs from declared field length %d\", field.Name, field.Length)\n\t\t}\n\t\to.fieldNames[field.Name] = true // add each field to the field names\n\t\tif !field.isVariableLength() {\n\t\t\t// don't add variable length fields to the data buffer, they don't have a data buffer.\n\t\t\to.dataBuffer = append(o.dataBuffer, field.Data...)\n\t\t} else {\n\t\t\to.variableLengthFields = true\n\t\t\t// variable length field, verify that no data.\n\t\t\tif len(field.Data) != 0 {\n\t\t\t\tipfix.stats.dataIncorrectLength++\n\t\t\t\treturn nil, fmt.Errorf(\"variable length field %s has data\", field.Name)\n\t\t\t}\n\t\t\t// also we must have an engine for variable length fields\n\t\t\tif o.engineMgr == nil {\n\t\t\t\tipfix.stats.variableLengthNoEngine++\n\t\t\t\treturn nil, fmt.Errorf(\"No engine for var len field %s\", field.Name)\n\t\t\t} else {\n\t\t\t\tif _, ok := 
o.engineMap[field.Name]; !ok {\n\t\t\t\t\tipfix.stats.variableLengthNoEngine++\n\t\t\t\t\treturn nil, fmt.Errorf(\"No engine for var len field %s\", field.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// verify each engine name is a correct field\n\tif o.engineMgr != nil {\n\t\tfor engineName := range o.engineMap {\n\t\t\tif _, ok := o.fieldNames[engineName]; !ok {\n\t\t\t\to.ipfixPlug.stats.invalidEngineName++\n\t\t\t\treturn nil, fmt.Errorf(\"Got engine for unexisting field %s\", engineName)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Calculate Ticks for Timer.\n\to.templateTicks = o.timerw.DurationToTicks(time.Duration(float32(time.Second) / o.templateRate))\n\to.dataTicks, o.dataPktsPerInterval = o.timerw.DurationToTicksBurst(time.Duration(float32(time.Second) / o.dataRate))\n\n\tif o.ipfixPlug.dgMacResolved {\n\t\t// If resolved before the generators were created, we call on resolve explicitly.\n\t\tif ok := o.OnResolve(); !ok {\n\t\t\treturn nil, fmt.Errorf(\"could not resolve DG\")\n\t\t}\n\t}\n\n\treturn o, nil\n}", "func NewSourceControl() *SourceControl {\n\tsc := new(SourceControl)\n\tsc.heartbeats = make(chan Heartbeat)\n\tsc.queuedRequests = make(chan func())\n\tsc.queuedResults = make(chan error)\n\n\tsc.simPulses = NewSimPulseSource()\n\tsc.triangle = NewTriangleSource()\n\tsc.erroring = NewErroringSource()\n\tlan, _ := NewLanceroSource()\n\tsc.lancero = lan\n\tsc.roach, _ = NewRoachSource()\n\tsc.abaco, _ = NewAbacoSource()\n\n\tsc.simPulses.heartbeats = sc.heartbeats\n\tsc.triangle.heartbeats = sc.heartbeats\n\tsc.erroring.heartbeats = sc.heartbeats\n\tsc.lancero.heartbeats = sc.heartbeats\n\tsc.roach.heartbeats = sc.heartbeats\n\tsc.abaco.heartbeats = sc.heartbeats\n\n\tsc.status.ChanGroups = make([]GroupIndex, 0)\n\treturn sc\n}", "func newProber(s *Server, nodes nodeMap) *prober {\n\tprober := &prober{\n\t\tPinger: fastping.NewPinger(),\n\t\tserver: s,\n\t\tproberExited: make(chan bool),\n\t\tstop: make(chan bool),\n\t\tresults: 
make(map[ipString]*models.PathStatus),\n\t\tnodes: make(nodeMap),\n\t}\n\tprober.MaxRTT = s.ProbeDeadline\n\t// FIXME: Doubling the default payload size to 16 is a workaround for GH-18177\n\tprober.Size = 2 * fastping.TimeSliceLength\n\tprober.setNodes(nodes, nil)\n\tprober.OnRecv = func(addr *net.IPAddr, rtt time.Duration) {\n\t\tprober.Lock()\n\t\tdefer prober.Unlock()\n\t\tnode, exists := prober.nodes[ipString(addr.String())]\n\n\t\t// Only add fields to the scoped logger if debug is enabled, to save on resources.\n\t\t// This can be done since all logs in this function are debug-level only.\n\t\tscopedLog := log\n\t\tif logging.CanLogAt(log.Logger, logrus.DebugLevel) {\n\t\t\tscopedLog = log.WithFields(logrus.Fields{\n\t\t\t\tlogfields.IPAddr: addr,\n\t\t\t\tlogfields.NodeName: node.Name,\n\t\t\t\t\"rtt\": rtt,\n\t\t\t})\n\t\t}\n\n\t\tif !exists {\n\t\t\tscopedLog.Debug(\"Node disappeared, skip result\")\n\t\t\treturn\n\t\t}\n\n\t\tprober.results[ipString(addr.String())].Icmp = &models.ConnectivityStatus{\n\t\t\tLatency: rtt.Nanoseconds(),\n\t\t\tStatus: \"\",\n\t\t}\n\t\tscopedLog.Debug(\"probe successful\")\n\t}\n\n\treturn prober\n}", "func New(opts ...Option) (Info, error) {\n\ti := &info{\n\t\tdetail: Detail{\n\t\t\tServerName: \"\",\n\t\t\tVersion: Version,\n\t\t\tGitCommit: GitCommit,\n\t\t\tBuildTime: BuildTime,\n\t\t\tGoVersion: GoVersion,\n\t\t\tGoOS: GoOS,\n\t\t\tGoArch: GoArch,\n\t\t\tGoRoot: GoRoot,\n\t\t\tCGOEnabled: CGOEnabled,\n\t\t\tNGTVersion: NGTVersion,\n\t\t\tBuildCPUInfoFlags: strings.Split(strings.TrimSpace(BuildCPUInfoFlags), \" \"),\n\t\t\tStackTrace: nil,\n\t\t},\n\t}\n\n\tfor _, opt := range append(defaultOpts, opts...) 
{\n\t\tif err := opt(i); err != nil {\n\t\t\twerr := errors.ErrOptionFailed(err, reflect.ValueOf(opt))\n\n\t\t\te := new(errors.ErrCriticalOption)\n\t\t\tif errors.As(err, &e) {\n\t\t\t\tlog.Error(werr)\n\t\t\t\treturn nil, werr\n\t\t\t}\n\t\t\tlog.Warn(werr)\n\t\t}\n\t}\n\n\tif i.rtCaller == nil || i.rtFuncForPC == nil {\n\t\treturn nil, errors.ErrRuntimeFuncNil\n\t}\n\n\ti.prepare()\n\n\treturn i, nil\n}", "func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {\n\tif !keyType.IsValid() {\n\t\treturn nil, InvalidPolicyFormatError(fmt.Sprintf(\"invalid keyType \\\"%s\\\"\", keyType))\n\t}\n\tkeySources := 0\n\tif keyPath != \"\" {\n\t\tkeySources++\n\t}\n\tif keyPaths != nil {\n\t\tkeySources++\n\t}\n\tif keyData != nil {\n\t\tkeySources++\n\t}\n\tif keySources != 1 {\n\t\treturn nil, InvalidPolicyFormatError(\"exactly one of keyPath, keyPaths and keyData must be specified\")\n\t}\n\tif signedIdentity == nil {\n\t\treturn nil, InvalidPolicyFormatError(\"signedIdentity not specified\")\n\t}\n\treturn &prSignedBy{\n\t\tprCommon: prCommon{Type: prTypeSignedBy},\n\t\tKeyType: keyType,\n\t\tKeyPath: keyPath,\n\t\tKeyPaths: keyPaths,\n\t\tKeyData: keyData,\n\t\tSignedIdentity: signedIdentity,\n\t}, nil\n}", "func init() {\n\tgrpccmd.RegisterServiceCmd(_WorkerCmd)\n\t_WorkerCmd.AddCommand(\n\t\t_Worker_PushTaskCmd,\n\t\t_Worker_PushTaskCmd_gen,\n\t\t_Worker_PullTaskCmd,\n\t\t_Worker_PullTaskCmd_gen,\n\t\t_Worker_StartTaskCmd,\n\t\t_Worker_StartTaskCmd_gen,\n\t\t_Worker_StopTaskCmd,\n\t\t_Worker_StopTaskCmd_gen,\n\t\t_Worker_TaskStatusCmd,\n\t\t_Worker_TaskStatusCmd_gen,\n\t\t_Worker_JoinNetworkCmd,\n\t\t_Worker_JoinNetworkCmd_gen,\n\t\t_Worker_TaskLogsCmd,\n\t\t_Worker_TaskLogsCmd_gen,\n\t\t_Worker_GetDealInfoCmd,\n\t\t_Worker_GetDealInfoCmd_gen,\n\t)\n}", "func (gr *ActivityReport) Run() error {\n\n\t// create a client (safe to share across requests)\n\tctx := 
context.Background()\n\ttokenSource := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: gr.gitHubToken},\n\t)\n\thttpClient := oauth2.NewClient(ctx, tokenSource)\n\tclient := graphql.NewClient(\"https://api.github.com/graphql\", graphql.WithHTTPClient(httpClient), graphql.UseInlineJSON())\n\t//client.Log = func(s string) { fmt.Println(s) }\n\n\tnow := time.Now()\n\tsince := now.AddDate(0, 0, -gr.Duration)\n\n\tgr.ReportDate = now\n\n\trepositories, err := gr.listRepositories(ctx, client, gr.Organization, \"\")\n\tif err != nil {\n\t\treturn errors.New(fmt.Sprintf(\"An error occured during repositories listing %v\\n\", err))\n\t} else {\n\t\tfor _, repoName := range repositories {\n\t\t\treport, err2 := gr.reportRepository(ctx, client, gr.Organization, repoName, since)\n\t\t\tif err2 != nil {\n\t\t\t\treturn errors.New(fmt.Sprintf(\"An error occured during report for %s: %v\\n\", repoName, err2))\n\t\t\t} else {\n\t\t\t\t// Build report\n\n\t\t\t\t// Extract Merged PR (keep the ones merged during last 7 days)\n\t\t\t\tfor _, pullrequest := range report.Repository.MergedPR.Nodes {\n\t\t\t\t\tt, _ := time.Parse(ISO_FORM, pullrequest.MergedAt)\n\t\t\t\t\tif t.After(since) {\n\t\t\t\t\t\tpullrequest.Repository = repoName\n\t\t\t\t\t\tgr.Result.MergedPRs = append(gr.Result.MergedPRs, pullrequest)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Extract Open PR with and without activity\n\t\t\t\tfor _, pullrequest := range report.Repository.OpenPR.Nodes {\n\t\t\t\t\tpullrequest.Repository = repoName\n\t\t\t\t\tif pullrequest.Timeline.TotalCount > 0 {\n\t\t\t\t\t\tgr.Result.OpenPRsWithActivity = append(gr.Result.OpenPRsWithActivity, pullrequest)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgr.Result.OpenPRsWithoutActivity = append(gr.Result.OpenPRsWithoutActivity, pullrequest)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tgr.logf(\"Nb merged pr:%d\\n\", len(gr.Result.MergedPRs))\n\t\tgr.logf(\"Nb open pr with activity:%d\\n\", len(gr.Result.OpenPRsWithActivity))\n\t\tgr.logf(\"Nb open pr 
without activity:%d\\n\", len(gr.Result.OpenPRsWithoutActivity))\n\t\treturn nil\n\t}\n}", "func New(reporter services.UsageReporter, log logrus.FieldLogger, inner apievents.Emitter) (*UsageLogger, error) {\n\tif log == nil {\n\t\tlog = logrus.StandardLogger()\n\t}\n\n\treturn &UsageLogger{\n\t\tEntry: log.WithField(\n\t\t\ttrace.Component,\n\t\t\tteleport.Component(teleport.ComponentUsageReporting),\n\t\t),\n\t\treporter: reporter,\n\t\tinner: inner,\n\t}, nil\n}", "func MustNew() spec.Tracker {\n\tnewTracker, err := New(DefaultConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn newTracker\n}", "func (b *MockPrBuilder) Create() *PrMock {\n\tfor _, mock := range b.mockCreators {\n\t\tmock(b)\n\t}\n\tgomega.Expect(b.errors).To(gomega.BeEmpty())\n\n\treturn &PrMock{PullRequest: b.pullRequest}\n}", "func NewTrack(distance int) Track {\n\tpanic(\"Please implement the NewTrack function\")\n}", "func setupFileIngestion(ctx context.Context) error {\n\tsklog.Info(\"Checking out skia\")\n\trepo, err := gitinfo.CloneOrUpdate(ctx, common.REPO_SKIA, filepath.Join(*gitDir, \"skia\"), false)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not clone skia repo: %s\", err)\n\t}\n\n\tclient, err := auth.NewDefaultJWTServiceAccountClient(auth.SCOPE_READ_ONLY)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Problem setting up client OAuth: %s\", err)\n\t}\n\n\tstorageClient, err = storage.NewClient(ctx, option.WithHTTPClient(client))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Problem authenticating: %s\", err)\n\t}\n\n\tgcsClient := gcs.NewGCSClient(storageClient, *bucket)\n\tboltDB, err := db.NewBoltDB(*cachePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not set up bolt db cache: %s\", err)\n\t}\n\tcoverageIngester = coverageingest.New(*extractDir, gcsClient, boltDB)\n\n\tcycle := func(v vcsinfo.VCS, coverageIngester coverageingest.Ingester) {\n\t\tsklog.Info(\"Begin coverage ingest cycle\")\n\t\tif err := v.Update(ctx, true, false); err != nil 
{\n\t\t\tsklog.Warningf(\"Could not update git repo, but continuing anyway: %s\", err)\n\t\t}\n\t\tcommits := []*vcsinfo.LongCommit{}\n\t\tfor _, c := range v.LastNIndex(*nCommits) {\n\t\t\tlc, err := v.Details(ctx, c.Hash, false)\n\t\t\tif err != nil {\n\t\t\t\tsklog.Errorf(\"Could not get commit info for git revision %s: %s\", c.Hash, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Reverse the order so the most recent commit is first\n\t\t\tcommits = append([]*vcsinfo.LongCommit{lc}, commits...)\n\t\t}\n\t\tcoverageIngester.IngestCommits(ctx, commits)\n\t\tsklog.Info(\"End coverage ingest cycle\")\n\t}\n\n\tgo func(v vcsinfo.VCS, coverageIngester coverageingest.Ingester) {\n\t\tcycle(repo, coverageIngester)\n\t\tfor range time.Tick(*ingestPeriod) {\n\t\t\tcycle(repo, coverageIngester)\n\t\t}\n\t}(repo, coverageIngester)\n\treturn nil\n}", "func (s StatsGraphError) construct() StatsGraphClass { return &s }" ]
[ "0.56856984", "0.53329396", "0.5233629", "0.51901466", "0.5128143", "0.5112698", "0.5102944", "0.507692", "0.49822295", "0.48981684", "0.48829168", "0.48177564", "0.4798724", "0.47585854", "0.47286558", "0.4725269", "0.47240978", "0.4699421", "0.4683478", "0.46781945", "0.46722546", "0.46357787", "0.45803407", "0.4573137", "0.4508737", "0.44981447", "0.44759962", "0.44632432", "0.44601735", "0.4451647", "0.44224238", "0.4421602", "0.440653", "0.43985623", "0.43980438", "0.4371114", "0.43665755", "0.43627593", "0.43425673", "0.43375993", "0.43309385", "0.43308207", "0.43185973", "0.43090183", "0.42932096", "0.4275362", "0.4273618", "0.42707628", "0.42652428", "0.42528537", "0.4234388", "0.42325917", "0.42173907", "0.4204678", "0.42020804", "0.4192279", "0.4192279", "0.4178816", "0.4177983", "0.41716206", "0.4170695", "0.41678888", "0.4167351", "0.41637602", "0.4160682", "0.41554976", "0.41401547", "0.41389483", "0.41288057", "0.4126897", "0.4126627", "0.41244543", "0.41205317", "0.4120187", "0.41083184", "0.41021496", "0.4095631", "0.40943295", "0.40797517", "0.40686476", "0.40642303", "0.4062741", "0.40595552", "0.40538302", "0.4052872", "0.40500113", "0.40486324", "0.4047121", "0.4038797", "0.40334034", "0.4031408", "0.4030846", "0.40282667", "0.40274706", "0.40261483", "0.40258956", "0.40234506", "0.40233463", "0.40201017", "0.40068117" ]
0.8170067
0
String name of this tracker
func (psr *PSRTracker) String() string { return "PSRTracker" }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o TrackerOutput) TrackerName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Tracker) pulumi.StringOutput { return v.TrackerName }).(pulumi.StringOutput)\n}", "func (m *Mgr) Name() string {\n\treturn \"collection configuration history listener\"\n}", "func (src *Tracer) Name() string {\n\treturn src.name\n}", "func (l *JSONFileLogger) Name() string {\n\treturn Name\n}", "func (l *JSONFileLogger) Name() string {\n\treturn Name\n}", "func (r *reporter) Name() string {\n\treturn r.goTestName\n}", "func (o *influxDBLogger) GetName() string {\n\treturn \"influxDBLogger\"\n}", "func (m *Metric) StackdriverName() string {\n\treturn fmt.Sprintf(\"custom.googleapis.com/datadog/%s\", m.Name)\n}", "func (collector *CollectorV2[T]) Name() string {\n\treturn collector.name\n}", "func (s *SendEventToMeshAndCheckEventId) Name() string {\n\treturn \"Send event to mesh and check event id\"\n}", "func (p *stats) Name() string {\n\treturn \"Stats\"\n}", "func (l *RelayDriver) Name() string { return l.name }", "func (p *perfStoreManager) Name() string {\n\treturn perfResourceName\n}", "func (p Prometheus) Name() string {\n\treturn \"Prometheus probe for \" + p.URL + \" [\" + p.Key + \"]\"\n}", "func (n *SQSNotify) Name() string {\n\treturn n.name\n}", "func (t *LogProviderHandler) Name() string {\n\treturn LogProvider\n}", "func (re *Records) Name() string { return \"records\" }", "func (e *DeviceHistory) Name() string {\n\treturn \"DeviceHistory\"\n}", "func (f *AnalyzerFingerprint) Name() string {\n\treturn f.name\n}", "func (l *BasicLogger) getName() string {\n\treturn l.name\n}", "func (e *EntryBase) Name() string {\n\treturn e.name()\n}", "func (j *DSRocketchat) Name() string {\n\treturn j.DS\n}", "func (m *SDKScriptCollectorAttribute) Name() string {\n\treturn \"sdkscript\"\n}", "func Name() string { return note.Name }", "func (r *nodesStatusChecker) Name() string { return NodesStatusCheckerID }", "func (r *nodesStatusChecker) Name() string { return 
NodesStatusCheckerID }", "func (r *checker) Name() string {\n\treturn checkerID\n}", "func (p Packet) Name() (name string) {\n\t// todo: think of ways to make this not a compiled in hack\n\t// todo: collectd 4 uses different patterns for some plugins\n\t// https://collectd.org/wiki/index.php/V4_to_v5_migration_guide\n\tswitch p.Plugin {\n\tcase \"df\":\n\t\tname = fmt.Sprintf(\"df_%s_%s\", p.PluginInstance, p.TypeInstance)\n\tcase \"interface\":\n\t\tname = fmt.Sprintf(\"%s_%s\", p.Type, p.PluginInstance)\n\tcase \"load\":\n\t\tname = \"load\"\n\tcase \"memory\":\n\t\tname = fmt.Sprintf(\"memory_%s\", p.TypeInstance)\n\tdefault:\n\t\tname = fmt.Sprintf(\"%s_%s_%s_%s\", p.Plugin, p.PluginInstance, p.Type, p.TypeInstance)\n\t}\n\treturn name\n}", "func Name() string {\n\treturn driver.AppName()\n}", "func (c *auditLog) getName() string {\n\treturn c.name\n}", "func (m *MeasurementStatus) Name() string {\n return m.data.Name\n}", "func (te *TelemetryEmitter) Name() string {\n\treturn te.name\n}", "func (te *TelemetryEmitter) Name() string {\n\treturn te.name\n}", "func (t *Task) Name() string { t.mutex.RLock(); defer t.mutex.RUnlock(); return t.name }", "func (c *Client) GetName() string {\n\treturn \"pubsub-reporter\"\n}", "func (factory) Name() string {\n\treturn tanLogDBName\n}", "func (o LoggerEventhubOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LoggerEventhub) string { return v.Name }).(pulumi.StringOutput)\n}", "func (d *Driver) Name() string { return d.name }", "func (c *Counter) Name() string {\n\treturn c.name\n}", "func (c *Counter) Name() string {\n\treturn c.name\n}", "func (*isisCollector) Name() string {\n\treturn \"ISIS\"\n}", "func (e *EDNS) Name() string { return name }", "func (d *FileDescriptor) VarName() string { return fmt.Sprintf(\"fileDescriptor%d\", d.index) }", "func (this *BufferedLog) Name() string {\n\tthis.lock.RLock()\n\tdefer this.lock.RUnlock()\n\n\treturn this.name\n}", "func (r *nodeStatusChecker) Name() string { 
return NodeStatusCheckerID }", "func (r *nodeStatusChecker) Name() string { return NodeStatusCheckerID }", "func (bl *LogBuffer) Name() string {\n\treturn bl.logger.Name()\n}", "func (b *BulkLGTM) Name() string {\n\treturn \"bulk-lgtm\"\n}", "func (pp *PerfPlugin) Name() string { return \"perf\" }", "func (l *LogSink) Name() string {\n\treturn l.Sink\n}", "func (gc *gcsCache) Name() string {\n\treturn gc.name\n}", "func (fs *FileSystem) Name() string {\n\treturn \"Datastore Fs\"\n}", "func (g *GaugeUint64) Name() string {\n\treturn g.name\n}", "func (hm HashMap) Name() string { return hm.name }", "func (d *APA102Driver) Name() string { return d.name }", "func (p *GogoPlugin) Name() string {\n\treturn \"gogo:protobuf:protoc-gen-\" + p.variant\n}", "func (a *Audited) Name() string {\n\treturn \"gorm:audited\"\n}", "func (c *Client) GetName() string {\n\treturn \"gerrit-reporter\"\n}", "func (l *LogDB) Name() string {\n\treturn tanLogDBName\n}", "func (cb *Breaker) Name() string {\n\treturn cb.name\n}", "func (w *WebHook) Name() string {\n\treturn moduleName\n}", "func (s StatsEntry) Name() string {\n\treturn s.name\n}", "func (cmd *TsListKeysCommand) Name() string {\n\treturn cmd.getName(\"TsListKeys\")\n}", "func (e Notify) Name() string { return \"notify\" }", "func (mif *MetricInterFlow) Name() string {\n\treturn \"InterFlowTimes\"\n}", "func (ev *EventMeta) Name() string {\n\treturn \"EventMeta\"\n}", "func (c *Event) Name() string {\n\treturn c.name\n}", "func (s *StopEvent) Name() string {\n\treturn s.name\n}", "func (*bgpCollector) Name() string {\n\treturn \"BGP\"\n}", "func (e *EndComponent) Name() string {\n\treturn \"name\"\n}", "func (server *Server) Name() string {\n\treturn \"Gophermine\"\n}", "func (o LogDescriptorOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v LogDescriptor) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (m TaskManager) Name() string {\n\treturn \"gitiles\"\n}", "func (m TaskManager) Name() string 
{\n\treturn \"gitiles\"\n}", "func (obj *label) Name() string {\n\treturn obj.name\n}", "func (d *Dentry) Name() string {\n\treturn d.name\n}", "func (e *Event) Track() string {\n\treturn e.track\n}", "func (dt *Targeter) name() string {\n\tvar id string\n\tif dt.IDs != nil {\n\t\tid = \"{id}\"\n\t}\n\treturn fmt.Sprintf(\"%s %s/%s/%s\", dt.Method, dt.BaseURL, dt.Endpoint, id)\n}", "func (r *regulator) Name() string {\n\tr.enter()\n\tdefer r.exit()\n\n\treturn r.StorageDriver.Name()\n}", "func (c *Config) Name() string {\n\treturn \"mongodb_exporter\"\n}", "func (fs *OneFile) Name() string {\n\tif fs.r.Hourly {\n\t\treturn fmt.Sprintf(\"files: %s\\\\%s_YYYYMMDD_HH.%s (keep: %s)\", fs.r.Directory, fs.r.Prefix, fs.r.Extension, fs.r.Retention.String())\n\t}\n\treturn fmt.Sprintf(\"files: %s\\\\%s_YYYYMMDD_.%s (keep: %s)\", fs.r.Directory, fs.r.Prefix, fs.r.Extension, fs.r.Retention.String())\n}", "func (d *Descriptor) Name() string {\n\treturn knownDescriptors[d.uuid.String()].Name\n}", "func (e *ListTrackersUnverifiedEngine) Name() string {\n\treturn \"ListTrackersUnverifiedEngine\"\n}", "func (c *MonitorComponent) Name() string {\n\treturn ComponentPrometheus\n}", "func (e *BasicEvent) Name() string {\n\treturn e.name\n}", "func (j *DSGitHub) Name() string {\n\treturn j.DS\n}", "func (s *Source) Name() string {\n\treturn \"spyse\"\n}", "func (s *LocalService) Name() string {\n\treturn \"local\"\n}", "func (h *MPU6050Driver) Name() string { return h.name }", "func (m *MyMetric) Name() string {\n\treturn m.NameStr\n}", "func (l *ActivityDumpRuntimeSetting) Name() string {\n\treturn l.ConfigKey\n}", "func (set Set) Name() string { return set.name }", "func (s *SharingKey) Name() string {\n\treturn string(s.name)\n}", "func (g *Guessit) Name() string {\n\treturn moduleName\n}", "func (e *Entry) Name() string {\n\treturn e.name\n}", "func (e *Exporter) Name() string {\n\treturn e.name\n}", "func (b *TestDriver) Name() string { return b.name }", "func (ra *RenameAnalysis) 
Name() string {\n\treturn \"RenameAnalysis\"\n}", "func (s Source) Name() string { return \"rdt\" }", "func (l *LessonTut) Name() string {\n\treturn l.path.Base()\n}", "func (s *CommandLineSource) Name() (name string) {\n\treturn \"command-line\"\n}" ]
[ "0.6869942", "0.6641681", "0.6456122", "0.63871306", "0.63871306", "0.63696337", "0.63530385", "0.63476366", "0.6339862", "0.6325382", "0.6311702", "0.6283495", "0.6269804", "0.6266775", "0.62627256", "0.6244386", "0.6236249", "0.62310374", "0.6217214", "0.6207833", "0.62043965", "0.6189203", "0.6184578", "0.6182909", "0.61741406", "0.61741406", "0.6172353", "0.6172337", "0.61675084", "0.6163849", "0.616147", "0.6155189", "0.6155189", "0.61434853", "0.6136054", "0.61328363", "0.6132273", "0.6131769", "0.6127166", "0.6127166", "0.61202216", "0.61187685", "0.61125624", "0.61033434", "0.6098276", "0.6098276", "0.6085355", "0.6084421", "0.6081511", "0.60769093", "0.6069821", "0.6068101", "0.60672057", "0.6065734", "0.6065276", "0.6061121", "0.6059061", "0.6055811", "0.6048629", "0.60376614", "0.60346603", "0.60288924", "0.6025319", "0.6017985", "0.60110545", "0.6001598", "0.600094", "0.5983152", "0.59817547", "0.5980021", "0.59775054", "0.5972595", "0.59635884", "0.59635884", "0.5960278", "0.5959088", "0.5952884", "0.59446585", "0.5943604", "0.5940104", "0.593021", "0.5928312", "0.59205806", "0.59189236", "0.59140843", "0.59093124", "0.5905044", "0.590362", "0.59035754", "0.5903044", "0.59001875", "0.5898562", "0.5897291", "0.5895312", "0.58943003", "0.5893742", "0.5892054", "0.58904254", "0.58897597", "0.5889075", "0.5887189" ]
0.0
-1
Exec implements tracker API
func (psr *PSRTracker) Exec(ctx context.Context) error { //TODO: retrieve github updates of psr config file. For now, we'll just pull //PSR's as defined by psr.json file var syncGroup sync.WaitGroup var doneGroup sync.WaitGroup ctx = context.WithValue(ctx, psrWaitGroupKey, &syncGroup) errorCh := make(chan error) doneGroup.Add(1) go func() { defer doneGroup.Done() for { e := <-errorCh if e != nil { psrLog.Error("Problem in PSR fetch: %v]\n", e) } else { return } } }() for i := 0; i < len(psr.Requests); i++ { p := psr.Requests[i] syncGroup.Add(1) go p.fetch(ctx, errorCh) } syncGroup.Wait() errorCh <- nil doneGroup.Wait() psrLog.Info("PSR Tracker cycle complete") return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (hu *HistorytakingUpdate) Exec(ctx context.Context) error {\n\t_, err := hu.Save(ctx)\n\treturn err\n}", "func (huo *HistorytakingUpdateOne) Exec(ctx context.Context) error {\n\t_, err := huo.Save(ctx)\n\treturn err\n}", "func (h *DriverHandle) Exec(timeout time.Duration, cmd string, args []string) ([]byte, int, error) {\n\tcommand := append([]string{cmd}, args...)\n\tres, err := h.driver.ExecTask(h.taskID, command, timeout)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn res.Stdout, res.ExitResult.ExitCode, res.ExitResult.Err\n}", "func (b *DisputeTracker) Exec(ctx context.Context) error {\n\t//cast client using type assertion since context holds generic interface{}\n\tclient := ctx.Value(berryCommon.ClientContextKey).(rpc.ETHClient)\n\tDB := ctx.Value(berryCommon.DBContextKey).(db.DB)\n\n\t//get the single config instance\n\tcfg := config.GetConfig()\n\n\t//get address from config\n\t_fromAddress := cfg.PublicAddress\n\n\t//convert to address\n\tfromAddress := common.HexToAddress(_fromAddress)\n\n\t_conAddress := cfg.ContractAddress\n\n\t//convert to address\n\tcontractAddress := common.HexToAddress(_conAddress)\n\n\tinstance, err := berry.NewBerryMaster(contractAddress, client)\n\tif err != nil {\n\t\tfmt.Println(\"instance Error, disputeStatus\")\n\t\treturn err\n\t}\n\n\tstatus, _, err := instance.GetStakerInfo(nil, fromAddress)\n\t\n\tif err != nil {\n\t\tfmt.Println(\"instance Error, disputeStatus\")\n\t\treturn err\n\t}\n\tenc := hexutil.EncodeBig(status)\n\tlog.Printf(\"Staker Status: %v\", enc)\n\terr = DB.Put(db.DisputeStatusKey, []byte(enc))\n\tif err != nil {\n\t\tfmt.Printf(\"Problem storing dispute info: %v\\n\", err)\n\t\treturn err\n\t}\n\t//Issue #50, bail out of not able to mine\n\t// if status.Cmp(big.NewInt(1)) != 0 {\n\t// \tlog.Fatalf(\"Miner is not able to mine with status %v. 
Stopping all mining immediately\", status)\n\t// }\n\n\t//add all whitelisted miner addresses as well since they will be coming in\n\t//asking for dispute status\n\tfor _, addr := range cfg.ServerWhitelist {\n\t\taddress := common.HexToAddress(addr)\n\t\t//fmt.Println(\"Getting staker info for address\", addr)\n\t\tstatus, _, err := instance.GetStakerInfo(nil, address)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Could not get staker dispute status for miner address %s: %v\\n\", addr, err)\n\t\t}\n\t\tfmt.Printf(\"Whitelisted Miner %s Dispute Status: %v\\n\", addr, status)\n\t\tdbKey := fmt.Sprintf(\"%s-%s\", strings.ToLower(address.Hex()), db.DisputeStatusKey)\n\t\terr = DB.Put(dbKey, []byte(hexutil.EncodeBig(status)))\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Problem storing staker dispute status: %v\\n\", err)\n\t\t}\n\t}\n\t//fmt.Println(\"Finished updated dispute status\")\n\treturn nil\n}", "func (tu *TimingUpdate) Exec(ctx context.Context) error {\n\t_, err := tu.Save(ctx)\n\treturn err\n}", "func (b *NewCurrentVariablesTracker) Exec(ctx context.Context) error {\r\n\t//cast client using type assertion since context holds generic interface{}\r\n\tDB := ctx.Value(berryCommon.DBContextKey).(db.DB)\r\n\t//get the single config instance\r\n\tcfg := config.GetConfig()\r\n\r\n\t//get address from config\r\n\t_fromAddress := cfg.PublicAddress\r\n\r\n\t//convert to address\r\n\tfromAddress := common.HexToAddress(_fromAddress)\r\n\r\n\tinstance := ctx.Value(berryCommon.NewBerryContractContextKey).(*contracts2.Berry)\r\n\treturnNewVariables, err := instance.GetNewCurrentVariables(nil)\r\n\tif err != nil{\r\n\t\tfmt.Println(\"New Current Variables Retrieval Error - Contract might not be upgraded\")\r\n\t\treturn nil\r\n\t}\r\n\tif returnNewVariables.RequestIds[0].Int64() > int64(100) || returnNewVariables.RequestIds[0].Int64() == 0 {\r\n\t\tfmt.Println(\"New Current Variables Request ID not correct - Contract about to be upgraded\")\r\n\t\treturn 
nil\r\n\t}\r\n\tfmt.Println(returnNewVariables)\r\n\r\n\t//if we've mined it, don't save it\r\n\t\r\n\tinstance2 := ctx.Value(berryCommon.MasterContractContextKey).(*contracts.BerryMaster)\r\n\tmyStatus, err := instance2.DidMine(nil, returnNewVariables.Challenge, fromAddress)\r\n\tif err != nil {\r\n\t\tfmt.Println(\"My Status Retrieval Error\")\r\n\t\treturn err\r\n\t}\r\n\tbitSetVar := []byte{0}\r\n\tif myStatus {\r\n\t\tbitSetVar = []byte{1}\r\n\t}\r\n\r\n\thash := solsha3.SoliditySHA3(\r\n\t\t// types\r\n\t\t[]string{\"string\"},\r\n\t\t// values\r\n\t\t[]interface{}{\r\n\t\t\t\"timeOfLastNewValue\",\r\n\t\t},\r\n\t)\r\n\tvar ret [32]byte\r\n\tcopy(ret[:], hash)\r\n\ttimeOfLastNewValue, err := instance2.GetUintVar(nil,ret)\r\n\tif err != nil {\r\n\t\tfmt.Println(\"Time of Last New Value Retrieval Error\")\r\n\t\treturn err\r\n\t}\r\n\terr = DB.Put(db.LastNewValueKey, []byte(hexutil.EncodeBig(timeOfLastNewValue)))\r\n\tif err != nil {\r\n\t\tfmt.Println(\"New Current Variables Put Error\")\r\n\t\treturn err\r\n\t}\r\n\terr = DB.Put(db.CurrentChallengeKey, returnNewVariables.Challenge[:])\r\n\tif err != nil {\r\n\t\tfmt.Println(\"New Current Variables Put Error\")\r\n\t\treturn err\r\n\t}\r\n\r\n\r\n\tfor i:= 0; i < 5; i++ {\r\n\t\tconc := fmt.Sprintf(\"%s%d\",\"current_requestId\",i)\r\n\t\terr = DB.Put(conc, []byte(hexutil.EncodeBig(returnNewVariables.RequestIds[i])))\r\n\t\tif err != nil {\r\n\t\t\tfmt.Println(\"New Current Variables Put Error\")\r\n\t\t\treturn err\r\n\t\t}\r\n\t}\r\n\r\n\terr = DB.Put(db.DifficultyKey, []byte(hexutil.EncodeBig(returnNewVariables.Difficulty)))\r\n\tif err != nil {\r\n\t\tfmt.Println(\"New Current Variables Put Error\")\r\n\t\treturn err\r\n\t}\r\n\r\n\terr = DB.Put(db.TotalTipKey, []byte(hexutil.EncodeBig(returnNewVariables.Tip)))\r\n\tif err != nil {\r\n\t\tfmt.Println(\"New Current Variables Put Error\")\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn DB.Put(db.MiningStatusKey, bitSetVar)\r\n}", "func (e *executor) run(timeout 
time.Duration, opts opts, path string, args ...string) error {\n\t_, err := e.execute(true, timeout, opts, path, args...)\n\treturn err\n}", "func (pu *PharmacistUpdate) Exec(ctx context.Context) error {\n\t_, err := pu.Save(ctx)\n\treturn err\n}", "func (du *DoctorinfoUpdate) Exec(ctx context.Context) error {\n\t_, err := du.Save(ctx)\n\treturn err\n}", "func (hu *HeartbeatUpdate) Exec(ctx context.Context) error {\n\t_, err := hu.Save(ctx)\n\treturn err\n}", "func (tu *TeamUpdate) Exec(ctx context.Context) error {\n\t_, err := tu.Save(ctx)\n\treturn err\n}", "func (mu *MoneytransferUpdate) Exec(ctx context.Context) error {\n\t_, err := mu.Save(ctx)\n\treturn err\n}", "func (au *AntenatalinformationUpdate) Exec(ctx context.Context) error {\n\t_, err := au.Save(ctx)\n\treturn err\n}", "func (ktu *KqiTargetUpdate) Exec(ctx context.Context) error {\n\t_, err := ktu.Save(ctx)\n\treturn err\n}", "func (huo *HistorytakingUpdateOne) ExecX(ctx context.Context) {\n\tif err := huo.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func Execute(commits int, override bool) {\n configuration := CommonParsingAndValidation().(Configuration)\n\n interval := 1 * time.Hour\n duration := 1752000 * time.Hour // run the app for 200 years (means never stop lol)\n\n if configuration.Interval == \"daily\" {\n interval = 24 * time.Hour\n }\n\n // make the first commit now\n ExecuteGatherAndPush(commits, override, configuration)\n\n ticker := time.NewTicker(interval)\n quit := make(chan struct{})\n go func() {\n for {\n select {\n case <-ticker.C:\n ExecuteGatherAndPush(commits, override, configuration)\n case <-quit:\n ticker.Stop()\n return\n }\n }\n }()\n\n log.Printf(\"Gathering and storing data each %v during %v.\", interval, duration)\n <-time.After(time.Duration(duration))\n log.Println(\"Finished Execution\")\n}", "func (oiu *OrderInfoUpdate) Exec(ctx context.Context) error {\n\t_, err := oiu.Save(ctx)\n\treturn err\n}", "func (hu *HistorytakingUpdate) ExecX(ctx context.Context) 
{\n\tif err := hu.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (luu *LastUpdatedUpdate) Exec(ctx context.Context) error {\n\t_, err := luu.Save(ctx)\n\treturn err\n}", "func (lu *LocationUpdate) Exec(ctx context.Context) error {\n\t_, err := lu.Save(ctx)\n\treturn err\n}", "func (ru *RevisionUpdate) Exec(ctx context.Context) error {\n\t_, err := ru.Save(ctx)\n\treturn err\n}", "func (a *Agent) exec(ctx context.Context) error {\n\tvar err error\n\tdefer func() {\n\t\tif err == nil {\n\t\t\ta.lock.Lock()\n\t\t\ta.isDone = true\n\t\t\ta.lock.Unlock()\n\t\t}\n\t}()\n\n\tvar (\n\t\tcurrent = 0\n\t\tstep = 1\n\t\tdone = make(chan error)\n\t)\n\n\tif a.state == stateDown {\n\t\tcurrent = len(a.orderedServices) + 1\n\t\tstep = -1\n\t}\n\n\t// Iterate over priority groups. Move in the direction from\n\t// priority [1..n] for startup sequences, and from priority [n..1]\n\t// for shutdown sequences. There is no guarantee regarding order\n\t// of execution within each priority group. It's possible\n\t// to interrupt the sequence between each priority group.\n\tfor i := 0; i < len(a.orderedServices); i++ {\n\t\tcurrent += step\n\n\t\tgo a.execPriority(ctx, uint16(current), done)\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\t\t<-done // Wait for execPriority to finish before stopping execution.\n\t\t\t//a.report(Progress{Service: \"\", Err: err})\n\n\t\t\treturn err\n\t\tcase err = <-done:\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t//a.report(Progress{Service: \"\", Err: err})\n\n\treturn err\n}", "func (piu *ProviderIDUpdate) Exec(ctx context.Context) error {\n\t_, err := piu.Save(ctx)\n\treturn err\n}", "func (ui *UI) exec(ctx context.Context, line string, reqCh chan execReq) int {\n\treq := execReq{\n\t\tctx: ctx,\n\t\tline: line,\n\t\tui: ui,\n\t\trespCh: make(chan int),\n\t}\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn 0\n\tcase reqCh <- req:\n\t}\n\treturn <-req.respCh\n}", "func (tuo 
*TeamUpdateOne) Exec(ctx context.Context) error {\n\t_, err := tuo.Save(ctx)\n\treturn err\n}", "func influunt_ExecutorRun(self, args *pyObject) *C.PyObject {\n\teCapsule, inputs, outputs := parse3ObjectFromArgs(args)\n\te := capsuleToPointer(eCapsule)\n\texec := pointer.Restore(e).(*executor.Executor)\n\n\tinputMap, err := convertPyDictNodeMap(inputs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\toutputArr, err := convertPyListToNodeArr(outputs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tres, err := exec.Run(inputMap, outputArr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresPyObj, err := convertGoTypeToPyObject(res)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn resPyObj\n}", "func (cmd InspectCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int {\n\tap := cmd.ArgParser()\n\thelp, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, cli.CommandDocumentationContent{}, ap))\n\tapr := cli.ParseArgsOrDie(ap, args, help)\n\n\tvar verr errhand.VerboseError\n\tif apr.Contains(tableFileIndexFlag) {\n\t\tverr = cmd.measureChunkIndexDistribution(ctx, dEnv)\n\t}\n\n\treturn HandleVErrAndExitCode(verr, usage)\n}", "func (t *Task) Exec(agent *Agent) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\n\t\t\t//todo send task status to DCMS-agent\n\t\t\t// log.Warningf(\"run task: %s jobname: failed : %s\", t.TaskId, t.Job.Name, e)\n\t\t\tts := &TaskStatus{\n\t\t\t\tTaskPtr: t,\n\t\t\t\tCommand: nil,\n\t\t\t\tStatus: StatusFailed,\n\t\t\t\tCreateAt: time.Now().Unix(),\n\t\t\t\tErr: fmt.Errorf(\"run task: %s jobname: failed : %s\", t.TaskId, t.Job.Name, e),\n\t\t\t}\n\n\t\t\terrstr := fmt.Sprintf(\"%s\", e)\n\t\t\tif errstr == \"signal: killed\" {\n\t\t\t\tts.Status = StatusKilled\n\t\t\t}\n\t\t\tt.Job.Dcms.JobStatusChan <- ts\n\t\t}\n\t}()\n\n\tvar ts *TaskStatus\n\tvar err error\n\t// log.Info(\"task run Exec function in goroutine\")\n\n\tt.genLogFile()\n\t// check file 
signature\n\ttmp_md5 := util.Md5File(t.Job.Executor)\n\tif t.Job.Signature != tmp_md5 {\n\t\tts = &TaskStatus{\n\t\t\tTaskPtr: t,\n\t\t\tCommand: nil,\n\t\t\tStatus: StatusFailed,\n\t\t\tCreateAt: time.Now().Unix(),\n\t\t\tErr: fmt.Errorf(\"cronjob: %s executor: %s signature:%s does't match db's sig:%s\", t.Job.Name, t.Job.Executor, tmp_md5, t.Job.Signature),\n\t\t}\n\t\tt.Job.Dcms.JobStatusChan <- ts\n\t\treturn\n\t} else {\n\t\tlog.Info(\"cronjob signature match for \", t.Job.Name, t.Job.ExecutorFlags)\n\t}\n\n\tvar u *user.User\n\tu, err = user.Lookup(t.Job.Runner)\n\tif err != nil {\n\t\t// log.Warningf(\"user %s not exists, task %s quit \", err, t.TaskId)\n\t\tts = &TaskStatus{\n\t\t\tTaskPtr: t,\n\t\t\tCommand: nil,\n\t\t\tStatus: StatusFailed,\n\t\t\tCreateAt: time.Now().Unix(),\n\t\t\tErr: fmt.Errorf(\"user %s not exists, task %s quit \", err, t.TaskId),\n\t\t}\n\t\tt.Job.Dcms.JobStatusChan <- ts\n\t\treturn\n\t}\n\n\tvar uid int\n\tuid, err = strconv.Atoi(u.Uid)\n\tif err != nil {\n\t\t// log.Warningf(\"uid %s conver to int failed \", uid)\n\t\tts = &TaskStatus{\n\t\t\tTaskPtr: t,\n\t\t\tCommand: nil,\n\t\t\tStatus: StatusFailed,\n\t\t\tCreateAt: time.Now().Unix(),\n\t\t\tErr: fmt.Errorf(\"uid %s conver to int failed \", uid),\n\t\t}\n\t\tt.Job.Dcms.JobStatusChan <- ts\n\t\treturn\n\t}\n\n\t// chown log file to specific t.Job.Runner user\n\tif err = t.logfile.Chown(uid, uid); err != nil {\n\t\t// log.Warningf(\"chown logfile: %s to uid: %s failed, %s\", t.logfile.Name(), u.Uid, err)\n\t\tt.logfile = nil\n\t}\n\tvar cmd *exec.Cmd\n\tif t.Job.Executor != \"\" && t.Job.ExecutorFlags != \"\" {\n\t\tcmd = exec.Command(t.Job.Executor, t.Job.ExecutorFlags)\n\t} else if t.Job.Executor != \"\" && t.Job.ExecutorFlags == \"\" {\n\t\tcmd = exec.Command(t.Job.Executor)\n\t} else {\n\t\tts = &TaskStatus{\n\t\t\tTaskPtr: t,\n\t\t\tCommand: cmd,\n\t\t\tStatus: StatusFailed,\n\t\t\tCreateAt: time.Now().Unix(),\n\t\t\tErr: fmt.Errorf(\"job %s must have Executor \", 
t.Job.Name),\n\t\t}\n\t\tt.Job.Dcms.JobStatusChan <- ts\n\t\treturn\n\t}\n\n\tcmd.SysProcAttr = &syscall.SysProcAttr{}\n\tcmd.SysProcAttr.Credential = &syscall.Credential{Uid: uint32(uid)}\n\tcmd.SysProcAttr.Setsid = true\n\t// Pdeathsig only valid on linux system\n\t//\n\tcmd.SysProcAttr.Pdeathsig = syscall.SIGUSR1\n\n\tcmd.Stderr = t.logfile\n\tcmd.Stdout = t.logfile\n\n\tif err = cmd.Start(); err != nil {\n\t\t// log.Warningf(\"taskid:%s cmd Start failed: %s\", t.TaskId, err)\n\t\tts = &TaskStatus{\n\t\t\tTaskPtr: t,\n\t\t\tCommand: cmd,\n\t\t\tStatus: StatusFailed,\n\t\t\tCreateAt: time.Now().Unix(),\n\t\t\tErr: fmt.Errorf(\"taskid:%s cmd Start failed: %s\", t.TaskId, err),\n\t\t}\n\t\tt.Job.Dcms.JobStatusChan <- ts\n\t\treturn\n\t}\n\n\tts = &TaskStatus{\n\t\tTaskPtr: t,\n\t\tCommand: cmd,\n\t\tStatus: StatusRunning,\n\t\tCreateAt: time.Now().Unix(),\n\t\tErr: nil,\n\t}\n\tt.Job.Dcms.JobStatusChan <- ts\n\t// send cmd.process to dcms-agent\n\n\tif err = cmd.Wait(); err != nil {\n\t\t// log.Warningf(\"taskid:%s cmd Wait failed: %s\", t.TaskId, err)\n\t\tts = &TaskStatus{\n\t\t\tTaskPtr: t,\n\t\t\tCommand: cmd,\n\t\t\tStatus: StatusFailed,\n\t\t\tCreateAt: time.Now().Unix(),\n\t\t\tErr: fmt.Errorf(\"taskid:%s cmd Wait failed: %s\", t.TaskId, err),\n\t\t}\n\t\terrstr := fmt.Sprintf(\"%s\", err.Error())\n\t\tif errstr == \"signal: killed\" {\n\t\t\tts.Status = StatusKilled\n\t\t}\n\t\tt.Job.Dcms.JobStatusChan <- ts\n\t\treturn\n\t}\n\t// log.Warning(\"task run DONE\")\n\tts = &TaskStatus{\n\t\tTaskPtr: t,\n\t\tCommand: cmd,\n\t\tStatus: StatusSuccess,\n\t\tCreateAt: time.Now().Unix(),\n\t\tErr: nil,\n\t}\n\tt.Job.Dcms.JobStatusChan <- ts\n\treturn\n}", "func (gsu *GameServerUpdate) Exec(ctx context.Context) error {\n\t_, err := gsu.Save(ctx)\n\treturn err\n}", "func (tu *TransactionfactorUpdate) Exec(ctx context.Context) error {\n\t_, err := tu.Save(ctx)\n\treturn err\n}", "func (tuo *TimingUpdateOne) Exec(ctx context.Context) error {\n\t_, err := 
tuo.Save(ctx)\n\treturn err\n}", "func (mu *MannerUpdate) Exec(ctx context.Context) error {\n\t_, err := mu.Save(ctx)\n\treturn err\n}", "func (mu *MedicalfileUpdate) Exec(ctx context.Context) error {\n\t_, err := mu.Save(ctx)\n\treturn err\n}", "func Execute() {\n\t// Load config\n\tloadConfig()\n\n\t// Init Redis Client\n\tinitRedisClient()\n\n\t// Synchronize composer.phar\n\tgo composerPhar(\"composerPhar\", 1)\n\n\t// Synchronize packages.json\n\tgo packagesJsonFile(\"PackagesJson\", 1)\n\n\t// Update status\n\tgo status(\"Status\", 1)\n\n\tWg.Add(1)\n\n\tfor i := 0; i < 12; i++ {\n\t\tgo providers(\"Provider\", i)\n\t}\n\n\tfor i := 0; i < 30; i++ {\n\t\tgo packages(\"Packages\", i)\n\t}\n\n\tfor i := 0; i < 50; i++ {\n\t\tgo dists(\"Dists\", i)\n\t}\n\n\tfor i := 0; i < 1; i++ {\n\t\tgo distsRetry(403, i)\n\t}\n\n\tfor i := 0; i < 1; i++ {\n\t\tgo distsRetry(500, i)\n\t}\n\n\tfor i := 0; i < 1; i++ {\n\t\tgo distsRetry(502, i)\n\t}\n\n}", "func (e *executor) Execute() error {\n\tif len(e.executables) < 1 {\n\t\treturn errors.New(\"nothing to Work\")\n\t}\n\n\tlog(e.id).Infof(\"processing %d item(s)\", len(e.executables))\n\treturn nil\n}", "func (du *DeviceUpdate) Exec(ctx context.Context) error {\n\t_, err := du.Save(ctx)\n\treturn err\n}", "func (ft *CustomTask) Exec(t *f.TaskNode, p *f.Params, out *io.PipeWriter) {\n\tglog.Info(\"executing custom task \", p.Complete)\n\n\tft.customFunc(t, p, out)\n\n\treturn\n}", "func (r *remoteRuntimeService) Exec(ctx context.Context, req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {\n\tklog.V(10).InfoS(\"[RemoteRuntimeService] Exec\", \"timeout\", r.timeout)\n\tctx, cancel := context.WithTimeout(ctx, r.timeout)\n\tdefer cancel()\n\n\treturn r.execV1(ctx, req)\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() 
{\n\tcmd.Execute()\n}", "func (au *AnnouncementUpdate) Exec(ctx context.Context) error {\n\t_, err := au.Save(ctx)\n\treturn err\n}", "func (tfm *trxFlowMonitor) execute() {\n\tdefer func() {\n\t\tclose(tfm.sigStop)\n\t\ttfm.mgr.finished(tfm)\n\t}()\n\n\t// do initial trx count update\n\tgo tfm.updateCount()\n\n\t// start to control the monitor\n\ttfm.flowTicker = time.NewTicker(trxFlowUpdaterPeriod)\n\ttfm.countTicker = time.NewTicker(trxCountUpdaterPeriod)\n\n\t// loop here\n\tfor {\n\t\tselect {\n\t\tcase <-tfm.sigStop:\n\t\t\treturn\n\t\tcase <-tfm.flowTicker.C:\n\t\t\trepo.TrxFlowUpdate()\n\t\tcase <-tfm.countTicker.C:\n\t\t\tgo tfm.updateCount()\n\t\t}\n\t}\n}", "func (ggc *GithubGistCreate) Exec(ctx context.Context) error {\n\t_, err := ggc.Save(ctx)\n\treturn err\n}", "func (pu *PatientrecordUpdate) Exec(ctx context.Context) error {\n\t_, err := pu.Save(ctx)\n\treturn err\n}", "func (p PHPEnvPHPinfoCLI) Execute(options tasks.Options, upstream map[string]tasks.Result) tasks.Result {\n\tresult := tasks.Result{\n\t\tStatus: tasks.None,\n\t\tSummary: \"PHP Agent was not detected on this host. 
Skipping PHP info check.\",\n\t}\n\tif upstream[\"PHP/Config/Agent\"].Status != tasks.Success {\n\t\treturn result\n\t}\n\n\t//Running PHP info\n\n\tgatheredOutput, err := p.gatherPHPInfoCLI()\n\n\tif err != nil {\n\t\tresult.Status = tasks.Error\n\t\tresult.Summary = \"error executing PHP -i\"\n\t\treturn result\n\t}\n\n\tresult.Status = tasks.Success\n\tresult.Summary = \"PHP info has been gathered\"\n\tresult.Payload = gatheredOutput\n\n\treturn result\n}", "func (luuo *LastUpdatedUpdateOne) Exec(ctx context.Context) error {\n\t_, err := luuo.Save(ctx)\n\treturn err\n}", "func (ktuo *KqiTargetUpdateOne) Exec(ctx context.Context) error {\n\t_, err := ktuo.Save(ctx)\n\treturn err\n}", "func (fu *FlowUpdate) Exec(ctx context.Context) error {\n\t_, err := fu.Save(ctx)\n\treturn err\n}", "func Execute(bTime, gHash string) {\n\tbuildTime = bTime\n\tgitHash = gHash\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}", "func (au *AntenatalinformationUpdate) ExecX(ctx context.Context) {\n\tif err := au.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (aru *AppointmentResultsUpdate) Exec(ctx context.Context) error {\n\t_, err := aru.Save(ctx)\n\treturn err\n}", "func (su *StateUpdate) Exec(ctx context.Context) error {\n\t_, err := su.Save(ctx)\n\treturn err\n}", "func (duo *DoctorinfoUpdateOne) Exec(ctx context.Context) error {\n\t_, err := duo.Save(ctx)\n\treturn err\n}", "func Execute() {\n\t// cfg contains tenant related information, e.g. `travel0-dev`,\n\t// `travel0-prod`. some of its information can be sourced via:\n\t// 1. env var (e.g. AUTH0_API_KEY)\n\t// 2. global flag (e.g. --api-key)\n\t// 3. JSON file (e.g. 
api_key = \"...\" in ~/.config/auth0/config.json)\n\tcli := &cli{\n\t\trenderer: display.NewRenderer(),\n\t\ttracker: analytics.NewTracker(),\n\t}\n\n\trootCmd := buildRootCmd(cli)\n\n\trootCmd.SetUsageTemplate(namespaceUsageTemplate())\n\taddPersistentFlags(rootCmd, cli)\n\taddSubcommands(rootCmd, cli)\n\n\t// TODO(cyx): backport this later on using latest auth0/v5.\n\t// rootCmd.AddCommand(actionsCmd(cli))\n\t// rootCmd.AddCommand(triggersCmd(cli))\n\n\tdefer func() {\n\t\tif v := recover(); v != nil {\n\t\t\terr := fmt.Errorf(\"panic: %v\", v)\n\n\t\t\t// If we're in development mode, we should throw the\n\t\t\t// panic for so we have less surprises. For\n\t\t\t// non-developers, we'll swallow the panics.\n\t\t\tif instrumentation.ReportException(err) {\n\t\t\t\tfmt.Println(panicMessage)\n\t\t\t} else {\n\t\t\t\tpanic(v)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// platform specific terminal initialization:\n\t// this should run for all commands,\n\t// for most of the architectures there's no requirements:\n\tansi.InitConsole()\n\n\tcancelCtx := contextWithCancel()\n\tif err := rootCmd.ExecuteContext(cancelCtx); err != nil {\n\t\tcli.renderer.Heading(\"error\")\n\t\tcli.renderer.Errorf(err.Error())\n\n\t\tinstrumentation.ReportException(err)\n\t\tos.Exit(1)\n\t}\n\n\ttimeoutCtx, cancel := context.WithTimeout(cancelCtx, 3*time.Second)\n\t// defers are executed in LIFO order\n\tdefer cancel()\n\tdefer cli.tracker.Wait(timeoutCtx) // No event should be tracked after this has run, or it will panic e.g. 
in earlier deferred functions\n}", "func (du *DatumUpdate) Exec(ctx context.Context) error {\n\t_, err := du.Save(ctx)\n\treturn err\n}", "func (t *Test) exec(tc testCommand) error {\n\tswitch cmd := tc.(type) {\n\tcase *clearCmd:\n\t\treturn t.clear()\n\n\tcase *loadCmd:\n\t\treturn cmd.append()\n\n\tcase *evalCmd:\n\t\texpr, err := parser.ParseExpr(cmd.expr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tt := time.Unix(0, startingTime+(cmd.start.Unix()*1000000000))\n\t\tbodyBytes, err := cmd.m3query.query(expr.String(), t)\n\t\tif err != nil {\n\t\t\tif cmd.fail {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn errors.Wrapf(err, \"error in %s %s, line %d\", cmd, cmd.expr, cmd.line)\n\t\t}\n\t\tif cmd.fail {\n\t\t\treturn fmt.Errorf(\"expected to fail at %s %s, line %d\", cmd, cmd.expr, cmd.line)\n\t\t}\n\n\t\terr = cmd.compareResult(bodyBytes)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error in %s %s, line %d. m3query response: %s\", cmd, cmd.expr, cmd.line, string(bodyBytes))\n\t\t}\n\n\tdefault:\n\t\tpanic(\"promql.Test.exec: unknown test command type\")\n\t}\n\treturn nil\n}", "func (ruo *RevisionUpdateOne) Exec(ctx context.Context) error {\n\t_, err := ruo.Save(ctx)\n\treturn err\n}", "func (du *DoctorinfoUpdate) ExecX(ctx context.Context) {\n\tif err := du.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (puo *PharmacistUpdateOne) Exec(ctx context.Context) error {\n\t_, err := puo.Save(ctx)\n\treturn err\n}", "func (ktu *KqiTargetUpdate) ExecX(ctx context.Context) {\n\tif err := ktu.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (tu *TagUpdate) Exec(ctx context.Context) error {\n\t_, err := tu.Save(ctx)\n\treturn err\n}", "func (du *DiagnosisUpdate) Exec(ctx context.Context) error {\n\t_, err := du.Save(ctx)\n\treturn err\n}", "func (pu *PrenameUpdate) Exec(ctx context.Context) error {\n\t_, err := pu.Save(ctx)\n\treturn err\n}", "func (auo *AntenatalinformationUpdateOne) Exec(ctx context.Context) error {\n\t_, err := 
auo.Save(ctx)\n\treturn err\n}", "func (bu *BankdetailUpdate) Exec(ctx context.Context) error {\n\t_, err := bu.Save(ctx)\n\treturn err\n}", "func (du *DealUpdate) Exec(ctx context.Context) error {\n\t_, err := du.Save(ctx)\n\treturn err\n}", "func (du *DoctorUpdate) Exec(ctx context.Context) error {\n\t_, err := du.Save(ctx)\n\treturn err\n}", "func (or *orchestrator) execute() {\n\tdefer func() {\n\t\tor.mgr.finished(or)\n\t}()\n\n\t// access the new heads queue\n\t// it's filled with new heads as the connected node processes blocks from the network\n\theads := repo.ObservedHeaders()\n\tfor {\n\t\tselect {\n\t\tcase <-or.sigStop:\n\t\t\treturn\n\t\tcase h, ok := <-heads:\n\t\t\tif ok {\n\t\t\t\tor.handleNewHead(h)\n\t\t\t}\n\t\tcase idle, ok := <-or.inScanStateSwitch:\n\t\t\tif ok {\n\t\t\t\tor.pushHeads = idle\n\t\t\t\tif idle {\n\t\t\t\t\tor.unloadCache()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (tu *TransactionUpdate) Exec(ctx context.Context) error {\n\t_, err := tu.Save(ctx)\n\treturn err\n}", "func (tu *TeamUpdate) ExecX(ctx context.Context) {\n\tif err := tu.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (e *SoracomExecutor) Execute(s string) {\n\tfmt.Printf(\"%s\", e.worker.Execute(s))\n}", "func (oiuo *OrderInfoUpdateOne) Exec(ctx context.Context) error {\n\t_, err := oiuo.Save(ctx)\n\treturn err\n}", "func (pu *PharmacistUpdate) ExecX(ctx context.Context) {\n\tif err := pu.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (ttru *TradeTimeRangeUpdate) Exec(ctx context.Context) error {\n\t_, err := ttru.Save(ctx)\n\treturn err\n}", "func (piuo *ProviderIDUpdateOne) Exec(ctx context.Context) error {\n\t_, err := piuo.Save(ctx)\n\treturn err\n}", "func (tu *TimingUpdate) ExecX(ctx context.Context) {\n\tif err := tu.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (ts *TaskService) Exec(requestCtx context.Context, req *taskAPI.ExecProcessRequest) (*types.Empty, error) {\n\tdefer logPanicAndDie(log.G(requestCtx))\n\n\ttaskID := 
req.ID\n\texecID := req.ExecID\n\n\tlogger := log.G(requestCtx).WithField(\"TaskID\", taskID).WithField(\"ExecID\", execID)\n\tlogger.Debug(\"exec\")\n\n\textraData, err := unmarshalExtraData(req.Spec)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unmarshal extra data\")\n\t}\n\n\t// Just provide runc the options it knows about, not our wrapper\n\treq.Spec = extraData.RuncOptions\n\n\tbundleDir := bundle.Dir(filepath.Join(containerRootDir, taskID))\n\n\tvar ioConnectorSet vm.IOProxy\n\n\tif vm.IsAgentOnlyIO(req.Stdout, logger) {\n\t\tioConnectorSet = vm.NewNullIOProxy()\n\t} else {\n\t\t// Override the incoming stdio FIFOs, which have paths from the host that we can't use\n\t\tfifoSet, err := cio.NewFIFOSetInDir(bundleDir.RootPath(), fifoName(taskID, execID), req.Terminal)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"failed opening stdio FIFOs\")\n\t\t\treturn nil, errors.Wrap(err, \"failed to open stdio FIFOs\")\n\t\t}\n\n\t\tvar stdinConnectorPair *vm.IOConnectorPair\n\t\tif req.Stdin != \"\" {\n\t\t\treq.Stdin = fifoSet.Stdin\n\t\t\tstdinConnectorPair = &vm.IOConnectorPair{\n\t\t\t\tReadConnector: vm.VSockAcceptConnector(extraData.StdinPort),\n\t\t\t\tWriteConnector: vm.FIFOConnector(fifoSet.Stdin),\n\t\t\t}\n\t\t}\n\n\t\tvar stdoutConnectorPair *vm.IOConnectorPair\n\t\tif req.Stdout != \"\" {\n\t\t\treq.Stdout = fifoSet.Stdout\n\t\t\tstdoutConnectorPair = &vm.IOConnectorPair{\n\t\t\t\tReadConnector: vm.FIFOConnector(fifoSet.Stdout),\n\t\t\t\tWriteConnector: vm.VSockAcceptConnector(extraData.StdoutPort),\n\t\t\t}\n\t\t}\n\n\t\tvar stderrConnectorPair *vm.IOConnectorPair\n\t\tif req.Stderr != \"\" {\n\t\t\treq.Stderr = fifoSet.Stderr\n\t\t\tstderrConnectorPair = &vm.IOConnectorPair{\n\t\t\t\tReadConnector: vm.FIFOConnector(fifoSet.Stderr),\n\t\t\t\tWriteConnector: vm.VSockAcceptConnector(extraData.StderrPort),\n\t\t\t}\n\t\t}\n\n\t\tioConnectorSet = vm.NewIOConnectorProxy(stdinConnectorPair, stdoutConnectorPair, 
stderrConnectorPair)\n\t}\n\n\tresp, err := ts.taskManager.ExecProcess(requestCtx, req, ts.runcService, ioConnectorSet)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"exec failed\")\n\t\treturn nil, err\n\t}\n\n\tlogger.Debug(\"exec succeeded\")\n\treturn resp, nil\n}", "func (gsuo *GameServerUpdateOne) Exec(ctx context.Context) error {\n\t_, err := gsuo.Save(ctx)\n\treturn err\n}", "func main() {\n\tcmd.Execute(version, gitCommit, buildDate)\n}", "func (tru *TradeRecordUpdate) Exec(ctx context.Context) error {\n\t_, err := tru.Save(ctx)\n\treturn err\n}", "func (tuo *TeamUpdateOne) ExecX(ctx context.Context) {\n\tif err := tuo.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (luo *LocationUpdateOne) Exec(ctx context.Context) error {\n\t_, err := luo.Save(ctx)\n\treturn err\n}", "func (huo *HeartbeatUpdateOne) Exec(ctx context.Context) error {\n\t_, err := huo.Save(ctx)\n\treturn err\n}", "func (c CommitterProbe) Execute(ctx context.Context) error {\n\treturn c.execute(ctx)\n}", "func (oiu *OrderInfoUpdate) ExecX(ctx context.Context) {\n\tif err := oiu.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (cc *computer) execute() error {\n\tif cc.instrPointer < 0 || cc.instrPointer >= len(cc.instructions) {\n\t\treturn oobInstrPointerError{ptr: cc.instrPointer, len: len(cc.instructions)}\n\t}\n\tinstr := cc.instructions[cc.instrPointer]\n\tcc.history[cc.instrPointer]++\n\n\tswitch instr.op {\n\tcase \"nop\":\n\t\tcc.instrPointer++\n\t\treturn nil\n\tcase \"acc\":\n\t\tcc.accumulator += instr.arg\n\t\tcc.instrPointer++\n\t\treturn nil\n\tcase \"jmp\":\n\t\tcc.instrPointer += instr.arg\n\t\treturn nil\n\tdefault:\n\t\treturn errors.Errorf(\"could not parse instruction %v\", instr)\n\t}\n}", "func (otu *OutboundTransactionUpdate) Exec(ctx context.Context) error {\n\t_, err := otu.Save(ctx)\n\treturn err\n}", "func LogExec(activityID string, start time.Time, err error) {\n\tlatency := time.Since(start)\n\tzap.L().Info(\"execSql\", zap.Int(\"latency\", 
int(latency.Seconds()*1000)), zap.Bool(\"success\", err == nil), zap.String(\"activityId\", activityID))\n}", "func (oupu *OrgUnitPositionUpdate) Exec(ctx context.Context) error {\n\t_, err := oupu.Save(ctx)\n\treturn err\n}", "func (auo *AntenatalinformationUpdateOne) ExecX(ctx context.Context) {\n\tif err := auo.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (tuo *TransactionfactorUpdateOne) Exec(ctx context.Context) error {\n\t_, err := tuo.Save(ctx)\n\treturn err\n}", "func (oupuo *OrgUnitPositionUpdateOne) Exec(ctx context.Context) error {\n\t_, err := oupuo.Save(ctx)\n\treturn err\n}", "func (muo *MoneytransferUpdateOne) Exec(ctx context.Context) error {\n\t_, err := muo.Save(ctx)\n\treturn err\n}", "func (ktuo *KqiTargetUpdateOne) ExecX(ctx context.Context) {\n\tif err := ktuo.Exec(ctx); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (pu *PostUpdate) Exec(ctx context.Context) error {\n\t_, err := pu.Save(ctx)\n\treturn err\n}" ]
[ "0.6369432", "0.60868657", "0.6056162", "0.59250087", "0.5914857", "0.58780116", "0.5856472", "0.5839285", "0.58369344", "0.5797308", "0.579371", "0.5787672", "0.5784593", "0.57815367", "0.5781444", "0.57663274", "0.5749594", "0.57355666", "0.5730872", "0.57300276", "0.5721693", "0.5720637", "0.569663", "0.56965965", "0.56776094", "0.56605667", "0.56595623", "0.56578326", "0.5652874", "0.56472856", "0.5643849", "0.56383276", "0.56357455", "0.56310254", "0.56210506", "0.56180584", "0.5604307", "0.5604112", "0.56040907", "0.56040907", "0.56040907", "0.56040907", "0.56040907", "0.56040907", "0.56040907", "0.55993307", "0.5582437", "0.5576658", "0.5574813", "0.5570445", "0.5569784", "0.556589", "0.55653495", "0.55556035", "0.55555594", "0.5554476", "0.5550044", "0.5549404", "0.5547896", "0.55406857", "0.554043", "0.55383307", "0.5538069", "0.553801", "0.5536996", "0.55315316", "0.5524325", "0.55235773", "0.55231595", "0.5523018", "0.55145425", "0.5508743", "0.55023974", "0.5498917", "0.54980767", "0.5497026", "0.54922545", "0.54794794", "0.5476043", "0.54708165", "0.54661125", "0.5463561", "0.54635036", "0.54630595", "0.54498184", "0.544958", "0.5446724", "0.5446494", "0.54457325", "0.5444082", "0.54408705", "0.54385066", "0.5438325", "0.54342663", "0.543318", "0.54314995", "0.54314053", "0.5430086", "0.5429873", "0.54266226" ]
0.60363847
3
IsOpen returns wether the MIDI in port is open
func (o *in) IsOpen() (open bool) { o.RLock() open = o.isOpen o.RUnlock() return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (i *in) IsOpen() (open bool) {\n\t//\ti.RLock()\n\topen = i.midiIn != nil\n\t//i.RUnlock()\n\treturn\n}", "func (p *TSocket) IsOpen() bool {\n\tif p.conn == nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (o *out) IsOpen() bool {\n\treturn o.stream != nil\n}", "func (pipe *PipeWS) IsOpen() bool {\n\treturn pipe.open\n}", "func (a ProblemAdapter) IsOpen() bool {\n\treturn a.GetState() == \"OPEN\"\n}", "func (p *TBufferedReadTransport) IsOpen() bool {\n\treturn true\n}", "func (ses *Ses) IsOpen() bool {\n\treturn ses.checkClosed() == nil\n}", "func IsOpen() bool {\n\treturn store != nil\n}", "func IsOpen() bool {\n\tif adp != nil {\n\t\treturn adp.IsOpen()\n\t}\n\n\treturn false\n}", "func (p *RedisConnectionPool) IsOpen() bool {\n\treturn nil != p.myPool\n}", "func (this *Window) IsOpen() bool {\n\treturn sfBool2Go(C.sfWindow_isOpen(this.cptr))\n}", "func (d *Door) IsOpen() bool {\n\t_, ok := d.DoorState.(OpenDoorState)\n\treturn ok\n}", "func (m *display) Open() (bool) {\n if m.status == OPENED {\n return true\n }\n\n config := &serial.Config{\n Name: m.options.Port,\n Baud: m.options.Baud,\n ReadTimeout: 1 * time.Second,\n }\n\n var err error\n m.port, err = serial.OpenPort(config)\n if err != nil {\n //log.Println(err)\n return false\n }\n\n m.status = OPENED\n return true\n}", "func TestIsOpen(t *testing.T) {\n\t_, err := Open()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !IsOpen() {\n\t\tt.Error(\"Expected IsOpen to be true\")\n\t}\n\tCloseAll()\n}", "func (c *Circuit) IsOpen() bool {\n\tif c == nil {\n\t\treturn false\n\t}\n\tif c.threadSafeConfig.CircuitBreaker.ForceOpen.Get() {\n\t\treturn true\n\t}\n\tif c.threadSafeConfig.CircuitBreaker.ForcedClosed.Get() {\n\t\treturn false\n\t}\n\treturn c.isOpen.Get()\n}", "func isOpen() bool {\n\tif _, err := exec.Command(\"bash\", \"-c\", \"ps cax | grep Nox\").Output(); err != nil {\n\t\tlog.Println(\"not open\")\n\t\treturn false\n\t}\n\tlog.Println(\"open\")\n\treturn true\n}", "func (body *Body) 
IsOpen() bool {\n\treturn body.isOpen\n}", "func (env *Env) IsOpen() bool {\n\tenv.RLock()\n\tok := env.ocienv != nil\n\tenv.RUnlock()\n\treturn ok\n}", "func (g *GitDriver) IsOpen() bool {\n\tif g.Repository == nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (t DefaultBuildManager) QueueIsOpen() bool {\n\treturn <-getShutdownChan == \"open\"\n}", "func (p *Position) IsOpen() bool {\n\treturn p.EntranceOrder() != nil && p.ExitOrder() == nil\n}", "func (ch *Channel) IsClosed() bool {\n\treturn atomic.LoadInt32(&ch.closed) == 1\n}", "func IsOpen(err error) bool {\n\tfor err != nil {\n\t\tif bserr, ok := err.(stater); ok {\n\t\t\treturn bserr.State() == Open\n\t\t}\n\n\t\tif cerr, ok := err.(causer); ok {\n\t\t\terr = cerr.Cause()\n\t\t}\n\t}\n\treturn false\n}", "func (l *line) Open() bool {\n\treturn l.Prefix == \"\" && (l.Name == \"metadata\" || l.Name == \"spec\")\n}", "func (t *Tx) IsOpen() bool {\n\treturn t.Status == TxOpen\n}", "func TestIsOpen(t *testing.T) {\n\tmockTr := new(mockTTransport)\n\ttr := NewTFramedTransport(mockTr)\n\tmockTr.On(\"IsOpen\").Return(true)\n\n\tassert.True(t, tr.IsOpen())\n\tmockTr.AssertExpectations(t)\n}", "func (cl *Client) Open() (bool, error) {\n\n\tif cl.connected {\n\t\treturn false, errors.New(\"already connected\")\n\t}\n\t_, err := cl.netQuery(commandPersist, buildBoolRequest(true), nil)\n\treturn err == nil, err\n}", "func (device *Device) isClosed() bool {\n\treturn device.deviceState() == deviceStateClosed\n}", "func (s *Server) isClosed() bool {\n\treturn atomic.LoadInt64(&s.closed) == 1\n}", "func (r *Producer) Connected() bool {\n\treturn (atomic.LoadInt32(&r.open) == 1)\n}", "func (g PercentageOfTimeGate) IsOpen(f feature.Feature, a actor.Actor) bool {\n\tr := seed.Uint32()\n\treturn r < (g.value / rangeFactor)\n}", "func (c *StreamerController) IsClosed() bool {\n\tc.RLock()\n\tdefer c.RUnlock()\n\n\treturn c.closed\n}", "func (b *Consecutive) Open() bool {\n\tb.mutex.Lock()\n\tdefer b.mutex.Unlock()\n\n\tif 
b.instantProvider == nil {\n\t\tb.instantProvider = systemTimer\n\t}\n\n\tswitch {\n\tcase b.state == closed:\n\t\treturn false\n\tcase b.nextClose.Before(b.instantProvider.Now()):\n\t\tb.reset()\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n\n\tpanic(\"unreachable\")\n}", "func (g GroupGate) IsOpen(f feature.Feature, a actor.Actor) bool {\n\tfor name := range g.value {\n\t\tif f, ok := registry[name]; ok && f(a) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (console *testConsole) isClosed() bool {\n\tconsole.closedMx.RLock()\n\tclosed := console.closed\n\tconsole.closedMx.RUnlock()\n\treturn closed\n}", "func (l *Logger) isClosed() (ok bool) {\n\treturn l.closed.Get()\n}", "func (s *Session) IsClosed() bool {\n\treturn s.IsShutdown()\n}", "func (socket *Socket) IsClosed() bool {\n\tsocket.Lock()\n\tdefer socket.Unlock()\n\treturn socket.closed\n}", "func (rx *RotationX) IsClosed() bool {\r\n\treturn rx.Primitive.IsClosed()\r\n}", "func (p *Percolator) IsOpen(row, col int) bool {\n\treturn p.sites.Get(p.calculateIndex(row, col)).Value\n}", "func IsClosed() bool {\n\treturn server.IsClosed()\n}", "func (r *Runtime) IsConnected() bool { return r.isConnected }", "func (i *in) Open() (err error) {\n\tif i.IsOpen() {\n\t\treturn nil\n\t}\n\n\t//i.Lock()\n\n\ti.midiIn, err = rtmidi.NewMIDIInDefault()\n\tif err != nil {\n\t\ti.midiIn = nil\n\t\t//i.Unlock()\n\t\treturn fmt.Errorf(\"can't open default MIDI in: %v\", err)\n\t}\n\n\terr = i.midiIn.OpenPort(i.number, \"\")\n\t//i.Unlock()\n\n\tif err != nil {\n\t\ti.Close()\n\t\treturn fmt.Errorf(\"can't open MIDI in port %v (%s): %v\", i.number, i, err)\n\t}\n\n\t//i.driver.Lock()\n\t//i.midiIn.IgnoreTypes(i.driver.ignoreSysex, i.driver.ignoreTimeCode, i.driver.ignoreActiveSense)\n\ti.driver.opened = append(i.driver.opened, i)\n\t//i.driver.Unlock()\n\n\treturn nil\n}", "func (s *Session) Closed() bool {\n\treturn s.s == nil\n}", "func (me TitemIconStateEnumType) IsOpen() bool { return me == \"open\" 
}", "func (r *RLockedFile) IsClosed() bool {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\treturn r.refs == 0\n}", "func (s *Station) IsOpen(time time.Time) bool {\n\treturn s.OpeningDate.Before(time)\n}", "func (m *display) Close() bool {\n if m.status == CLOSED {\n return true\n }\n\n if err := m.port.Close(); err != nil {\n return false\n }\n m.status = CLOSED\n return true\n}", "func (s *Server) IsClosed() bool {\n\treturn s.listener.IsClosed()\n}", "func isChanClosed(ch chan struct{}) bool {\n\tselect {\n\tcase <-ch:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (dev *Device) Close() bool {\n\tif err := dev.port.Close(); err != nil {\n\t\tlog.Printf(\"close err: %s\", err)\n\t\treturn false\n\t}\n\treturn true\n}", "func (ref *EventClientRef) Closed() bool {\n\treturn atomic.LoadInt32(&ref.closed) == 1\n}", "func (q *MyQueue) IsClose() bool {\n\treturn q.closed\n}", "func (blt *Bolt) Open() bool {\n\treturn blt.opened\n}", "func (cd *CloseDetector) IsClosed() bool {\n\treturn cd.closed\n}", "func (r *RecordStream) Closed() bool { return r.state == closed || r.state == serverLost }", "func (a *AdminClient) IsClosed() bool {\n\treturn atomic.LoadUint32(&a.isClosed) == 1\n}", "func (r *Summary) IsOpen() bool {\n\treturn !r.Submitted && !r.IsAbandoned()\n}", "func proxyOpen(item ProxyItem) bool {\n\tvar ipPort = fmt.Sprintf(\"%s:%d\", item.IP, item.Port)\n\t_, err := net.DialTimeout(\"tcp\", ipPort, time.Second)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (image *image) IsClosed() bool {\n\treturn image.isClosed.Get()\n}", "func (sub *Subscription) IsClosed() bool {\n return sub.closed\n}", "func (c *NATSTestClient) IsClosed() bool {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn !c.connected\n}", "func (_BREMICO *BREMICOSession) HasClosed() (bool, error) {\n\treturn _BREMICO.Contract.HasClosed(&_BREMICO.CallOpts)\n}", "func (cli *Client) Closed() bool {\n\treturn cli.State() == Closed\n}", "func (mrc *MockReadCloser) 
IsClosed() bool {\n\treturn mrc.closed\n}", "func (hs100 *Hs100) IsOn() (bool, error) {\n\tresp, err := hs100.commandSender.SendCommand(hs100.Address, isOnCommand)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\ton, err := isOn(resp)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn on, nil\n}", "func (client *SyncClient) IsClosed() bool {\n\treturn client.cClient == nil\n}", "func (e *FbEvent) IsClosed() bool {\n\treturn atomic.LoadInt32(&e.closed) == 1\n}", "func (c *Conn) IsClosed() bool {\n\treturn atomic.LoadInt32(&c.closeFlag) == 1\n}", "func (_BREMICO *BREMICOCallerSession) HasClosed() (bool, error) {\n\treturn _BREMICO.Contract.HasClosed(&_BREMICO.CallOpts)\n}", "func (q *queue) IsClosed() bool {\n\tif q.closed {\n\t\treturn true\n\t}\n\n\tselect {\n\tcase <-q.ch:\n\t\tq.closed = true\n\tdefault:\n\t\tq.closed = q.Size() > 0\n\t}\n\treturn q.closed\n}", "func (s *Snapshot) Open() bool {\n\tif atomic.LoadInt32(&s.refCount) == 0 {\n\t\treturn false\n\t}\n\tatomic.AddInt32(&s.refCount, 1)\n\treturn true\n}", "func (manager *transportManager) IsClosed() bool {\n\t// If the context is cancelled, the livesOnce is closed.\n\treturn manager.ctx.Err() != nil\n}", "func CheckOpenPort(addr string) error {\n\t// Attempt to listen on port to determine if it is already in use\n\tln, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ln.Close()\n\tif err != nil {\n\t\treturn errors.New(\"could not stop listening when checking open port\")\n\t}\n\n\treturn nil\n}", "func (d *DB) isClosed() bool {\n\treturn atomic.LoadUint32(&d.closed) != 0\n}", "func (s *Conn) IsClosed() bool {\n\treturn s.state == stateClosed\n}", "func (d *Device) Connected() bool {\n\tdata := []byte{0}\n\td.bus.ReadRegister(uint8(d.Address), WHO_AM_I, data)\n\treturn data[0] == 0x40\n}", "func IsSessionClosed(s *mgo.Session) (res bool) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Print(\"[MGO2_IS_SESSION_CLOSED] check session closed 
panic:\", err)\n\t\t}\n\t}()\n\tres = true\n\treturn s.Ping() != nil\n}", "func (p *RedisConnectionPool) IsClosed() bool {\n\treturn nil == p.myPool\n}", "func (throttler *Throttler) CheckIsOpen() error {\n\tif throttler.IsOpen() {\n\t\t// all good\n\t\treturn nil\n\t}\n\treturn ErrThrottlerNotOpen\n}", "func (mwc *MockWriteCloser) IsClosed() bool {\n\treturn mwc.closed\n}", "func (b *OGame) IsConnected() bool {\n\treturn atomic.LoadInt32(&b.isConnectedAtom) == 1\n}", "func IsDocked() (docked bool) {\n\tf, err := os.Open(IBMDockDocked)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\tstatus := make([]byte, 1)\n\t_, err = f.Read(status)\n\tif err != nil {\n\t\treturn\n\t}\n\tdocked = status[0] == '1'\n\treturn\n}", "func (w *WindowWidget) IsOpen(open *bool) *WindowWidget {\n\tw.open = open\n\treturn w\n}", "func (d *Device) Connected() bool {\n\tdata1, data2 := d.buf[:1], d.buf[1:2]\n\td.bus.ReadRegister(d.AccelAddress, WHO_AM_I, data1)\n\td.bus.ReadRegister(d.MagAddress, WHO_AM_I_M, data2)\n\treturn data1[0] == 0x68 && data2[0] == 0x3D\n}", "func (bs *Listener) IsClosed() bool {\n\tbs.mu.Lock()\n\tclosed := bs.isClosed\n\tbs.mu.Unlock()\n\n\treturn closed\n}", "func (g *Game) IsClosed() bool {\n\treturn g.isClosed\n}", "func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) {\n\treturn exclusiveOpenFailsOnDevice(mounter.logger, pathname)\n}", "func (f *FsWatcher) IsStarted() bool {\r\n\treturn f.started\r\n}", "func (c *minecraftConn) Closed() bool {\n\tselect {\n\tcase <-c.closed:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (w *writer) IsClosed() bool {\n\tw.mutex.Lock()\n\tdefer w.mutex.Unlock()\n\treturn w.isClosed\n}", "func (c *Conn) IsClosed() bool {\n\treturn c.pgConn.IsClosed()\n}", "func (subs *Subscription) IsClosed() bool {\n\tsubs.mutex.Lock()\n\tdefer subs.mutex.Unlock()\n\treturn subs.closed\n}", "func (d *Device) Connected() bool {\n\tdata := d.buf[:1]\n\td.bus.ReadRegister(uint8(d.Address), WHO_AM_I, 
data)\n\treturn data[0] == 0x6A\n}", "func (o *V0037JobProperties) HasOpenMode() bool {\n\tif o != nil && o.OpenMode != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (dev *Device) Close() bool {\n\tdev.Ok = false\n\tclose(dev.chQuit)\n\tif err := dev.port.Close(); err != nil {\n\t\tlog.Printf(\"close err: %s\", err)\n\t\treturn false\n\t}\n\treturn true\n}", "func (o *out) Open() (err error) {\n\tif o.stream != nil {\n\t\treturn nil\n\t}\n\to.stream, err = portmidi.NewOutputStream(o.id, o.driver.buffersizeOut, 0)\n\tif err != nil {\n\t\to.stream = nil\n\t\treturn fmt.Errorf(\"can't open MIDI out port %v (%s): %v\", o.Number(), o, err)\n\t}\n\to.driver.opened = append(o.driver.opened, o)\n\treturn nil\n}", "func (p *Port) Open() {\n\tif !p.closed {\n\t\treturn\n\t}\n\n\tp.closed = false\n\n\tif p.buf != nil {\n\t\tp.buf = make(chan interface{}, CHANNEL_SIZE)\n\t}\n\n\tif p.sub != nil {\n\t\tp.sub.Open()\n\t}\n\n\tif len(p.subs) > 0 {\n\t\tfor _, sub := range p.subs {\n\t\t\tsub.Open()\n\t\t}\n\t}\n}", "func (p *Path) Closed() bool {\n\tvar cmd float64\n\tfor i := p.i0; i < len(p.d); {\n\t\tcmd = p.d[i]\n\t\ti += cmdLen(cmd)\n\t}\n\treturn cmd == closeCmd\n}", "func (frame *Frame) IsClosed() bool {\n\t// if frame.header != nil {\n\t// \tif !frame.header.closed {\n\t// \t\treturn false\n\t// \t}\n\t// }\n\t//\n\t// for _, line := range frame.activeLines {\n\t// \tif !line.closed {\n\t// \t\treturn false\n\t// \t}\n\t// }\n\t//\n\t// if frame.footer != nil {\n\t// \tif !frame.footer.closed {\n\t// \t\treturn false\n\t// \t}\n\t// }\n\t// return true\n\treturn frame.closed\n}" ]
[ "0.7955156", "0.71728", "0.710641", "0.71045107", "0.68884057", "0.6873219", "0.679175", "0.67898077", "0.6747613", "0.66009116", "0.6595629", "0.65706176", "0.651939", "0.63915795", "0.6387186", "0.6384081", "0.6356068", "0.63177913", "0.62845826", "0.6273986", "0.6177326", "0.61513823", "0.6150423", "0.6124868", "0.6111155", "0.60562444", "0.6048596", "0.6033662", "0.5997543", "0.59864914", "0.5979227", "0.5966734", "0.59611225", "0.5950272", "0.59476495", "0.5904142", "0.58587426", "0.5854526", "0.58492404", "0.5818005", "0.5798682", "0.5761718", "0.57524616", "0.5744041", "0.57329667", "0.57174903", "0.57065773", "0.56647635", "0.5644131", "0.5638379", "0.5625699", "0.56150705", "0.56075776", "0.56069803", "0.55899525", "0.55802894", "0.557051", "0.5565655", "0.55496913", "0.5544827", "0.55336535", "0.55311286", "0.5530772", "0.55299205", "0.55197567", "0.55175894", "0.5514207", "0.5514193", "0.55112386", "0.5503532", "0.5502262", "0.5492771", "0.54802716", "0.5474476", "0.54633665", "0.54462385", "0.5439338", "0.54281664", "0.541827", "0.54181516", "0.5415338", "0.5407164", "0.54002535", "0.5397197", "0.53937113", "0.53847617", "0.5384028", "0.53702676", "0.5366964", "0.53640467", "0.5358266", "0.53540474", "0.53365713", "0.533548", "0.5333218", "0.53192115", "0.53098756", "0.5303808", "0.53035885", "0.53025913" ]
0.6914696
4
String returns the name of the MIDI in port.
func (i *in) String() string { return i.name }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *LegoPort) String() string { return fmt.Sprint(portPrefix, p.id) }", "func (p *Port) Name() string {\n\tif p == nil {\n\t\treturn \"<nil>\"\n\t}\n\n\treturn p.String()\n}", "func port() string {\n\treturn strconv.Itoa(int(atomic.AddUint32(nport, 1)))\n}", "func (m *Machine) Port() string {\n\treturn m.port\n}", "func (p *PortInfo) String() string {\n\treturn p.Device\n}", "func (p Port) String() string {\n\tif p.PortName != \"\" {\n\t\treturn p.PortName\n\t} else if p.MinPort == p.MaxPort {\n\t\treturn strconv.FormatUint(uint64(p.MinPort), 10)\n\t} else {\n\t\treturn fmt.Sprintf(\"%d:%d\", p.MinPort, p.MaxPort)\n\t}\n}", "func (o NamedPortOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v NamedPort) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func getMIDIPort(ctx context.Context) (string, error) {\n\tconst emptyPort = \"\"\n\tout, err := testexec.CommandContext(ctx, \"/usr/bin/arecordmidi\", \"-l\").Output()\n\tif err != nil {\n\t\treturn emptyPort, errors.Wrap(err, \"couldn't start arecordmidi\")\n\t}\n\n\tconst MIDIClientName = \"Midi Through\"\n\t// The output of arecordmidi is assumed to be of the following format:\n\t//\n\t// Port Client name Port name\n\t// 14:0 Midi Through Midi Through Port-0\n\t//\n\t// So, we parse the output string and search for the port associated\n\t// with \"Midi Through\" assuming the above.\n\tre := regexp.MustCompile(`(\\d+:\\d+)\\s{2,}(.+)\\s{2,}`)\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tfields := re.FindStringSubmatch(line)\n\t\tif fields == nil {\n\t\t\tcontinue\n\t\t}\n\t\tclient := strings.TrimSpace(fields[2])\n\t\tif client == MIDIClientName {\n\t\t\t// Return the port.\n\t\t\treturn strings.TrimSpace(fields[1]), nil\n\t\t}\n\t}\n\treturn emptyPort, errors.Errorf(\"%q client not found\", MIDIClientName)\n}", "func (o GRPCHealthCheckOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v GRPCHealthCheck) *string { return v.PortName 
}).(pulumi.StringPtrOutput)\n}", "func (i *Port) String() string {\n\treturn i.iface.String() + \" \" + i.in.String() + \" \" + i.out.String()\n}", "func (o TCPHealthCheckOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TCPHealthCheck) *string { return v.PortName }).(pulumi.StringPtrOutput)\n}", "func (c *FromCommand) Port() string {\n\treturn ExpandEnv(c.cmd.args[\"port\"])\n}", "func (c *Config) PortName() string {\n\tswitch c.Protocol {\n\tcase HTTP:\n\t\treturn \"http port\"\n\tcase HTTPS:\n\t\treturn \"https port\"\n\tcase Socks:\n\t\treturn \"socks port\"\n\t}\n\treturn \"\"\n}", "func (h ZmqBUF) Port() string {\n\treturn h.port\n}", "func (o GRPCHealthCheckPtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *GRPCHealthCheck) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func PortToString(port int32) string {\n\tif port >= 0 {\n\t\treturn strconv.Itoa(int(port))\n\t}\n\tif port == EphemeralPort {\n\t\treturn \"*\"\n\t}\n\t// this should never happen since port is either zero/positive or -1 (ephemeral port), no other value is currently supported\n\treturn \"invalid\"\n}", "func (o *Port) Identifier() string {\n\n\treturn o.ID\n}", "func (c parser) GetPort() string {\n\treturn \"0x0000\"\n}", "func (b *BluetoothAdapter) Port() string { return b.portName }", "func (_Erc777 *Erc777Session) Name() (string, error) {\n\treturn _Erc777.Contract.Name(&_Erc777.CallOpts)\n}", "func (s *Socket) Port() string {\n\tif s.port == \"\" {\n\t\ts.port = strings.Split(path.Base(s.fname), \":\")[util.PORT]\n\t\treturn s.port\n\t}\n\treturn s.port\n}", "func (o SSLHealthCheckOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v SSLHealthCheck) *string { return v.PortName }).(pulumi.StringPtrOutput)\n}", "func (c *portChecker) Name() string {\n\treturn portCheckerID\n}", "func (t *TransportPort) String() string {\n\treturn fmt.Sprintf(\"%s/%d\", 
t.Proto.String(), t.Port)\n}", "func (m *Mobile) Name() (string, error) {\n\tif !m.node.Started() {\n\t\treturn \"\", core.ErrStopped\n\t}\n\n\treturn m.node.Name(), nil\n}", "func (o TCPHealthCheckPtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TCPHealthCheck) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func Port() string {\n\treturn strconv.Itoa(app.Port)\n}", "func (o ConnectedRegistryNotificationOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ConnectedRegistryNotification) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o BuildStrategySpecBuildStepsPortsOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsPorts) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o TCPHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TCPHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}", "func channelName() (string, error) {\n\t// Get hostname\n\thost, err := os.Hostname()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to get hostname! Got: %s\", err)\n\t}\n\n\t// Get random bytes\n\tbytes := make([]byte, 4)\n\tn, err := io.ReadFull(rand.Reader, bytes)\n\tif n != len(bytes) || err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to read random bytes! 
Got: %s\", err)\n\t}\n\n\t// Convert to hex\n\th := hex.EncodeToString(bytes)\n\n\t// Return the new name\n\treturn host + \".\" + h, nil\n}", "func (c Channel) String() string {\n\treturn c.name\n}", "func (w TargetService) PortName() string {\n\treturn w.portName\n}", "func (o HTTP2HealthCheckOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheck) *string { return v.PortName }).(pulumi.StringPtrOutput)\n}", "func midi_full_note_name(note byte) string {\n\ts_name := midi_note_name(note, \"#\")\n\tf_name := midi_note_name(note, \"b\")\n\tif s_name != f_name {\n\t\treturn fmt.Sprintf(\"%s/%s\", s_name, f_name)\n\t}\n\treturn s_name\n}", "func (exp *Exposed) Name() string {\n\n\tn := fmt.Sprintf(\"port%d\", exp.BindPort)\n\n\tif exp.IsDefault {\n\t\tn = fmt.Sprintf(\"%s.defaulthost\", n)\n\t} else {\n\t\tn = fmt.Sprintf(\"%s.host-%s\", n, exp.HostName)\n\t}\n\n\tif exp.PathBegins != \"\" && exp.PathBegins != \"/\" {\n\t\tn = fmt.Sprintf(\"%s.path-%s\", n, exp.PathBegins)\n\t}\n\n\treturn n\n}", "func (o HTTPHealthCheckTypeOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTPHealthCheckType) *string { return v.PortName }).(pulumi.StringPtrOutput)\n}", "func (d *Device) Name() string {\n\treturn string(d.Data.name[:])\n}", "func (mc *MeetingCanhui) String() string {\n\treturn mc.Name\n}", "func (p Packet) Name() (name string) {\n\t// todo: think of ways to make this not a compiled in hack\n\t// todo: collectd 4 uses different patterns for some plugins\n\t// https://collectd.org/wiki/index.php/V4_to_v5_migration_guide\n\tswitch p.Plugin {\n\tcase \"df\":\n\t\tname = fmt.Sprintf(\"df_%s_%s\", p.PluginInstance, p.TypeInstance)\n\tcase \"interface\":\n\t\tname = fmt.Sprintf(\"%s_%s\", p.Type, p.PluginInstance)\n\tcase \"load\":\n\t\tname = \"load\"\n\tcase \"memory\":\n\t\tname = fmt.Sprintf(\"memory_%s\", p.TypeInstance)\n\tdefault:\n\t\tname = fmt.Sprintf(\"%s_%s_%s_%s\", p.Plugin, p.PluginInstance, p.Type, 
p.TypeInstance)\n\t}\n\treturn name\n}", "func (Sequencer) Name() compiler.String {\n\treturn compiler.String{\n\t\tcompiler.English: `sequencer`,\n\t}\n}", "func (o HTTP2HealthCheckPtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *HTTP2HealthCheck) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func (t *tmux) socketName() string { return fmt.Sprintf(\"%s@%s\", t.options.Profile, t.options.Story) }", "func (l *RelayDriver) Name() string { return l.name }", "func (o GRPCHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GRPCHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}", "func (o ClusterBuildStrategySpecBuildStepsPortsOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildStepsPorts) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (n *piName) Name() string {\n\treturn n.name\n}", "func (o SSLHealthCheckPtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *SSLHealthCheck) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o DebugSessionOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DebugSession) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (reca PhoneConnecter9)Name()string{\n\treturn reca.name\n}", "func (j *Joystick) Name() string {\n\treturn C.GoString(C.al_get_joystick_name((*C.ALLEGRO_JOYSTICK)(j)))\n}", "func (o HTTPSHealthCheckTypeOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTPSHealthCheckType) *string { return v.PortName }).(pulumi.StringPtrOutput)\n}", "func (m *OverDIDComm) Name() string {\n\treturn m.name\n}", "func (poc PortOpenCheck) Name() string {\n\tif poc.label != \"\" {\n\t\treturn poc.label\n\t}\n\treturn fmt.Sprintf(\"Port-%d\", poc.port)\n}", "func (o HTTPHealthCheckResponseOutput) 
PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTPHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}", "func (o HTTP2HealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}", "func (o MetricIdentifierPatchOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v MetricIdentifierPatch) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o HTTPHealthCheckTypePtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *HTTPHealthCheckType) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o SSLHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SSLHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}", "func GetPort() string {\n\treturn fmt.Sprintf(\":%d\", viper.GetInt(port))\n}", "func (c *Config) GetPortString() string {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\treturn strconv.Itoa(c.Port)\n}", "func (p *PortForwarder) Port() string {\n\treturn p.tunnel.PortMap()\n}", "func (s *session) Name() string {\n\treturn s.name\n}", "func (o GRPCHealthCheckResponsePtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *GRPCHealthCheckResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func (p *Protocol) Name() string {\n\treturn protocolID\n}", "func (p *Protocol) Name() string {\n\treturn protocolID\n}", "func (p *Protocol) Name() string {\n\treturn protocolID\n}", "func (config *Configuration) PIDFileName() string {\n name := \"~/.run/\" + config.ServiceName + \".pid\"\n name = Util.AbsolutePath(name)\n return name\n}", "func (o TCPHealthCheckResponsePtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TCPHealthCheckResponse) *string {\n\t\tif v == nil 
{\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func (e *EndComponent) Name() string {\n\treturn \"name\"\n}", "func (e MessageType) String() string {\n name, _ := messageTypeMap[int32(e)]\n return name\n}", "func (h *MPU6050Driver) Name() string { return h.name }", "func (p ProcessID) String() string {\n\treturn fmt.Sprintf(\"<%s:%s>\", p.Name, p.Node)\n}", "func (this *channelMeterStruct) name() string {\n\tname := this.channelName\n\treturn name\n}", "func (o HTTPSHealthCheckTypePtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *HTTPSHealthCheckType) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func (s SysFsDevice) name() (name string, err error) {\n\treturn s.Read(\"name\")\n}", "func (o SignalingChannelOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *SignalingChannel) pulumi.StringPtrOutput { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (p portCheck) Name() string {\n\treturn p.name\n}", "func (o HTTP2HealthCheckResponsePtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *HTTP2HealthCheckResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func (b *ModbusBaseServer) String() string {\n\treturn fmt.Sprintf(\"%s:%s\", b.Host, b.Port)\n}", "func (o InstanceGroupNamedPortOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *InstanceGroupNamedPort) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (dlmg RawMessageHandler) Name() string {\n\treturn nameFromFunc(dlmg)\n}", "func GetPort(connID string) string {\n\tid, err := strconv.ParseUint(connID, 16, 64)\n\tif err != nil {\n\t\tid = 0\n\t}\n\treturn strconv.FormatUint(BasePort+id, 10)\n}", "func (_this *URL) Port() string {\n\tvar ret string\n\tvalue := _this.Value_JS.Get(\"port\")\n\tret = (value).String()\n\treturn ret\n}", "func (c 
*Connector) Name() string {\n\tc.cmu.Lock()\n\tdefer c.cmu.Unlock()\n\treturn fmt.Sprintf(\"%s:%s\", c.kind, c.id)\n}", "func (o HTTPHealthCheckResponsePtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *HTTPHealthCheckResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o HTTPSHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTPSHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}", "func (_Wmatic *WmaticSession) Name() (string, error) {\n\treturn _Wmatic.Contract.Name(&_Wmatic.CallOpts)\n}", "func (i Uint64) Name() string {\n\treturn string(i)\n}", "func (m *Mgr) Name() string {\n\treturn \"collection configuration history listener\"\n}", "func (i Uint8) Name() string {\n\treturn string(i)\n}", "func (o SSLHealthCheckResponsePtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *SSLHealthCheckResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func (r OS) Name() string {\n\treturn fmt.Sprintf(\"%v %v\", r.ID, r.Version)\n}", "func (c *MonitorComponent) Name() string {\n\treturn ComponentPrometheus\n}", "func (server *Server) Port() string {\n\treturn server.port\n}", "func (e E_OpenconfigQos_Qos_Classifiers_Classifier_Terms_Term_Conditions_Transport_Config_DestinationPort) String() string {\n\treturn ygot.EnumLogString(e, int64(e), \"E_OpenconfigQos_Qos_Classifiers_Classifier_Terms_Term_Conditions_Transport_Config_DestinationPort\")\n}", "func (e *EDNS) Name() string { return name }", "func (o NamedPortOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v NamedPort) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func mirrorPort() string {\n\treturn \"8123\"\n}", "func (host Host) String() string {\n\tif !host.IsPortSet {\n\t\treturn host.Name\n\t}\n\n\treturn net.JoinHostPort(host.Name, 
host.Port.String())\n}", "func (p *grpcPort) nameForLog() string {\n\treturn fmt.Sprintf(\"grpc://%s [grpc]\", p.listener.Addr())\n}" ]
[ "0.6403726", "0.621417", "0.5972801", "0.59483373", "0.5890254", "0.5816692", "0.572876", "0.56816727", "0.56746775", "0.5657657", "0.56104225", "0.5601564", "0.560065", "0.55906856", "0.5590527", "0.55653834", "0.55536354", "0.55326116", "0.55316645", "0.5511247", "0.55013496", "0.5499417", "0.5496682", "0.54749435", "0.54611874", "0.54259986", "0.54201686", "0.5419527", "0.5417088", "0.5411437", "0.5404419", "0.53920877", "0.5387304", "0.5379005", "0.5360029", "0.53543085", "0.5349365", "0.5345364", "0.53414166", "0.53399765", "0.5315223", "0.53103524", "0.5299479", "0.5295609", "0.52922416", "0.5290484", "0.528796", "0.52874845", "0.52772987", "0.527647", "0.5274803", "0.5254726", "0.52500224", "0.52446693", "0.52387744", "0.5234481", "0.5230212", "0.52291816", "0.52289104", "0.52183044", "0.52165663", "0.5211464", "0.52078605", "0.5197282", "0.5192158", "0.5192158", "0.5192158", "0.5189135", "0.51678866", "0.5163461", "0.51494676", "0.5126801", "0.5126199", "0.51238066", "0.5121485", "0.5119326", "0.51142675", "0.51127774", "0.5108598", "0.5105754", "0.510448", "0.50903326", "0.5071077", "0.5058347", "0.50561595", "0.5051833", "0.5051632", "0.50509155", "0.50502366", "0.5045534", "0.5039864", "0.50325936", "0.50319505", "0.50218165", "0.50198084", "0.50159514", "0.50154674", "0.5015176", "0.5012884", "0.5011685", "0.5011362" ]
0.0
-1
Underlying returns the underlying driver. Here returns the js midi port.
func (i *in) Underlying() interface{} { return i.jsport }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getMIDIPort(ctx context.Context) (string, error) {\n\tconst emptyPort = \"\"\n\tout, err := testexec.CommandContext(ctx, \"/usr/bin/arecordmidi\", \"-l\").Output()\n\tif err != nil {\n\t\treturn emptyPort, errors.Wrap(err, \"couldn't start arecordmidi\")\n\t}\n\n\tconst MIDIClientName = \"Midi Through\"\n\t// The output of arecordmidi is assumed to be of the following format:\n\t//\n\t// Port Client name Port name\n\t// 14:0 Midi Through Midi Through Port-0\n\t//\n\t// So, we parse the output string and search for the port associated\n\t// with \"Midi Through\" assuming the above.\n\tre := regexp.MustCompile(`(\\d+:\\d+)\\s{2,}(.+)\\s{2,}`)\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tfields := re.FindStringSubmatch(line)\n\t\tif fields == nil {\n\t\t\tcontinue\n\t\t}\n\t\tclient := strings.TrimSpace(fields[2])\n\t\tif client == MIDIClientName {\n\t\t\t// Return the port.\n\t\t\treturn strings.TrimSpace(fields[1]), nil\n\t\t}\n\t}\n\treturn emptyPort, errors.Errorf(\"%q client not found\", MIDIClientName)\n}", "func (remote *SerialRemote) Channel() chan []byte {\n\treturn remote.channel\n}", "func (o *os) GetCurrentVideoDriver() _OSVideoDriver {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetCurrentVideoDriver()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_current_video_driver\")\n\n\t// Call the parent method.\n\t// enum._OS::VideoDriver\n\tretPtr := gdnative.NewEmptyInt()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewIntFromPointer(retPtr)\n\treturn _OSVideoDriver(ret)\n}", "func (socket *MockSocket) Browser() *socket.BrowserProtocol {\n\treturn socket.browser\n}", "func (c *nrConnector) Driver() driver.Driver {\n\treturn c.driver\n}", "func (c 
connector) Driver() driver.Driver { return c.drv }", "func (c connector) Driver() driver.Driver { return c.drv }", "func (d *RedisDriver) GetNativeConnector() interface{} {\n\treturn d.connector\n}", "func main() {\n\tdrv, err := driver.New()\n\tu.CheckErr(err)\n\n\t// make sure to close all open ports at the end\n\tdefer drv.Close()\n\n\tins, err := drv.Ins()\n\tu.CheckErr(err)\n\n\touts, err := drv.Outs()\n\tu.CheckErr(err)\n\n\tif len(ins) == 0 || len(outs) == 0 {\n\t\tpanic(\"no such midi device, on mac, you can use Audio Midi Setup > Midi Studio > Enable IAC Driver\")\n\t}\n\n\tlog.Println(ins)\n\tlog.Println(outs)\n\n\tin, out := ins[0], outs[0]\n\n\tu.CheckErr(in.Open())\n\tu.CheckErr(out.Open())\n\n\twr := writer.New(out)\n\n\t// listen for MIDI\n\t// go mid.NewReader().ReadFrom(in)\n\n\tserver, err := socketio.NewServer(nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tserver.OnConnect(\"/\", func(s socketio.Conn) error {\n\t\ts.SetContext(\"\")\n\t\tlog.Println(\"connected:\", s.ID())\n\n\t\tif err := writer.NoteOn(wr, 61, 100); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t\tif err := writer.NoteOff(wr, 61); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\ttype drawingMsg struct {\n\t\tColor string\n\t\tX0 float64\n\t\tX1 float64\n\t\tY0 float64\n\t\tY1 float64\n\t}\n\tserver.OnEvent(\"/\", \"drawing\", func(s socketio.Conn, msg drawingMsg) {\n\t\tif err := func() error {\n\t\t\t//s.Emit(\"reply\", \"have \"+msg)\n\t\t\tnote := uint8(msg.X1 * 127)\n\t\t\tif note < 20 {\n\t\t\t\tnote = 20\n\t\t\t}\n\t\t\tvelocity := uint8(msg.Y1 * 127)\n\t\t\tif velocity < 20 {\n\t\t\t\tvelocity = 20\n\t\t\t}\n\t\t\tlog.Println(\"note:\", note, \"velocity:\", velocity, \"input:\", msg)\n\t\t\tif err := writer.NoteOn(wr, note, velocity); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttime.Sleep(time.Nanosecond * 1000000)\n\t\t\tif err := writer.NoteOff(wr, note); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn 
nil\n\t\t}(); err != nil {\n\t\t\tlog.Printf(\"error on drawing event: %v\", err)\n\t\t}\n\t})\n\tserver.OnError(\"/\", func(e error) {\n\t\tlog.Println(\"error:\", e)\n\n\t\tif err := func() error {\n\t\t\tif err := writer.NoteOn(wr, 62, 100); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t\tif err := writer.NoteOff(wr, 62); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\tlog.Printf(\"socket.io error event: %v\", err)\n\t\t}\n\t})\n\tserver.OnDisconnect(\"/\", func(s socketio.Conn, msg string) {\n\t\tif err := func() error {\n\t\t\tlog.Println(\"closed\", msg)\n\n\t\t\tif err := writer.NoteOn(wr, 63, 100); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t\tif err := writer.NoteOff(wr, 63); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\tlog.Printf(\"error on disconnect event: %v\", err)\n\t\t}\n\t})\n\tgo func() {\n\t\tif err := server.Serve(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\tdefer server.Close()\n\n\thttp.Handle(\"/socket.io/\", server)\n\thttp.Handle(\"/\", http.FileServer(http.Dir(\"./asset\")))\n\tlog.Println(\"Serving at localhost:8000...\")\n\tlog.Fatal(http.ListenAndServe(\":8000\", nil))\n}", "func Interface(ctx context.Context, i, ch uint8) (gate, key, vel control.CV, err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"midi.Interface: %v\", err)\n\t\t}\n\t}()\n\n\tdrv, err := portmididrv.New()\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tins, err := drv.Ins()\n\tif len(ins) <= int(i) {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tin := ins[i]\n\tif err := in.Open(); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\trd := reader.New(reader.NoLogger())\n\n\tgateCh := make(chan modular.V)\n\tkeyCh := make(chan modular.V)\n\tvelCh := make(chan modular.V)\n\n\trd.Channel.NoteOn = func(p *reader.Position, channel, key, vel uint8) {\n\t\tif channel != ch 
{\n\t\t\treturn\n\t\t}\n\t\tgateCh <- 1\n\t\tkeyCh <- modular.V(key)\n\t\tvelCh <- modular.V(vel) / 127\n\t}\n\n\trd.Channel.NoteOff = func(p *reader.Position, channel, key, vel uint8) {\n\t\tif channel != ch {\n\t\t\treturn\n\t\t}\n\t\tgateCh <- 0\n\t\tvelCh <- 0\n\t}\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\tclose(gateCh)\n\t\t\tclose(keyCh)\n\t\t\tclose(velCh)\n\t\t}()\n\t\tif err := rd.ListenTo(in); err != nil {\n\t\t\tpanic(fmt.Errorf(\"midi.Interface: %v\", err))\n\t\t}\n\t}()\n\n\treturn gateCh, keyCh, velCh, nil\n}", "func (i *in) Underlying() interface{} {\n\treturn i.midiIn\n}", "func (i *in) Open() (err error) {\n\tif i.IsOpen() {\n\t\treturn nil\n\t}\n\n\t//i.Lock()\n\n\ti.midiIn, err = rtmidi.NewMIDIInDefault()\n\tif err != nil {\n\t\ti.midiIn = nil\n\t\t//i.Unlock()\n\t\treturn fmt.Errorf(\"can't open default MIDI in: %v\", err)\n\t}\n\n\terr = i.midiIn.OpenPort(i.number, \"\")\n\t//i.Unlock()\n\n\tif err != nil {\n\t\ti.Close()\n\t\treturn fmt.Errorf(\"can't open MIDI in port %v (%s): %v\", i.number, i, err)\n\t}\n\n\t//i.driver.Lock()\n\t//i.midiIn.IgnoreTypes(i.driver.ignoreSysex, i.driver.ignoreTimeCode, i.driver.ignoreActiveSense)\n\ti.driver.opened = append(i.driver.opened, i)\n\t//i.driver.Unlock()\n\n\treturn nil\n}", "func (m *TeamworkActivePeripherals) GetCommunicationSpeaker()(TeamworkPeripheralable) {\n val, err := m.GetBackingStore().Get(\"communicationSpeaker\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(TeamworkPeripheralable)\n }\n return nil\n}", "func (*Radio) Device() string {\n\treturn spiDevice\n}", "func (o *os) GetConnectedMidiInputs() gdnative.PoolStringArray {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetConnectedMidiInputs()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_connected_midi_inputs\")\n\n\t// Call the parent method.\n\t// PoolStringArray\n\tretPtr := 
gdnative.NewEmptyPoolStringArray()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewPoolStringArrayFromPointer(retPtr)\n\treturn ret\n}", "func (socket *MockSocket) Emulation() *socket.EmulationProtocol {\n\treturn socket.emulation\n}", "func (o *os) OpenMidiInputs() {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.OpenMidiInputs()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"open_midi_inputs\")\n\n\t// Call the parent method.\n\t// void\n\tretPtr := gdnative.NewEmptyVoid()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n}", "func openOut(d Driver, number int, name string) (out Out, err error) {\n\touts, err := d.Outs()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't find MIDI output ports: %v\", err)\n\t}\n\n\tif number >= 0 {\n\t\tfor _, port := range outs {\n\t\t\tif number == port.Number() {\n\t\t\t\tout = port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif out == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI output port %v\", number)\n\t\t}\n\t} else {\n\t\tif name != \"\" {\n\t\t\tfor _, port := range outs {\n\t\t\t\tif strings.Contains(port.String(), name) {\n\t\t\t\t\tout = port\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif out == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI output port %v\", name)\n\t\t}\n\t}\n\n\t// should not happen here, since we already returned above\n\tif out == nil {\n\t\tpanic(\"unreachable\")\n\t}\n\n\terr = out.Open()\n\treturn\n}", "func (o IopingSpecVolumeVolumeSourceFlexVolumePtrOutput) Driver() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceFlexVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Driver\n\t}).(pulumi.StringPtrOutput)\n}", 
"func (o *out) Open() (err error) {\n\tif o.stream != nil {\n\t\treturn nil\n\t}\n\to.stream, err = portmidi.NewOutputStream(o.id, o.driver.buffersizeOut, 0)\n\tif err != nil {\n\t\to.stream = nil\n\t\treturn fmt.Errorf(\"can't open MIDI out port %v (%s): %v\", o.Number(), o, err)\n\t}\n\to.driver.opened = append(o.driver.opened, o)\n\treturn nil\n}", "func (socket *MockSocket) DOM() *socket.DOMProtocol {\n\treturn socket.dom\n}", "func (r *Radio) Hardware() *radio.Hardware {\n\treturn r.hw\n}", "func (e *Engine) DefaultOutputDevice() *portaudio.DeviceInfo {\n\tif !e.initialized {\n\t\treturn nil\n\t}\n\tif defaultOutputDeviceInfo, err := portaudio.DefaultOutputDevice(); err != nil {\n\t\treturn nil\n\t} else {\n\t\treturn defaultOutputDeviceInfo\n\t}\n}", "func (res channelBase) Channel() *types.Channel {\n\treturn res.channel\n}", "func (h ZmqBUF) Port() string {\n\treturn h.port\n}", "func (e *Engine) DefaultInputDevice() *portaudio.DeviceInfo {\n\tif !e.initialized {\n\t\treturn nil\n\t}\n\tif defaultInputDeviceInfo, err := portaudio.DefaultInputDevice(); err != nil {\n\t\treturn nil\n\t} else {\n\t\treturn defaultInputDeviceInfo\n\t}\n}", "func (d Display) GetDriver() string {\n\treturn C.GoString(C.caca_get_display_driver(d.Dp))\n}", "func (s TiFlashSpec) GetMainPort() int {\n\treturn s.TCPPort\n}", "func (mf *MethodFrame) Channel() uint16 { return mf.ChannelID }", "func (m *UserSimulationEventInfo) GetBrowser()(*string) {\n return m.browser\n}", "func (h *Host) Port() uint16 {\n}", "func (o IntegrationSlackOutput) NoteChannel() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IntegrationSlack) pulumi.StringPtrOutput { return v.NoteChannel }).(pulumi.StringPtrOutput)\n}", "func (p *HostedProgramInfo) Channel() io.ReadWriteCloser {\n\treturn p.TaoChannel\n}", "func (o ListenerOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v Listener) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (c *Connector) Driver() driver.Driver {\n\treturn 
&Driver{}\n}", "func GetHardware() Hardware { return hardware }", "func (m *PrinterDefaults) GetDuplexMode()(*PrintDuplexMode) {\n val, err := m.GetBackingStore().Get(\"duplexMode\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*PrintDuplexMode)\n }\n return nil\n}", "func getPlayer(outputFormat string) player.NotePlayer {\n\tswitch outputFormat {\n\tcase \"Audio\":\n\t\treturn player.NewWavNotePlayer()\n\tcase \"Text\":\n\t\treturn player.NewTextNotePlayer()\n\tdefault:\n\t\treturn player.NewWavNotePlayer()\n\t}\n\n}", "func (m *Media) GetCallerDevice()(DeviceInfoable) {\n return m.callerDevice\n}", "func (o FioSpecVolumeVolumeSourceFlexVolumePtrOutput) Driver() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceFlexVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Driver\n\t}).(pulumi.StringPtrOutput)\n}", "func (m *MetricsExtracor) Channel() chan<- interface{} {\n\treturn m.channel\n}", "func (p *Port) Delegate() *Delegate {\n\treturn p.delegate\n}", "func (m *Monitor) GetDesktopMode() VideoMode {\n\treturn m.internal.getDesktopMode()\n}", "func (o PgbenchSpecPostgresPtrOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *PgbenchSpecPostgres) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Port\n\t}).(pulumi.IntPtrOutput)\n}", "func (socket *MockSocket) Target() *socket.TargetProtocol {\n\treturn socket.target\n}", "func (d *portworx) GetDriver() (*v1.StorageCluster, error) {\n\t// TODO: Need to implement it for Daemonset deployment as well, right now its only for StorageCluster\n\tstcList, err := pxOperator.ListStorageClusters(d.namespace)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed get StorageCluster list from namespace [%s], Err: %v\", d.namespace, err)\n\t}\n\n\tstc, err := pxOperator.GetStorageCluster(stcList.Items[0].Name, stcList.Items[0].Namespace)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get StorageCluster [%s] from 
namespace [%s], Err: %v\", stcList.Items[0].Name, stcList.Items[0].Namespace, err.Error())\n\t}\n\n\treturn stc, nil\n}", "func (s serverConfigV14) GetBrowser() string {\n\tserverConfigMu.RLock()\n\tdefer serverConfigMu.RUnlock()\n\n\treturn s.Browser\n}", "func (p *FlutterGoMidiPlugin) InitPlugin(messenger plugin.BinaryMessenger) error {\n\tdrv, err = driver.New()\n\n\tchannel := plugin.NewMethodChannel(messenger, channelName, plugin.StandardMethodCodec{})\n\n\teventChannel := plugin.NewEventChannel(messenger, channelEventName, plugin.StandardMethodCodec{})\n\teventChannel.Handle(p)\n\n\tchannel.HandleFunc(\"getPlatformVersion\", p.handlePlatformVersion)\n\tchannel.HandleFunc(\"outs\", p.outs)\n\tchannel.HandleFunc(\"ins\", p.ins)\n\tchannel.HandleFunc(\"openInPort\", p.openInPort)\n\tchannel.HandleFunc(\"openOutPort\", p.openOutPort)\n\tchannel.HandleFunc(\"closeInPort\", p.closeInPort)\n\tchannel.HandleFunc(\"closeOutPort\", p.closeOutPort)\n\tchannel.HandleFunc(\"listen\", p.listen)\n\tchannel.HandleFunc(\"noteOn\", p.noteOn)\n\tchannel.HandleFunc(\"noteOff\", p.noteOff)\n\tchannel.HandleFunc(\"lightOn\", p.lightOn)\n\tchannel.HandleFunc(\"lightOff\", p.lightOff)\n\tchannel.HandleFunc(\"lightOffAll\", p.lightOffAll)\n\tchannel.HandleFunc(\"initPopPiano\", p.initPopPiano)\n\n\trd = reader.New(\n\t\t// reader.Each(func(_ *reader.Position, msg midi.Message) {\n\t\t// \tfmt.Println(\"Each\", msg.String())\n\t\t// }),\n\n\t\treader.NoteOn(func(p *reader.Position, channel, key, velocity uint8) {\n\t\t\t// fmt.Println(\"NoteOn\", channel, key, velocity)\n\t\t\tevent := map[interface{}]interface{}{\n\t\t\t\t\"event\": \"NoteOn\",\n\t\t\t\t\"channel\": int32(channel),\n\t\t\t\t\"key\": int32(key),\n\t\t\t\t\"velocity\": int32(velocity),\n\t\t\t}\n\t\t\teventSink.Success(event)\n\t\t}),\n\t\treader.NoteOff(func(p *reader.Position, channel, key, velocity uint8) {\n\t\t\t// fmt.Println(\"NoteOn\", channel, key, velocity)\n\t\t\tevent := 
map[interface{}]interface{}{\n\t\t\t\t\"event\": \"NoteOff\",\n\t\t\t\t\"channel\": int32(channel),\n\t\t\t\t\"key\": int32(key),\n\t\t\t\t\"velocity\": int32(velocity),\n\t\t\t}\n\t\t\teventSink.Success(event)\n\t\t}),\n\t)\n\n\treturn nil\n}", "func (h *Handler) getPlatform(m *v1alpha1.PerconaServerMongoDB) v1alpha1.Platform {\n\tif m.Spec.Platform != nil {\n\t\treturn *m.Spec.Platform\n\t}\n\tif h.serverVersion != nil {\n\t\treturn h.serverVersion.Platform\n\t}\n\treturn v1alpha1.PlatformKubernetes\n}", "func (self Switch) ToNative() unsafe.Pointer {\n\treturn unsafe.Pointer(self.object)\n}", "func (o GroupExposedPortOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GroupExposedPort) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (o IopingSpecVolumeVolumeSourceCsiPtrOutput) Driver() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceCsi) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Driver\n\t}).(pulumi.StringPtrOutput)\n}", "func (c *Client) Channel() string {\n\treturn c.channel\n}", "func (s Song) ToMIDI() midi.MIDI {\n\tvar ticks uint16 = 64 //should be inputed or calculated\n\tm := midi.MIDI{\n\t\tTracks: []midi.Track{},\n\t}\n\n\tvar ch uint8 = 0\n\tfor name, stave := range s.Instruments {\n\t\tt := midi.Track{\n\t\t\tmidi.MetaChannelPrefix(ch),\n\t\t\tmidi.MetaInstrument(name),\n\t\t}\n\t\tt = append(t, UpdateTempo(s.Tempo).ToMIDI(0, 0, 0)...)\n\t\tt = append(t, UpdateMeter(s.Meter).ToMIDI(0, 0, 0)...)\n\t\tt = append(t, UpdateKey(s.Key, s.Maj).ToMIDI(0, 0, 0)...)\n\n\t\tfor stave.HasNext() {\n\t\t\tt = append(t, stave.Next().ToMIDI(ticks, ch, 40)...)\n\t\t}\n\n\t\tt = append(t, midi.MetaEndTrack())\n\n\t\tm.Tracks = append(m.Tracks, t)\n\t\tch++\n\t}\n\n\tm.Header = midi.Header{\n\t\tTracks: uint16(len(m.Tracks)),\n\t\tFormat: midi.Single,\n\t\tDivision: ticks,\n\t}\n\treturn m\n}", "func (v *Switch) native() *C.GtkSwitch {\n\tif v == nil {\n\t\treturn nil\n\t}\n\tptr := 
unsafe.Pointer(v.Object.Native())\n\treturn C.toGtkSwitch(ptr)\n}", "func (o GRPCHealthCheckPtrOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *GRPCHealthCheck) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Port\n\t}).(pulumi.IntPtrOutput)\n}", "func (v *Volume) Driver() string {\n\treturn v.config.Driver\n}", "func (s *PhysicalSensor) GetPort() byte {\n\treturn s.port\n}", "func (device *SilentStepperBrick) GetMotorCurrent() (current uint16, err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Get(uint8(FunctionGetMotorCurrent), buf.Bytes())\n\tif err != nil {\n\t\treturn current, err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 10 {\n\t\t\treturn current, fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 10)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn current, DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tresultBuf := bytes.NewBuffer(resultBytes[8:])\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &current)\n\n\t}\n\n\treturn current, nil\n}", "func (s *TiFlashSpec) GetMainPort() int {\n\treturn s.TCPPort\n}", "func (o HybridConnectionOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *HybridConnection) pulumi.IntOutput { return v.Port }).(pulumi.IntOutput)\n}", "func (m *MetricsHolder) Channel() chan<- interface{} {\n\treturn m.channel\n}", "func (self *SinglePad) Connect(rawPad interface{}) {\n self.Object.Call(\"connect\", rawPad)\n}", "func (socket *MockSocket) Runtime() *socket.RuntimeProtocol {\n\treturn socket.runtime\n}", "func (o IopingSpecVolumeVolumeSourceFlexVolumeOutput) Driver() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceFlexVolume) string { return v.Driver }).(pulumi.StringOutput)\n}", "func (m *Machine) Port() string {\n\treturn m.port\n}", "func (ticker *PausableTicker) GetChannel() <-chan time.Time {\n\treturn 
ticker.channel\n}", "func (o NamedPortOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v NamedPort) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (a *MockApp) Driver() fyne.Driver {\n\targs := a.Called()\n\treturn args.Get(0).(fyne.Driver)\n}", "func (v *SettingsBackend) native() *C.GSettingsBackend {\n\tif v == nil || v.GObject == nil {\n\t\treturn nil\n\t}\n\treturn C.toGSettingsBackend(unsafe.Pointer(v.GObject))\n}", "func openIn(d Driver, number int, name string) (in In, err error) {\n\tins, err := d.Ins()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't find MIDI input ports: %v\", err)\n\t}\n\n\tif number >= 0 {\n\t\tfor _, port := range ins {\n\t\t\tif number == port.Number() {\n\t\t\t\tin = port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif in == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI input port %v\", number)\n\t\t}\n\t} else {\n\t\tif name != \"\" {\n\t\t\tfor _, port := range ins {\n\t\t\t\tif strings.Contains(port.String(), name) {\n\t\t\t\t\tin = port\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif in == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI input port %v\", name)\n\t\t}\n\t}\n\n\t// should not happen here, since we already returned above\n\tif in == nil {\n\t\tpanic(\"unreachable\")\n\t}\n\n\terr = in.Open()\n\treturn\n}", "func (o FioSpecVolumeVolumeSourceCsiPtrOutput) Driver() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceCsi) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Driver\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ElastigroupIntegrationNomadPtrOutput) MasterPort() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *ElastigroupIntegrationNomad) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.MasterPort\n\t}).(pulumi.IntPtrOutput)\n}", "func (socket *MockSocket) Debugger() *socket.DebuggerProtocol {\n\treturn socket.debugger\n}", "func (m *TeamworkActivePeripherals) GetSpeaker()(TeamworkPeripheralable) {\n val, err := 
m.GetBackingStore().Get(\"speaker\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(TeamworkPeripheralable)\n }\n return nil\n}", "func (b *BluetoothAdapter) Port() string { return b.portName }", "func (f *Factor) driver() mfaDriver {\n\tif f.drv == nil {\n\t\tf.drv = newDriver(f)\n\t}\n\treturn f.drv\n}", "func (s *Server) GetPort() (port uint16) {\n\tport = defaultIrcPort\n\tif s.Port != 0 {\n\t\tport = s.Port\n\t} else if s.parent != nil && s.parent.Global.Port != 0 {\n\t\tport = s.parent.Global.Port\n\t}\n\treturn\n}", "func NewWriter(to midi.Writer) io.Writer {\n\tp := &iowriter{}\n\tp.to = to\n\tp.from = midireader.New(&p.bf, p.writeRealtime)\n\treturn p\n}", "func NewWriter(to midi.Writer) io.Writer {\n\tp := &iowriter{}\n\tp.to = to\n\tp.from = midireader.New(&p.bf, p.writeRealtime)\n\treturn p\n}", "func (t Tie) ToMIDI(ticks uint16, ch, vel uint8) []midi.Event {\n\tout := t[0].ToMIDI(ticks, ch, vel)\n\n\tout[len(out)/2].SetDelta(midi.VLQ(t.TickDuration(ticks)))\n\treturn out\n}", "func (m *IosiPadOSWebClip) GetUseManagedBrowser()(*bool) {\n val, err := m.GetBackingStore().Get(\"useManagedBrowser\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}", "func (self *holder) GetPort() int {\n\treturn self.port\n}", "func (o GRPCHealthCheckOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GRPCHealthCheck) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func NewMIDIOutDefault() (MIDIOut, error) {\n\tout := C.rtmidi_out_create_default()\n\tif !out.ok {\n\t\tdefer C.rtmidi_out_free(out)\n\t\treturn nil, errors.New(C.GoString(out.msg))\n\t}\n\treturn &midiOut{out: out, midi: midi{midi: C.RtMidiPtr(out)}}, nil\n}", "func (_Upgradeable *UpgradeableSession) Implementation() (common.Address, error) {\n\treturn _Upgradeable.Contract.Implementation(&_Upgradeable.CallOpts)\n}", "func (c ConnectionAdapter) Channel() (Channel, error) {\n\treturn c.Connection.Channel()\n}", "func (conn 
*Connection) Channel() chan []byte {\n\treturn conn.channel\n}", "func mirrorPort() string {\n\treturn \"8123\"\n}", "func (p *Pydio) Name() string {\n\treturn pydioBackend\n}", "func Port(port uint16) *types.Port {\n\treturn &types.Port{Port: port}\n}", "func (o *AudioStreamPlayer) GetBus() gdnative.String {\n\t//log.Println(\"Calling AudioStreamPlayer.GetBus()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"AudioStreamPlayer\", \"get_bus\")\n\n\t// Call the parent method.\n\t// String\n\tretPtr := gdnative.NewEmptyString()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewStringFromPointer(retPtr)\n\treturn ret\n}", "func serialDefault() (device string, speed int) {\n\treturn defaultSerialDefault()\n}", "func (c *SpannerConnector) Driver() driver.Driver {\n\treturn &SpannerDriver{}\n}", "func (p Pin) Port() *Port {\n\treturn (*Port)(unsafe.Pointer(p.h &^ 0xf))\n}", "func (o FioSpecVolumeVolumeSourceFlexVolumeOutput) Driver() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceFlexVolume) string { return v.Driver }).(pulumi.StringOutput)\n}", "func New() messageprocessors.PayloadEncodeDecoder {\n\treturn &host{\n\t\tengine: js.New(scripting.DefaultOptions),\n\t}\n}", "func New() messageprocessors.PayloadEncodeDecoder {\n\treturn &host{\n\t\tengine: js.New(scripting.DefaultOptions),\n\t}\n}", "func (o ListenerPtrOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *Listener) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Port\n\t}).(pulumi.IntPtrOutput)\n}", "func (socket *MockSocket) Console() *socket.ConsoleProtocol {\n\treturn socket.console\n}", "func (self Assistant) ToNative() unsafe.Pointer {\n\treturn unsafe.Pointer(self.object)\n}" ]
[ "0.5289841", "0.5230791", "0.51282376", "0.5072998", "0.5056589", "0.504264", "0.504264", "0.5040927", "0.5034585", "0.50150853", "0.4984029", "0.48842004", "0.48419452", "0.48164445", "0.4814614", "0.47880062", "0.47832578", "0.46753252", "0.46344787", "0.4626876", "0.46245325", "0.4597473", "0.4590366", "0.45741507", "0.4538774", "0.45241094", "0.4520923", "0.45191035", "0.45144206", "0.4510043", "0.45092836", "0.45060822", "0.45021737", "0.44985208", "0.4498282", "0.4493469", "0.44923505", "0.44790426", "0.4478851", "0.4471104", "0.44691584", "0.4461157", "0.44534433", "0.44519177", "0.44515064", "0.44202864", "0.44193262", "0.44177148", "0.44171226", "0.4410756", "0.44079497", "0.43992963", "0.43958622", "0.43862158", "0.4372005", "0.43596217", "0.43592992", "0.43495706", "0.4349537", "0.43474784", "0.4343629", "0.4334088", "0.43332666", "0.43194333", "0.43152794", "0.43102443", "0.43089142", "0.43011096", "0.42977557", "0.42946362", "0.42938578", "0.42795673", "0.4266192", "0.42595917", "0.4253754", "0.42518815", "0.42479497", "0.4240431", "0.4232073", "0.4232073", "0.42298046", "0.42277277", "0.42276904", "0.42189318", "0.4217004", "0.4208249", "0.42005023", "0.4194911", "0.41901323", "0.41879234", "0.4184253", "0.4184041", "0.4183209", "0.41831225", "0.41817936", "0.4179975", "0.4175639", "0.4175639", "0.4166191", "0.41608825", "0.41597435" ]
0.0
-1
Number returns the number of the MIDI in port. Note that with rtmidi, out and in ports are counted separately. That means there might exists out ports and an in ports that share the same number.
func (i *in) Number() int { return i.number }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *Port) Num() int {\n\treturn int(portnum(p))\n}", "func (o *out) Number() int {\n\treturn int(o.id)\n}", "func nr(resName string) int {\n\th := fnv.New32a()\n\th.Write([]byte(resName))\n\tn := h.Sum32()\n\n\tnr := n % (defs.DrbdPortMax - defs.DrbdPortMin)\n\n\treturn int(nr)\n}", "func (p Port) Int() int {\n\treturn int(p)\n}", "func SignalToNumber(s os.Signal) int {\n\treturn int(s.(syscall.Signal))\n}", "func openOut(d Driver, number int, name string) (out Out, err error) {\n\touts, err := d.Outs()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't find MIDI output ports: %v\", err)\n\t}\n\n\tif number >= 0 {\n\t\tfor _, port := range outs {\n\t\t\tif number == port.Number() {\n\t\t\t\tout = port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif out == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI output port %v\", number)\n\t\t}\n\t} else {\n\t\tif name != \"\" {\n\t\t\tfor _, port := range outs {\n\t\t\t\tif strings.Contains(port.String(), name) {\n\t\t\t\t\tout = port\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif out == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI output port %v\", name)\n\t\t}\n\t}\n\n\t// should not happen here, since we already returned above\n\tif out == nil {\n\t\tpanic(\"unreachable\")\n\t}\n\n\terr = out.Open()\n\treturn\n}", "func (o *Member) GetPort() int32 {\n\tif o == nil || o.Port == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Port\n}", "func (r *Cluster) Port() pulumi.IntOutput {\n\treturn (pulumi.IntOutput)(r.s.State[\"port\"])\n}", "func openIn(d Driver, number int, name string) (in In, err error) {\n\tins, err := d.Ins()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't find MIDI input ports: %v\", err)\n\t}\n\n\tif number >= 0 {\n\t\tfor _, port := range ins {\n\t\t\tif number == port.Number() {\n\t\t\t\tin = port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif in == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI input port %v\", number)\n\t\t}\n\t} else {\n\t\tif name != \"\" 
{\n\t\t\tfor _, port := range ins {\n\t\t\t\tif strings.Contains(port.String(), name) {\n\t\t\t\t\tin = port\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif in == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI input port %v\", name)\n\t\t}\n\t}\n\n\t// should not happen here, since we already returned above\n\tif in == nil {\n\t\tpanic(\"unreachable\")\n\t}\n\n\terr = in.Open()\n\treturn\n}", "func (o SrvRecordRecordOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v SrvRecordRecord) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o AppTemplateContainerLivenessProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerLivenessProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o NamedPortResponseOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v NamedPortResponse) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o AppTemplateContainerReadinessProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerReadinessProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func port() string {\n\treturn strconv.Itoa(int(atomic.AddUint32(nport, 1)))\n}", "func (o HybridConnectionOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *HybridConnection) pulumi.IntOutput { return v.Port }).(pulumi.IntOutput)\n}", "func (o GetAppTemplateContainerLivenessProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerLivenessProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func getMIDIPort(ctx context.Context) (string, error) {\n\tconst emptyPort = \"\"\n\tout, err := testexec.CommandContext(ctx, \"/usr/bin/arecordmidi\", \"-l\").Output()\n\tif err != nil {\n\t\treturn emptyPort, errors.Wrap(err, \"couldn't start arecordmidi\")\n\t}\n\n\tconst MIDIClientName = \"Midi Through\"\n\t// The output of arecordmidi is assumed to be of the following format:\n\t//\n\t// Port Client name Port name\n\t// 14:0 Midi Through Midi Through Port-0\n\t//\n\t// So, 
we parse the output string and search for the port associated\n\t// with \"Midi Through\" assuming the above.\n\tre := regexp.MustCompile(`(\\d+:\\d+)\\s{2,}(.+)\\s{2,}`)\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tfields := re.FindStringSubmatch(line)\n\t\tif fields == nil {\n\t\t\tcontinue\n\t\t}\n\t\tclient := strings.TrimSpace(fields[2])\n\t\tif client == MIDIClientName {\n\t\t\t// Return the port.\n\t\t\treturn strings.TrimSpace(fields[1]), nil\n\t\t}\n\t}\n\treturn emptyPort, errors.Errorf(\"%q client not found\", MIDIClientName)\n}", "func (o SRVRecordRecordOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v SRVRecordRecord) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o TCPHealthCheckResponseOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v TCPHealthCheckResponse) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o GetAppTemplateContainerReadinessProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerReadinessProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o NetworkEndpointResponseOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v NetworkEndpointResponse) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o ServiceResponseOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v ServiceResponse) int { return v.Port }).(pulumi.IntOutput)\n}", "func (s *Server) Port() int {\n return s.ln.Addr().(*net.TCPAddr).Port\n}", "func (s *Srt) Renumber() int {\n\treturn s.ForEach(func(i int, sub *Subtitle) bool {\n\t\tsub.Num = i\n\t\treturn true\n\t})\n}", "func (rm *ResourceManager) GetOutgoingNodeNumByReplica(replica *Replica) map[string]int32 {\n\trm.rwmutex.RLock()\n\tdefer rm.rwmutex.RUnlock()\n\n\tif rm.groups[replica.GetResourceGroup()] == nil {\n\t\treturn nil\n\t}\n\n\trg := rm.groups[replica.GetResourceGroup()]\n\tret := make(map[string]int32)\n\tfor _, node := range replica.GetNodes() {\n\t\tif !rg.containsNode(node) {\n\t\t\trgName, err := 
rm.findResourceGroupByNode(node)\n\t\t\tif err == nil {\n\t\t\t\tret[rgName]++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ret\n}", "func (o GetSrvRecordRecordOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetSrvRecordRecord) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o GetSrvRecordRecordOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetSrvRecordRecord) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o GetRecordResultOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetRecordResult) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o InstanceGroupNamedPortOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *InstanceGroupNamedPort) pulumi.IntOutput { return v.Port }).(pulumi.IntOutput)\n}", "func (o GetResolverForwardingRuleTargetDnsServerOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetResolverForwardingRuleTargetDnsServer) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o GRPCHealthCheckResponseOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GRPCHealthCheckResponse) int { return v.Port }).(pulumi.IntOutput)\n}", "func (self *holder) GetPort() int {\n\treturn self.port\n}", "func (o HTTPHealthCheckResponseOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v HTTPHealthCheckResponse) int { return v.Port }).(pulumi.IntOutput)\n}", "func (c *Container) Port(name string) int {\n\treturn c.Ports.Get(name).Port\n}", "func (o NamedPortOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v NamedPort) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (o GroupContainerReadinessProbeHttpGetOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GroupContainerReadinessProbeHttpGet) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (s *Server) Port() (int, error) {\n\tcj, err := s.ContainerInspect(context.Background(), s.ContainerID)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tportBindings := cj.HostConfig.PortBindings\n\n\tif len(portBindings) == 0 
{\n\t\treturn 0, fmt.Errorf(\"no ports bound for container %s\", s.ContainerName)\n\t}\n\n\tvar port int\n\n\tfor _, v := range portBindings {\n\t\tp, err := strconv.Atoi(v[0].HostPort)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"error reading container port: %s\", err)\n\t\t}\n\n\t\tport = p\n\t}\n\n\tif port == 0 {\n\t\tpanic(\"port is 0\")\n\t}\n\n\treturn port, nil\n}", "func InByNumber(portNumber int) (in In, err error) {\n\tdrv := Get()\n\tif drv == nil {\n\t\treturn nil, fmt.Errorf(\"no driver registered\")\n\t}\n\treturn openIn(drv, portNumber, \"\")\n}", "func (h *Host) Port() uint16 {\n}", "func (w TargetService) PortNumber() uint32 {\n\treturn w.portNumber\n}", "func (mds *metadataService) Port() int {\n\treturn mds.listener.Addr().(*net.TCPAddr).Port\n}", "func (e CastEntry) GetPort() int {\n\treturn e.Port\n}", "func (o AppTemplateContainerStartupProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerStartupProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func (c *Config) Port() int {\n\tport, err := strconv.Atoi(c.Get(\"port\", strconv.Itoa(default_port)))\n\tif nil != err {\n\t\tpanic(err)\n\t}\n\treturn port\n}", "func (o ClusterOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *Cluster) pulumi.IntOutput { return v.Port }).(pulumi.IntOutput)\n}", "func (info *endpointsInfo) Port() (int, error) {\n\treturn int(info.port), nil\n}", "func (o SSLHealthCheckResponseOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v SSLHealthCheckResponse) int { return v.Port }).(pulumi.IntOutput)\n}", "func (cc ClusterConfig) Port(name string) uint32 {\n\tif name == cc.ID {\n\t\treturn uint32(cc.GatewayPort)\n\t}\n\tfor _, peer := range cc.WatchedPeers {\n\t\tif name == peer.ID {\n\t\t\treturn uint32(peer.GatewayPort)\n\t\t}\n\t}\n\treturn 8080 // dummy value for unknown clusters\n}", "func (p *Port) MapLength() int {\n\treturn len(p.subs)\n}", "func (s *Server) Port() (uint16, error) {\n\tif s.AnnouncedPort 
!= 0 {\n\t\treturn s.ServerConfig.AnnouncedPort, nil\n\t}\n\tvar port uint16\n\t_, portStr, err := net.SplitHostPort(s.transport.Address())\n\tif err != nil {\n\t\tport = s.ServerConfig.Port\n\t} else {\n\t\tp, err := strconv.ParseUint(portStr, 10, 16)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tport = uint16(p)\n\t}\n\treturn port, nil\n}", "func (o DatabaseReplicaOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *DatabaseReplica) pulumi.IntOutput { return v.Port }).(pulumi.IntOutput)\n}", "func (i *in) Open() (err error) {\n\tif i.IsOpen() {\n\t\treturn nil\n\t}\n\n\t//i.Lock()\n\n\ti.midiIn, err = rtmidi.NewMIDIInDefault()\n\tif err != nil {\n\t\ti.midiIn = nil\n\t\t//i.Unlock()\n\t\treturn fmt.Errorf(\"can't open default MIDI in: %v\", err)\n\t}\n\n\terr = i.midiIn.OpenPort(i.number, \"\")\n\t//i.Unlock()\n\n\tif err != nil {\n\t\ti.Close()\n\t\treturn fmt.Errorf(\"can't open MIDI in port %v (%s): %v\", i.number, i, err)\n\t}\n\n\t//i.driver.Lock()\n\t//i.midiIn.IgnoreTypes(i.driver.ignoreSysex, i.driver.ignoreTimeCode, i.driver.ignoreActiveSense)\n\ti.driver.opened = append(i.driver.opened, i)\n\t//i.driver.Unlock()\n\n\treturn nil\n}", "func (cc *CrontabConfig) NameToNumber(i int, s string) int {\n\treturn lookupNameIndex(cc.Fields[i].rangeNames, s)\n}", "func (o PgbenchSpecPostgresOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v PgbenchSpecPostgres) int { return v.Port }).(pulumi.IntOutput)\n}", "func (e DomainEvent) Number() int {\n\treturn e.number\n}", "func (m *DomainDnsSrvRecord) GetPort()(*int32) {\n val, err := m.GetBackingStore().Get(\"port\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*int32)\n }\n return nil\n}", "func (o GetAppTemplateContainerStartupProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerStartupProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o ListenerOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v Listener) *int { 
return v.Port }).(pulumi.IntPtrOutput)\n}", "func (o HTTP2HealthCheckResponseOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheckResponse) int { return v.Port }).(pulumi.IntOutput)\n}", "func RedisPort() int {\n\tport, _ := strconv.Atoi(Get(\"PERSIST_PORT\", \"6379\"))\n\treturn port\n}", "func (s *Server) Port() int {\n\treturn s.ln.Addr().(*net.TCPAddr).Port\n}", "func Interface(ctx context.Context, i, ch uint8) (gate, key, vel control.CV, err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"midi.Interface: %v\", err)\n\t\t}\n\t}()\n\n\tdrv, err := portmididrv.New()\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tins, err := drv.Ins()\n\tif len(ins) <= int(i) {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tin := ins[i]\n\tif err := in.Open(); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\trd := reader.New(reader.NoLogger())\n\n\tgateCh := make(chan modular.V)\n\tkeyCh := make(chan modular.V)\n\tvelCh := make(chan modular.V)\n\n\trd.Channel.NoteOn = func(p *reader.Position, channel, key, vel uint8) {\n\t\tif channel != ch {\n\t\t\treturn\n\t\t}\n\t\tgateCh <- 1\n\t\tkeyCh <- modular.V(key)\n\t\tvelCh <- modular.V(vel) / 127\n\t}\n\n\trd.Channel.NoteOff = func(p *reader.Position, channel, key, vel uint8) {\n\t\tif channel != ch {\n\t\t\treturn\n\t\t}\n\t\tgateCh <- 0\n\t\tvelCh <- 0\n\t}\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\tclose(gateCh)\n\t\t\tclose(keyCh)\n\t\t\tclose(velCh)\n\t\t}()\n\t\tif err := rd.ListenTo(in); err != nil {\n\t\t\tpanic(fmt.Errorf(\"midi.Interface: %v\", err))\n\t\t}\n\t}()\n\n\treturn gateCh, keyCh, velCh, nil\n}", "func getNumber() int {\n\treturn <-jobIDSource\n}", "func (o *V0037Node) GetPort() int32 {\n\tif o == nil || o.Port == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Port\n}", "func (o GroupContainerLivenessProbeHttpGetOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GroupContainerLivenessProbeHttpGet) *int { return v.Port 
}).(pulumi.IntPtrOutput)\n}", "func (o *out) Open() (err error) {\n\tif o.stream != nil {\n\t\treturn nil\n\t}\n\to.stream, err = portmidi.NewOutputStream(o.id, o.driver.buffersizeOut, 0)\n\tif err != nil {\n\t\to.stream = nil\n\t\treturn fmt.Errorf(\"can't open MIDI out port %v (%s): %v\", o.Number(), o, err)\n\t}\n\to.driver.opened = append(o.driver.opened, o)\n\treturn nil\n}", "func (i ID) Number() (int, error) {\n\tstuff := strings.Split(i.String(), \"/\")\n\tif len(stuff) != 2 {\n\t\treturn 0, fmt.Errorf(\"invalid id: %s\", string(i))\n\t}\n\tnum, err := strconv.Atoi(stuff[1])\n\tif err != nil {\n\t\treturn 0, xerrors.Errorf(\"invalid id: %s, error: %w\", i.String(), err)\n\t}\n\treturn num, nil\n}", "func (o *SmartstackBackend) GetPort() int32 {\n\tif o == nil || o.Port == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Port\n}", "func (o TCPHealthCheckResponsePtrOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *TCPHealthCheckResponse) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Port\n\t}).(pulumi.IntPtrOutput)\n}", "func (p UDPPort) Value() int32 {\n\treturn p.port\n}", "func (o *StorageNetAppCloudTargetAllOf) GetPort() int64 {\n\tif o == nil || o.Port == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.Port\n}", "func (o ListenerPtrOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *Listener) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Port\n\t}).(pulumi.IntPtrOutput)\n}", "func (transport *Transport) Port() int {\n\treturn transport.port\n}", "func (s *Server) GetPort() (port uint16) {\n\tport = defaultIrcPort\n\tif s.Port != 0 {\n\t\tport = s.Port\n\t} else if s.parent != nil && s.parent.Global.Port != 0 {\n\t\tport = s.parent.Global.Port\n\t}\n\treturn\n}", "func (o HTTPSHealthCheckResponseOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v HTTPSHealthCheckResponse) int { return v.Port }).(pulumi.IntOutput)\n}", "func Get() (port int, err error) {\n\treturn 
within(0, 0)\n}", "func (o InstanceMemcacheNodeOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMemcacheNode) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (o TCPHealthCheckPtrOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *TCPHealthCheck) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Port\n\t}).(pulumi.IntPtrOutput)\n}", "func (o TCPHealthCheckOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v TCPHealthCheck) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (p *iowriter) Write(data []byte) (n int, err error) {\n\t_, err = p.bf.Write(data)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar msg midi.Message\n\tmsg, err = p.from.Read()\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tp.bf.Reset()\n\treturn len(msg.Raw()), p.to.Write(msg)\n}", "func (o BackendAddressPoolTunnelInterfaceOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v BackendAddressPoolTunnelInterface) int { return v.Port }).(pulumi.IntOutput)\n}", "func NewRPort() int {\n\tmu.Lock()\n\tlog.Debug(pathLOG + \"[newRPort] Getting new PORT ...\")\n\trport = rport + 1\n\tb := rport\n\tlog.Debug(pathLOG + \"[newRPort] New PORT = \" + strconv.Itoa(b))\n\tmu.Unlock()\n\treturn b\n}", "func GetPort(addr string) int {\n\ts := strings.Split(addr, \":\")\n\tport, _ := strconv.Atoi(s[1])\n\treturn port\n}", "func (o GroupContainerPortOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GroupContainerPort) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (u URL) Port() int {\n\t_, port, err := net.SplitHostPort(u.URL.Host)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tn, err := strconv.Atoi(port)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn n\n}", "func (p LogonRealmExPacket) RealmPort() int {\n\treturn int(binary.BigEndian.Uint16(p[24:26]))\n}", "func (o *Note) GetNumber() int {\n\tif o.diff.Number != nil {\n\t\treturn *o.diff.Number\n\t}\n\treturn o.Number\n}", "func (o 
GetServerGroupServerAttachmentsAttachmentOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetServerGroupServerAttachmentsAttachment) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o *UPNP) GetDeviceCount() gdnative.Int {\n\t//log.Println(\"Calling UPNP.GetDeviceCount()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"UPNP\", \"get_device_count\")\n\n\t// Call the parent method.\n\t// int\n\tretPtr := gdnative.NewEmptyInt()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewIntFromPointer(retPtr)\n\treturn ret\n}", "func (o TargetGroupPtrOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *TargetGroup) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Port\n\t}).(pulumi.IntPtrOutput)\n}", "func (o *StorageRemoteKeySetting) GetPort() int64 {\n\tif o == nil || o.Port == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.Port\n}", "func (t *TeamDiscussion) GetNumber() int {\n\tif t == nil || t.Number == nil {\n\t\treturn 0\n\t}\n\treturn *t.Number\n}", "func (r *ISAAC) Intn(n int) int {\n\tif n <= 0 {\n\t\tpanic(\"invalid argument to Intn\")\n\t}\n\tif n <= 1<<31-1 {\n\t\treturn int(r.Int31n(int32(n)))\n\t}\n\treturn int(r.Int63n(int64(n)))\n}", "func (o GroupExposedPortOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GroupExposedPort) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (o TargetGroupOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v TargetGroup) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (o *Partition) GetNumber(ctx context.Context) (number uint32, err error) {\n\terr = o.object.CallWithContext(ctx, \"org.freedesktop.DBus.Properties.Get\", 0, InterfacePartition, \"Number\").Store(&number)\n\treturn\n}", 
"func (o GetServiceComponentResultOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetServiceComponentResult) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o *LdapProvider) GetPort() int32 {\n\tif o == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\n\treturn o.Port\n}", "func (g *GRPC) Port() int {\n\treturn g.lis.Addr().(*net.TCPAddr).Port\n}" ]
[ "0.6483945", "0.58069336", "0.54006714", "0.52336913", "0.51097655", "0.5074864", "0.4979618", "0.49640957", "0.4940016", "0.4937151", "0.49351084", "0.4915885", "0.48753217", "0.48716635", "0.48530802", "0.4807086", "0.47985777", "0.47916785", "0.4781883", "0.47786185", "0.47706527", "0.4762612", "0.4742236", "0.47288284", "0.47188443", "0.47102666", "0.47102666", "0.46872732", "0.46867177", "0.46855104", "0.46655434", "0.4661095", "0.4646252", "0.46422872", "0.46283534", "0.46278375", "0.4624433", "0.46238664", "0.46151498", "0.46124277", "0.4611167", "0.46101475", "0.46070293", "0.45995238", "0.45981377", "0.4592834", "0.45922014", "0.45914918", "0.45656693", "0.45371813", "0.4530805", "0.4528176", "0.4522462", "0.45221883", "0.4517245", "0.4515686", "0.4512515", "0.45067713", "0.45062307", "0.45043036", "0.4500061", "0.44908738", "0.44871792", "0.4485688", "0.44856367", "0.44801092", "0.44706437", "0.44698727", "0.44641393", "0.44562677", "0.44549528", "0.44522914", "0.4451734", "0.44465154", "0.44445783", "0.44440514", "0.4443093", "0.44351488", "0.44307858", "0.44284692", "0.44278133", "0.44128317", "0.44116277", "0.44098946", "0.4406361", "0.44031635", "0.44007316", "0.4395052", "0.4392915", "0.43764275", "0.43751937", "0.4367983", "0.43655488", "0.43611512", "0.43604562", "0.43576863", "0.43503413", "0.43426412", "0.43407494" ]
0.56891406
3
Close closes the MIDI in port, after it has stopped listening.
func (i *in) Close() (err error) { if !i.IsOpen() { return nil } i.Lock() i.isOpen = false i.jsport.Call("close") i.Unlock() return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (i *in) Close() (err error) {\n\tif !i.IsOpen() {\n\t\treturn nil\n\t}\n\n\t//\ti.StopListening()\n\t//\ti.Lock()\n\terr = i.midiIn.Close()\n\ti.midiIn = nil\n\t//\ti.Unlock()\n\treturn\n}", "func (m *Modem) Close() error {\n\tif !m.running {\n\t\treturn nil\n\t}\n\tm.running = false\n\treturn m.port.Close()\n}", "func Close() {\r\n\tPort.Close()\r\n}", "func (p *Port) Close() {\n\tif p.closed {\n\t\treturn\n\t}\n\n\tp.closed = true\n\n\tif p.buf != nil {\n\t\tclose(p.buf)\n\t}\n\n\tif p.sub != nil {\n\t\tp.sub.Close()\n\t}\n\n\tif len(p.subs) > 0 {\n\t\tfor _, sub := range p.subs {\n\t\t\tsub.Close()\n\t\t}\n\t}\n}", "func (c *UDPChannel) Close() {\n\n}", "func (mc *MindControl) Close() {\n\tmc.SerialDevice.Write([]byte{'\\x73'})\n\tif mc.saving {\n\t\tmc.quitSave <- true\n\t}\n\tmc.SerialDevice.Close()\n\tmc.quitDecodeStream <- true\n\tclose(mc.quitSendPackets)\n\tclose(mc.quitGenTest)\n\tclose(mc.shutdown)\n}", "func (p *Port) Close() error {\n\treturn p.f.Close()\n}", "func (s *Speaker) Close() error { return nil }", "func (i *Iec62056) Close() error {\n\treturn i.port.Close()\n}", "func (p *Port) Close() error {\n\tif p == nil {\n\t\treturn serial.ErrNotOpen\n\t}\n\treturn p.port.Close()\n}", "func (n *Interface) Close() {\n\tn.dataReceive.StopReceiving()\n\tn.announceReceive.StopReceiving()\n\tn.ad.StopAnnounceDaemon()\n\tclose(n.ErrChan)\n}", "func (bn *BasicNotifiee) ListenClose(n net.Network, addr ma.Multiaddr) {\n\tglog.V(4).Infof(\"Notifiee - Close: %v\", addr)\n}", "func (remote *SerialRemote) Close() error {\n\treturn remote.port.Close()\n}", "func (mm *Manager) Close() {\n\tclose(mm.stopCh)\n}", "func (ch *Channel) Close() {}", "func (o *out) Close() error {\n\tif o.stream == nil {\n\t\treturn nil\n\t}\n\n\terr := o.stream.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't close MIDI out %v (%s): %v\", o.Number(), o, err)\n\t}\n\to.stream = nil\n\treturn nil\n}", "func (_m *Socket) Close() {\n\t_m.Called()\n}", "func (r *Receiver) 
Close() error { return nil }", "func (h *Homebrew) Close() error {\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\n\tif !h.Active() {\n\t\treturn nil\n\t}\n\n\tlog.Info(\"closing\")\n\n\t// Tell peers we're closing\nclosing:\n\tfor _, peer := range h.Peer {\n\t\tif peer.Status == AuthDone {\n\t\t\tif err := h.WriteToPeer(append(RepeaterClosing, peer.id...), peer); err != nil {\n\t\t\t\tbreak closing\n\t\t\t}\n\t\t}\n\t}\n\n\t// Kill keepalive goroutine\n\tif h.stop != nil {\n\t\tclose(h.stop)\n\t\th.stop = nil\n\t}\n\n\t// Kill listening socket\n\th.closed = true\n\treturn h.conn.Close()\n}", "func (s *Socket) Close() {\n\ts.mux.Lock()\n\tif s.stop != nil {\n\t\ts.stop <- true\n\t\t<-s.stop\n\t\ts.stop = nil\n\t}\n\ts.mux.Unlock()\n}", "func (l *listener) Close() error {\n\tl.cancel()\n\tl.host.RemoveStreamHandler(l.tag)\n\treturn nil\n}", "func (p *Listener) Close() error { return p.Listener.Close() }", "func (this *udp_listener) Close() error {\n\t/*\n\t\tif err := this.conn.Close(); nil != err {\n\t\t\treturn err\n\t\t}\n\t\tclose(this.signal)\n\t*/\n\tthis.stoped = true\n\treturn nil\n}", "func (notifee *Notifee) ListenClose(network.Network, multiaddr.Multiaddr) {}", "func (m *MajsoulChannel) Close(err error) {\n\tm.mutexChannel.Lock()\n\tif m.isopen {\n\t\tm.exitValue = err\n\t\tm.isopen = false\n\t\tclose(m.stop)\n\t\tlog.Println(\"Majsoul channel closed.\")\n\t}\n\tm.mutexChannel.Unlock()\n}", "func SerialPortClose() {\n\tfmt.Println(\"closing serial port\")\n\tport.Close()\n}", "func (s *Server) Close() {\n\ts.running = false\n\tif s.listener != nil {\n\t\ts.listener.Close()\n\t\ts.listener = nil\n\t}\n}", "func (c *MulticastController) Close() {\n\tc.jrConn.Close()\n\tc.queryFlooder.Close()\n}", "func (rr *RtcpReceiver) Close() {\n\trr.events <- rtcpReceiverEventTerminate{}\n\t<-rr.done\n}", "func (p *Plugin) Close() error {\n\tsocket := p.socket\n\tif socket == nil {\n\t\treturn errors.New(\"Socket is already closed\")\n\t}\n\tsessionID := 
p.sessions.getUniqueSessionID()\n\t_, err := socket.Write(archiveMessage(CloseClient, sessionID, nil))\n\tif err != nil {\n\t\tp.socket = nil\n\t\treturn err\n\t}\n\tmessage := p.sessions.receiveAndClose(sessionID)\n\terr = socket.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.socket = nil\n\tif message.Type != ResultOK {\n\t\treturn fmt.Errorf(\"Unregister error: '%s'\", p.pipeName)\n\t}\n\treturn nil\n}", "func (l *listener) Close() error {\n\tl.L.Lock()\n\terr := l.err\n\tif err == nil {\n\t\tl.err = ErrListenerClosed\n\t}\n\tl.L.Unlock()\n\tl.Broadcast()\n\n\tif err == nil {\n\t\tl.stopListening()\n\t}\n\n\treturn errors.New(\"Not implemented\")\n}", "func (r *Listener) Close() error {\n\tr.portsLk.Lock()\n\tdefer r.portsLk.Unlock()\n\n\tfor n, port := range r.ports {\n\t\tif err := port.Close(); err != nil {\n\t\t\tr.ports = r.ports[n:] // drop any port that was successfully closed\n\t\t\treturn err\n\t\t}\n\t}\n\tr.ports = nil\n\treturn nil\n}", "func (p *Protocol) Close() error {\n\tif p == nil {\n\t\treturn nil\n\t}\n\tp.chans.closeAll()\n\treturn p.rw.Close()\n}", "func (m *display) Close() bool {\n if m.status == CLOSED {\n return true\n }\n\n if err := m.port.Close(); err != nil {\n return false\n }\n m.status = CLOSED\n return true\n}", "func (self *EV3) Close() {\n\tif self.port != nil {\n\t\tself.port.Close()\n\t}\n}", "func (d *Dameon) Stop(closeProcs bool) {\n\td.lockfile.Unlock()\n\td.listener.Close()\n\td.pm.Shutdown(closeProcs)\n}", "func (socket *SocketTLS) Close() {\n\t// Fire closing event\n\tlog.Noteln(socket.name + \" closing. 
Port \" + socket.port)\n\tsocket.eventChan <- SocketEvent{\n\t\tName: \"close\",\n\t\tData: nil,\n\t}\n\n\t// Close socket\n\tsocket.listen.Close()\n}", "func (e *Engine) Close() error {\n\te.Lock()\n\tdefer e.Unlock()\n\n\tvar err error\n\n\t// first, check if the stream exists\n\t// edge case call sequence of: New() -> [stream: nil], Close()\n\tif e.stream != nil {\n\t\t// now, check if we are started (stream is playing currently)\n\t\tif e.started {\n\t\t\t// and stop the stream\n\t\t\tif err = e.stream.Stop(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// close the stream\n\t\t\tif err = e.stream.Close(); err != nil {\n\t\t\t\t// if it failed, return the error\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// the stream was closed successfully\n\t\t\t// flag that we aren't started anymore\n\t\t\te.started = false\n\t\t}\n\t\t// if we're not started (stopped)\n\t\t// there's nothing to do, Stop() automatically\n\t\t// stops/closes the stream after each call\n\t}\n\n\t// remove the active playing tables\n\te.activePlaybackEvents = nil\n\te.activePlaybackEvents = map[*playbackEvent]bool{}\n\n\t// now try to turn off portaudio\n\tif err := portaudio.Terminate(); err != nil {\n\t\t// if it failed, return the error\n\t\treturn err\n\t}\n\t// otherwise termination of portaudio was successful\n\t// flag that we aren't initialized anymore\n\te.initialized = false\n\n\treturn nil\n}", "func (s *TCPServer) Close() {\n\ts.listener.Close()\n}", "func (s *Socket) Close() error {\n\terr := s.listener.Close()\n\tif err != nil {\n\t\ts.Logger.Printf(\"cannot close socket: %s\\n\", err.Error())\n\t}\n\treturn os.Remove(s.socketPath)\n}", "func (p *Peer) Close(timeout time.Duration) error {\n\terr := p.mlist.Leave(timeout)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclose(p.stopc)\n\treturn p.mlist.Shutdown()\n}", "func Close() {\n\tif quit != nil {\n\t\t// must close quit first\n\t\tclose(quit)\n\t\tlistener.Close()\n\t\tlistener = nil\n\t}\n}", "func (proxy *UnixProxy) Close() { 
proxy.listener.Close() }", "func (m *Monitor) Close() {\n\tm.stop()\n\t<-m.stopCh\n}", "func (_m *MgoSession) Close() {\n\t_m.Called()\n}", "func (s *Session) Close() error {\n\ts.Watch(map[string]bool{\"enable\": false})\n\tclose(s.done)\n\treturn s.socket.Close()\n}", "func (m *Mock) Close() {\n\tlog.Printf(\"Closing mock %p\\n\", m)\n\tif m.cmd != nil && m.cmd.Process != nil {\n\t\tif err := m.cmd.Process.Kill(); err != nil {\n\t\t\tlog.Printf(\"Error killing process: %v\", err)\n\t\t\treturn\n\t\t}\n\t\t_, err := m.cmd.Process.Wait()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error waiting for process to end: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tif m.conn != nil {\n\t\tif err := m.conn.Close(); err != nil {\n\t\t\tlog.Printf(\"Error closing connection: %v\", err)\n\t\t}\n\t}\n\tm.EntryPort = 0\n}", "func (o *os) CloseMidiInputs() {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.CloseMidiInputs()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"close_midi_inputs\")\n\n\t// Call the parent method.\n\t// void\n\tretPtr := gdnative.NewEmptyVoid()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n}", "func (m *MEI) Close() error {\n\tif m.fd != nil {\n\t\terr := syscall.Close(*m.fd)\n\t\tm.fd = nil\n\t\treturn err\n\t}\n\treturn nil\n}", "func (l *Listener) Close() error {\n\treturn nil\n}", "func (s *slave) Close() {\n\ts.exit <- struct{}{}\n}", "func (s *Servo) Close() {\n\t_blaster.unsubscribe(s)\n}", "func (file *Remote) Close() error {\n\t_, err := file.client.Send(&Tclunk{\n\t\tFID: file.fid,\n\t})\n\treturn err\n}", "func (s *TCPServer) Close() error {\n\treturn s.listener.Close()\n}", "func (s *Slave) Close() error {\n\treturn s.disconnect()\n}", "func (ln *Listener) Close() error {\n\treturn ln.tcpLn.Close()\n}", "func (server *TCPServer) Close() {\n\tif server.listener != nil {\n\t\tlistener 
:= server.listener\n\t\tserver.listener = nil\n\t\tlistener.Close()\n\t}\n}", "func (e *EthernetInterface) Close() {\n\tsyscall.Close(e.socket)\n}", "func (g *CastDevice) Close() {\n\tg.client.Close()\n}", "func (mb *serialPort) close() (err error) {\n\tif mb.port != nil {\n\t\terr = mb.port.Close()\n\t\tmb.port = nil\n\t}\n\treturn\n}", "func (s *MaplAdapter) Close() error {\n\tif s.server != nil {\n\t\ts.server.GracefulStop()\n\t}\n\n\tif s.listener != nil {\n\t\t_ = s.listener.Close()\n\t}\n\n\treturn nil\n}", "func (d *ConsulDiscovery) Close() {\r\n\tclose(d.stopCh)\r\n}", "func (a *AbstractSessionChannelHandler) OnClose() {}", "func (l *MockListener) Close() error {\n\treturn nil\n}", "func (c *Channel) Close() error {\n\treturn c.exit(false)\n}", "func (m *Manager) Close() {\n\tm.portsLock.Lock()\n\tdefer m.portsLock.Unlock()\n\n\terr := m.sctpAssociation.Close()\n\tm.dtlsState.Close()\n\tm.IceAgent.Close()\n\n\tfor i := len(m.ports) - 1; i >= 0; i-- {\n\t\tif portError := m.ports[i].close(); portError != nil {\n\t\t\tif err != nil {\n\t\t\t\terr = errors.Wrapf(portError, \" also: %s\", err.Error())\n\t\t\t} else {\n\t\t\t\terr = portError\n\t\t\t}\n\t\t} else {\n\t\t\tm.ports = append(m.ports[:i], m.ports[i+1:]...)\n\t\t}\n\t}\n}", "func (n Node) Close() error {\n\treturn n.listener.Close()\n}", "func (a *TCPAcceptor) Stop() {\n\ta.running = false\n\ta.listener.Close()\n}", "func (self *IPCSocket) Close() error {\n\tself.open = false\n\treturn self.socket.Close()\n}", "func (s *Sink) Close() {\n\ts.ifOpen(func() { C.del_aubio_sink(s.s) })\n\ts.s = nil\n}", "func (s *Signal) Close() {\n\tclose(s.channel)\n}", "func (ps *PocketSphinx) Close() error {\n\t// terminate portaudio\n\terr := convertError(portaudio.Terminate())\n\tif err != nil {\n\t\treturn errorx.Decorate(err, \"cannot terminate PortAudio\")\n\t}\n\n\t// destroy sphinx decoder\n\tif ps.decoder != nil {\n\t\tps.decoder.Destroy()\n\t}\n\n\t// close portaudio stream\n\tif ps.stream != nil {\n\t\terr = 
convertError(portaudio.CloseStream(ps.stream))\n\t\tif err != nil {\n\t\t\treturn errorx.Decorate(err, \"cannot close PortAudio stream\")\n\t\t}\n\t\terr = convertError(portaudio.StopStream(ps.stream))\n\t\tif err != nil {\n\t\t\treturn errorx.Decorate(err, \"cannot stop PortAudio stream\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (h *Haminer) Stop() {\n\th.isRunning = false\n\n\tsignal.Stop(h.chSignal)\n\n\tif h.udpConn != nil {\n\t\terr := h.udpConn.Close()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\tfmt.Println(\"Stopped\")\n}", "func (s *server) Close() error {\n\treturn s.listener.Close()\n}", "func (s *Server) Close() error {\n\treturn s.listener.Close()\n}", "func (s *Server) Close() error {\n\treturn s.listener.Close()\n}", "func (i *invoker) Close() error {\n\ti.cancelPF()\n\n\t// Closing the local listener effectively closes the gRPC connection\n\tif err := i.listener.Close(); err != nil {\n\t\ti.conn.Close() // If we fail to close the listener, explicitly close the gRPC connection and ignore any error\n\t\treturn fmt.Errorf(\"failed to close local tcp port listener: %w\", err)\n\t}\n\n\treturn nil\n}", "func (m *ShowMeasurementsMapper) Close() {\n\tif m.remote != nil {\n\t\tm.remote.Close()\n\t}\n}", "func (p *Proxy) Close() error {\n\treturn p.listener.Close()\n}", "func (a *TCPAcceptor) Stop() {\n\ta.isRun = false\n\ta.listener.Close()\n\n\tsession.GetManagerInstance().CloseAllSessions()\n}", "func (w *Watcher) Close() error {\n\tclose(w.commCh)\n\t<-w.closed\n\treturn nil\n}", "func (s *Server) Close() error {\n if s.ln != nil {\n s.ln.Close()\n }\n return nil\n}", "func (s *udtSocket) Close() error {\n\tif !s.isOpen() {\n\t\treturn nil // already closed\n\t}\n\n\tclose(s.messageOut)\n\t_, _ = <-s.shutdownEvent\n\treturn nil\n}", "func (p *TCPProxy) Close() {\n\tvar err error\n\tlogger.Green.Printf(\"Closing proxy, type: [%s], addr: [%s].\\n\", p.listenerConfigs.Protocol, p.listenerAddr.String())\n\tp.closed.SetToIf(false, 
true)\n\tp.connMap.Range(func(k, conn interface{}) bool {\n\t\tif err = conn.(io.ReadWriteCloser).Close(); err == nil {\n\t\t\tlogger.Cyan.Printf(\"closing connection to remote: [%v] success.\\n\", k)\n\t\t} else {\n\t\t\tlogger.Red.Printf(\"closing connection to remote: [%v] with error: %v\\n\", k, err)\n\t\t}\n\t\treturn true\n\t})\n\tif p.listenerConn != nil {\n\t\tp.listenerConn.Close()\n\t}\n}", "func (ml *MockMonitorListener) Close() {\n}", "func (s socket) Close() error {\n\ts.done <- true\n\treturn nil\n}", "func (p *PacketHandler) Close() {\n\tp.stop <- struct{}{}\n}", "func (s *Server) Close() {\n\ts.serverUDP.Close()\n\tastilog.Debug(\"Stopping server\")\n}", "func (me *MessagerEngine) Close() {\n\tme.mu.Lock()\n\tdefer me.mu.Unlock()\n\tif !me.isOpen {\n\t\treturn\n\t}\n\tme.isOpen = false\n\tme.tsv.se.UnregisterNotifier(\"messages\")\n\tfor _, mm := range me.managers {\n\t\tmm.Close()\n\t}\n\tme.managers = make(map[string]*MessageManager)\n\tme.conns.Close()\n}", "func (_m *WebSocketServer) Close() {\n\t_m.Called()\n}", "func (l *Listener) Close() {\n\tclose(l.Shutdown)\n\tl.Shutdown = nil\n}", "func (s *Server) Close() error {\n\ts.cancelMx.Lock()\n\tif s.cancel != nil {\n\t\ts.cancel()\n\t}\n\ts.cancelMx.Unlock()\n\n\treturn s.listener.Close()\n}", "func (p *TCPProxy) Close(c chan struct{}) {\n\tclose(c)\n}", "func (sock *Server) close() {\n\tclose(sock.kill)\n}", "func (s *Server) Close() error {\n\ts.instruments.Log(octo.LOGINFO, s.info.UUID, \"udp.Server.Close\", \"Started : %#v\", s.info)\n\n\tif !s.IsRunning() {\n\t\treturn nil\n\t}\n\n\ts.rl.Lock()\n\t{\n\t\ts.running = false\n\t\ts.doClose = true\n\t}\n\ts.rl.Unlock()\n\n\t// Await for last request.\n\ts.rg.Wait()\n\n\tif err := s.conn.Close(); err != nil {\n\t\ts.instruments.Log(octo.LOGERROR, s.info.UUID, \"udp.Server.Close\", \"Completed : %s\", err.Error())\n\t}\n\n\ts.wg.Wait()\n\n\ts.instruments.Log(octo.LOGINFO, s.info.UUID, \"udp.Server.Close\", \"Completed\")\n\treturn nil\n}", "func 
(n *NatsSubscriber) Close() {\n\tn.nc.Close()\n}", "func (l *listener) Close() error {\n\tif l.unsubscribeLogs != nil {\n\t\tl.unsubscribeLogs()\n\t}\n\tl.runs.Range(func(key, runCloserChannelIf interface{}) bool {\n\t\trunCloserChannel, _ := runCloserChannelIf.(chan struct{})\n\t\tclose(runCloserChannel)\n\t\treturn true\n\t})\n\tl.runs = sync.Map{}\n\tl.shutdownWaitGroup.Wait()\n\treturn nil\n}", "func (s *BufferSink) Close() error {\n\ts.open = false\n\treturn nil\n}", "func (p *Peer) Close() {\n\tif !p.IsConnected() {\n\t\treturn\n\t}\n\tif !atomic.CompareAndSwapInt32(&p.connected, 1, 2) { //closing\n\t\treturn\n\t}\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\t<-done\n\t}()\n\tp.outQueue <- packet{use: packetUseForClose, content: nil, done: done}\n}", "func (p *Listener) Close() error {\n\treturn p.Listener.Close()\n}", "func (p *swPwm) Close() {\n\tsc := make(chan bool)\n\tp.c <- pwmMsg{0, 0, sc}\n\t<-sc\n\tclose(sc)\n\tclose(p.c)\n}" ]
[ "0.6862036", "0.66817975", "0.6633789", "0.6582999", "0.6488882", "0.63879734", "0.627415", "0.6268148", "0.62477446", "0.61805695", "0.6126409", "0.6098916", "0.6087864", "0.60801715", "0.60582757", "0.6055711", "0.60448736", "0.6040362", "0.6038511", "0.5977961", "0.5974102", "0.59459406", "0.5945514", "0.5942518", "0.59274834", "0.5914022", "0.58904356", "0.5883415", "0.5880011", "0.58470964", "0.5844951", "0.5827237", "0.5818722", "0.5788644", "0.5773549", "0.5765458", "0.57534754", "0.5750176", "0.57291824", "0.57258445", "0.570735", "0.5705273", "0.56965494", "0.56931853", "0.5682889", "0.5671768", "0.5669699", "0.56550825", "0.5654274", "0.5648016", "0.5647837", "0.56436175", "0.56346387", "0.5629529", "0.56273776", "0.5611491", "0.56079596", "0.56050175", "0.55892676", "0.5588276", "0.55865276", "0.558465", "0.5578966", "0.55723304", "0.5564244", "0.55624217", "0.55561316", "0.5554312", "0.55528164", "0.5538811", "0.55362463", "0.55353475", "0.55289525", "0.552692", "0.5522684", "0.5522684", "0.5522395", "0.5511798", "0.5509065", "0.55029637", "0.54992396", "0.5494997", "0.5487459", "0.5485568", "0.5484844", "0.54798484", "0.5475666", "0.5469884", "0.5465626", "0.54611033", "0.54497856", "0.54408956", "0.54359055", "0.543345", "0.54330254", "0.5425414", "0.5424873", "0.5421145", "0.5416383", "0.54087925", "0.5407194" ]
0.0
-1
Open opens the MIDI in port
func (i *in) Open() (err error) { if i.IsOpen() { return nil } i.Lock() i.isOpen = true i.jsport.Call("open") i.Unlock() i.driver.Lock() i.driver.opened = append(i.driver.opened, i) i.driver.Unlock() return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (i *in) Open() (err error) {\n\tif i.IsOpen() {\n\t\treturn nil\n\t}\n\n\t//i.Lock()\n\n\ti.midiIn, err = rtmidi.NewMIDIInDefault()\n\tif err != nil {\n\t\ti.midiIn = nil\n\t\t//i.Unlock()\n\t\treturn fmt.Errorf(\"can't open default MIDI in: %v\", err)\n\t}\n\n\terr = i.midiIn.OpenPort(i.number, \"\")\n\t//i.Unlock()\n\n\tif err != nil {\n\t\ti.Close()\n\t\treturn fmt.Errorf(\"can't open MIDI in port %v (%s): %v\", i.number, i, err)\n\t}\n\n\t//i.driver.Lock()\n\t//i.midiIn.IgnoreTypes(i.driver.ignoreSysex, i.driver.ignoreTimeCode, i.driver.ignoreActiveSense)\n\ti.driver.opened = append(i.driver.opened, i)\n\t//i.driver.Unlock()\n\n\treturn nil\n}", "func (o *out) Open() (err error) {\n\tif o.stream != nil {\n\t\treturn nil\n\t}\n\to.stream, err = portmidi.NewOutputStream(o.id, o.driver.buffersizeOut, 0)\n\tif err != nil {\n\t\to.stream = nil\n\t\treturn fmt.Errorf(\"can't open MIDI out port %v (%s): %v\", o.Number(), o, err)\n\t}\n\to.driver.opened = append(o.driver.opened, o)\n\treturn nil\n}", "func openIn(d Driver, number int, name string) (in In, err error) {\n\tins, err := d.Ins()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't find MIDI input ports: %v\", err)\n\t}\n\n\tif number >= 0 {\n\t\tfor _, port := range ins {\n\t\t\tif number == port.Number() {\n\t\t\t\tin = port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif in == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI input port %v\", number)\n\t\t}\n\t} else {\n\t\tif name != \"\" {\n\t\t\tfor _, port := range ins {\n\t\t\t\tif strings.Contains(port.String(), name) {\n\t\t\t\t\tin = port\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif in == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI input port %v\", name)\n\t\t}\n\t}\n\n\t// should not happen here, since we already returned above\n\tif in == nil {\n\t\tpanic(\"unreachable\")\n\t}\n\n\terr = in.Open()\n\treturn\n}", "func openOut(d Driver, number int, name string) (out Out, err error) {\n\touts, err := d.Outs()\n\tif err != nil 
{\n\t\treturn nil, fmt.Errorf(\"can't find MIDI output ports: %v\", err)\n\t}\n\n\tif number >= 0 {\n\t\tfor _, port := range outs {\n\t\t\tif number == port.Number() {\n\t\t\t\tout = port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif out == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI output port %v\", number)\n\t\t}\n\t} else {\n\t\tif name != \"\" {\n\t\t\tfor _, port := range outs {\n\t\t\t\tif strings.Contains(port.String(), name) {\n\t\t\t\t\tout = port\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif out == nil {\n\t\t\treturn nil, fmt.Errorf(\"can't find MIDI output port %v\", name)\n\t\t}\n\t}\n\n\t// should not happen here, since we already returned above\n\tif out == nil {\n\t\tpanic(\"unreachable\")\n\t}\n\n\terr = out.Open()\n\treturn\n}", "func (p *Port) Open() {\n\tif !p.closed {\n\t\treturn\n\t}\n\n\tp.closed = false\n\n\tif p.buf != nil {\n\t\tp.buf = make(chan interface{}, CHANNEL_SIZE)\n\t}\n\n\tif p.sub != nil {\n\t\tp.sub.Open()\n\t}\n\n\tif len(p.subs) > 0 {\n\t\tfor _, sub := range p.subs {\n\t\t\tsub.Open()\n\t\t}\n\t}\n}", "func (o *os) OpenMidiInputs() {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.OpenMidiInputs()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"open_midi_inputs\")\n\n\t// Call the parent method.\n\t// void\n\tretPtr := gdnative.NewEmptyVoid()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n}", "func (m *display) Open() (bool) {\n if m.status == OPENED {\n return true\n }\n\n config := &serial.Config{\n Name: m.options.Port,\n Baud: m.options.Baud,\n ReadTimeout: 1 * time.Second,\n }\n\n var err error\n m.port, err = serial.OpenPort(config)\n if err != nil {\n //log.Println(err)\n return false\n }\n\n m.status = OPENED\n return true\n}", "func Open(name string, mode uint8) (*Port, error) {\n\tonce.Do(mountPlumb)\n\tif fsysErr != nil {\n\t\treturn 
nil, fsysErr\n\t}\n\n\tif err := fsys.Access(\"send\", plan9.AWRITE); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif fid, err := fsys.Open(name, mode); err != nil {\n\t\treturn nil, fmt.Errorf(\"open %s: %s\", name, err)\n\t} else {\n\t\treturn (*Port)(fid), nil\n\t}\n}", "func Open() {\n\tsystem = Connect()\n}", "func (d *device) Open() { d.os.openShell() }", "func Open(portName string, mode *Mode) (Port, error) {\n\treturn openPort(portName, mode)\n}", "func (p *port) Open(c *Config) (err error) {\n\ttermios, err := newTermios(c)\n\tif err != nil {\n\t\treturn\n\t}\n\t// See man termios(3).\n\t// O_NOCTTY: no controlling terminal.\n\t// O_NDELAY: no data carrier detect.\n\tp.file, err = os.OpenFile(c.Address, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NDELAY, os.FileMode(0666))\n\tif err != nil {\n\t\treturn\n\t}\n\t// Backup current termios to restore on closing.\n\tp.backupTermios()\n\tif err = p.setTermios(termios); err != nil {\n\t\tp.file.Close()\n\t\tp.file = nil\n\t\tp.oldTermios = nil\n\t\treturn\n\t}\n\tp.timeout = c.Timeout\n\treturn\n}", "func Open(c *Config) (Port, error) {\n\treturn open(c)\n}", "func Open(filename string, inictl string) (port Port, err error) {\n\tvar file *os.File\n\n\t// NONBLOCK prevents Open from blocking\n\t// until DCD is asserted from modem\n\tif file, err = os.OpenFile(filename, os.O_RDWR|unix.O_NOCTTY|unix.O_NONBLOCK, 0); err != nil {\n\t\treturn\n\t}\n\td := new(dev)\n\td.File = file\n\td.name = filename\n\td.encaps = d\n\n\tfd := d.fd()\n\tt, err := unix.IoctlGetTermios(fd, unix.TCGETS)\n\tif err != nil {\n\t\terr = d.error(\"get term attr\", err)\n\t\tgoto fail\n\t}\n\terr = plainIoctl(fd, unix.TIOCEXCL)\n\tif err != nil {\n\t\tgoto fail\n\t}\n\terr = setBlocking(file.Fd())\n\tif err != nil {\n\t\terr = d.error(\"set blocking\", err)\n\t\tgoto fail\n\t}\n\n\td.tOrig = *t\n\td.tsav = *t\n\td.t = t\n\n\tt.Cflag |= unix.CLOCAL\n\tt.Lflag &^= unix.ICANON | unix.ECHO | unix.ISIG | unix.IEXTEN\n\tt.Iflag &^= unix.BRKINT | 
unix.ICRNL | unix.INPCK | unix.ISTRIP | unix.IXON\n\tt.Iflag |= unix.IGNPAR\n\tt.Oflag &^= unix.OPOST\n\n\t// block until at least one byte has been read\n\tt.Cc[unix.VMIN] = 1\n\tt.Cc[unix.VTIME] = 0\n\n\tif err = d.Ctl(inictl); err != nil {\n\t\tgoto fail\n\t}\n\tif d.rdpoll, err = epoll.NewPollster(); err != nil {\n\t\treturn\n\t}\n\tif d.rdpoll.AddFD(fd, 'r', true); err != nil {\n\t\treturn\n\t}\n\n\tport = d\n\treturn\n\nfail:\n\tfile.Close()\n\treturn\n}", "func Start(port int, ch chan *netcomm.OBJMSGARGS) {\n\n\tif port > 0 {\n\t\tport := \":\" + strconv.Itoa(port)\n\n\t\tgo open(port)\n\t}\n}", "func (this *Roomba) Open(baud uint) error {\n\tif baud != 115200 && baud != 19200 {\n\t\treturn errors.New(fmt.Sprintf(\"invalid baud rate: %d. Must be one of 115200, 19200\", baud))\n\t}\n\n\tc := &serial.Config{\n\t\tName: this.PortName,\n\t\tBaud: int(baud),\n\t\tReadTimeout: time.Second * 3,\n\t}\n\n\tport, err := serial.OpenPort(c)\n\n\tif err != nil {\n\t\tlog.Printf(\"failed to open serial port: %s\", this.PortName)\n\t\treturn err\n\t}\n\tthis.S = port\n\tlog.Printf(\"opened serial port: %s\", this.PortName)\n\treturn nil\n}", "func main() {\n\tdrv, err := driver.New()\n\tu.CheckErr(err)\n\n\t// make sure to close all open ports at the end\n\tdefer drv.Close()\n\n\tins, err := drv.Ins()\n\tu.CheckErr(err)\n\n\touts, err := drv.Outs()\n\tu.CheckErr(err)\n\n\tif len(ins) == 0 || len(outs) == 0 {\n\t\tpanic(\"no such midi device, on mac, you can use Audio Midi Setup > Midi Studio > Enable IAC Driver\")\n\t}\n\n\tlog.Println(ins)\n\tlog.Println(outs)\n\n\tin, out := ins[0], outs[0]\n\n\tu.CheckErr(in.Open())\n\tu.CheckErr(out.Open())\n\n\twr := writer.New(out)\n\n\t// listen for MIDI\n\t// go mid.NewReader().ReadFrom(in)\n\n\tserver, err := socketio.NewServer(nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tserver.OnConnect(\"/\", func(s socketio.Conn) error {\n\t\ts.SetContext(\"\")\n\t\tlog.Println(\"connected:\", s.ID())\n\n\t\tif err := writer.NoteOn(wr, 
61, 100); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t\tif err := writer.NoteOff(wr, 61); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\ttype drawingMsg struct {\n\t\tColor string\n\t\tX0 float64\n\t\tX1 float64\n\t\tY0 float64\n\t\tY1 float64\n\t}\n\tserver.OnEvent(\"/\", \"drawing\", func(s socketio.Conn, msg drawingMsg) {\n\t\tif err := func() error {\n\t\t\t//s.Emit(\"reply\", \"have \"+msg)\n\t\t\tnote := uint8(msg.X1 * 127)\n\t\t\tif note < 20 {\n\t\t\t\tnote = 20\n\t\t\t}\n\t\t\tvelocity := uint8(msg.Y1 * 127)\n\t\t\tif velocity < 20 {\n\t\t\t\tvelocity = 20\n\t\t\t}\n\t\t\tlog.Println(\"note:\", note, \"velocity:\", velocity, \"input:\", msg)\n\t\t\tif err := writer.NoteOn(wr, note, velocity); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttime.Sleep(time.Nanosecond * 1000000)\n\t\t\tif err := writer.NoteOff(wr, note); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\tlog.Printf(\"error on drawing event: %v\", err)\n\t\t}\n\t})\n\tserver.OnError(\"/\", func(e error) {\n\t\tlog.Println(\"error:\", e)\n\n\t\tif err := func() error {\n\t\t\tif err := writer.NoteOn(wr, 62, 100); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t\tif err := writer.NoteOff(wr, 62); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\tlog.Printf(\"socket.io error event: %v\", err)\n\t\t}\n\t})\n\tserver.OnDisconnect(\"/\", func(s socketio.Conn, msg string) {\n\t\tif err := func() error {\n\t\t\tlog.Println(\"closed\", msg)\n\n\t\t\tif err := writer.NoteOn(wr, 63, 100); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t\tif err := writer.NoteOff(wr, 63); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\tlog.Printf(\"error on disconnect event: %v\", err)\n\t\t}\n\t})\n\tgo func() {\n\t\tif err := server.Serve(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\tdefer 
server.Close()\n\n\thttp.Handle(\"/socket.io/\", server)\n\thttp.Handle(\"/\", http.FileServer(http.Dir(\"./asset\")))\n\tlog.Println(\"Serving at localhost:8000...\")\n\tlog.Fatal(http.ListenAndServe(\":8000\", nil))\n}", "func Open(addr string) (*bufio.ReadWriter, error) {\r\n\t// Dial the remote process.\r\n\t// Note that the local port is chosen on the fly. If the local port\r\n\t// must be a specific one, use DialTCP() instead.\r\n\tlog.Println(\"Dial \" + addr)\r\n\tconn, err := net.Dial(\"tcp\", addr)\r\n\tif err != nil {\r\n\t\treturn nil, errors.Wrap(err, \"Dialing \"+addr+\" failed\")\r\n\t}\r\n\treturn bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil\r\n}", "func (stream *MAMWriteStream) Open() (trinary.Trytes, error) {\n\tchannelID, err := stream.m.ChannelCreate(5)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tstream.currentChannelID = channelID\n\treturn channelID, nil\n}", "func OpenPort(cfg *Config) (Port, error) {\n\treturn openPort(cfg)\n}", "func (c *DUTControlRawUARTPortOpener) OpenPort(ctx context.Context) (serial.Port, error) {\n\tstream, err := c.Client.Console(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata, write, err := openDUTControlConsole(stream,\n\t\t&dutcontrol.ConsoleRequest{\n\t\t\tOperation: &dutcontrol.ConsoleRequest_Open{\n\t\t\t\tOpen: &dutcontrol.ConsoleOpen{\n\t\t\t\t\tType: &dutcontrol.ConsoleOpen_RawUart{RawUart: &dutcontrol.ConsoleOpenRawUART{Uart: c.Uart, Baud: int32(c.Baud), DataLen: int32(c.DataLen)}},\n\t\t\t\t},\n\t\t\t}})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &DUTControlPort{stream, data, write, c.ReadTimeout, nil}, nil\n}", "func (config Hardware) Open(logger gopi.Logger) (gopi.Driver, error) {\n\tlogger.Debug(\"sys.darwin.Hardware.Open{}\")\n\n\tthis := new(hardware)\n\tthis.log = logger\n\n\t// Success\n\treturn this, nil\n}", "func (phone *Ph0n3) Open() *Ph0n3 {\n\tif phone.isOpen {\n\t\treturn phone\n\t}\n\tphone.lastEventTime = time.Now()\n\tphone.isOpen = 
true\n\n\tif phone.opt.DialToneDuration > 0 {\n\t\twg := new(sync.WaitGroup)\n\t\twg.Add(2)\n\t\tgo phone.play(480, time.Second*2, wg)\n\t\tgo phone.play(620, time.Second*2, wg)\n\t\twg.Wait()\n\t\ttime.Sleep(time.Second / 4)\n\t}\n\n\tgo func() {\n\t\t// Waiting for no events during 3s to do the call\n\t\tfor time.Since(phone.lastEventTime) < (3 * time.Second) {\n\t\t\ttime.Sleep(time.Second / 2)\n\t\t}\n\n\t\tfmt.Print(\"\\n\")\n\t\tphone.dialing()\n\t}()\n\treturn phone\n}", "func (conn *Conn) Open(port int) (stream net.Conn, err error) {\n\tstream, err = conn.sess.OpenStream()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to open smux stream\")\n\t}\n\n\tvar portData [8]byte\n\tbinary.BigEndian.PutUint64(portData[:], uint64(port))\n\n\t_, err = stream.Write(portData[:])\n\tif err != nil {\n\t\tstream.Close()\n\t\treturn nil, errors.Wrapf(err, \"failed to write port to smux stream\")\n\t}\n\n\tlog.WithField(\"peer\", conn.peerPublicKey).\n\t\tWithField(\"port\", port).\n\t\tInfo(\"opened connection\")\n\n\treturn stream, nil\n}", "func (p *Device) Open() error {\n\tif Verbose {\n\t\tfmt.Fprintln(p.w, \"Start a process:\", p.Exec, \"with needed lib\", p.Lib, \"and args:\")\n\t\tfor i, a := range p.args {\n\t\t\tfmt.Fprintln(p.w, i, a)\n\t\t}\n\t}\n\tp.cmd = exec.Command(p.Exec, p.args...)\n\n\tif Verbose {\n\t\tfmt.Println(p.cmd)\n\t\tp.cmd.Stdout = os.Stdout\n\t\tp.cmd.Stderr = os.Stderr\n\t}\n\tp.Err = p.cmd.Start()\n\tp.errorFatal()\n\n\tgo func() {\n\t\te := p.cmd.Wait()\n\t\tif e != nil {\n\t\t\tfmt.Println(e)\n\t\t}\n\t}()\n\n\t// todo: check if file exists in a loop for more speed\n\ttime.Sleep(1000 * time.Millisecond) // to be sure, log fie is created\n\tp.tempLogFileHandle, p.Err = p.fSys.Open(p.tempLogFileName) // Open() opens a file with read only flag.\n\tp.errorFatal()\n\n\t// p.watchLogfile() // todo: make it working well\n\tif Verbose {\n\t\tfmt.Fprintln(p.w, \"trice is watching and reading from\", p.tempLogFileName)\n\t}\n\treturn 
nil\n}", "func (m *DataChannelManager) Open(ctx context.Context, port exec.Port) (*DataChannel, error) {\n\tif port.URL == \"\" {\n\t\tpanic(\"empty port\")\n\t}\n\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tif m.ports == nil {\n\t\tm.ports = make(map[string]*DataChannel)\n\t}\n\tif con, ok := m.ports[port.URL]; ok {\n\t\treturn con, nil\n\t}\n\n\tch, err := newDataChannel(ctx, port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tch.forceRecreate = func(id string, err error) {\n\t\tlog.Warnf(ctx, \"forcing DataChannel[%v] reconnection on port %v due to %v\", id, port, err)\n\t\tm.mu.Lock()\n\t\tdelete(m.ports, port.URL)\n\t\tm.mu.Unlock()\n\t}\n\tm.ports[port.URL] = ch\n\treturn ch, nil\n}", "func (client *Client) Open() {\n\tconnectionString := fmt.Sprintf(\"%s:%d\", client.host, client.port)\n\tconn, err := net.Dial(\"udp\", connectionString)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tclient.conn = conn\n}", "func Open(portName string) (*SerialPort, error) {\n\tconf := &serial.Config{\n\t\tName: portName,\n\t\tBaud: 1200,\n\t\tParity: serial.ParityOdd,\n\t\tStopBits: serial.Stop1,\n\t\tReadTimeout: 100 * time.Millisecond,\n\t\tRTSFlowControl: true,\n\t}\n\n\ts, err := serial.OpenPort(conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tport := SerialPort{serial: s}\n\treturn &port, nil\n}", "func openPorts() {\n\tinPort, err = utils.CreateInputPort(\"bonjour/discover.options\", *inputEndpoint, nil)\n\tutils.AssertError(err)\n}", "func openInternal(options OpenOptions) (*Port, error) {\n\t// Open the file with RDWR, NOCTTY, NONBLOCK flags\n\t// RDWR : read/write\n\t// NOCTTY : don't allow the port to become the controlling terminal\n\t// NONBLOCK : open with nonblocking so we don't stall\n\tfile, openErr :=\n\t\tos.OpenFile(\n\t\t\toptions.PortName,\n\t\t\tunix.O_RDWR|unix.O_NOCTTY|unix.O_NONBLOCK,\n\t\t\t0777)\n\tif openErr != nil {\n\t\treturn nil, openErr\n\t}\n\n\tfd := file.Fd()\n\n\t// When we call Fd(), we make the file descriptor blocking, which 
we don't want\n\t// Let's unset the blocking flag and save the pointer for later.\n\tnonblockErr := unix.SetNonblock(int(fd), true)\n\tif nonblockErr != nil {\n\t\treturn nil, nonblockErr\n\t}\n\n\tt2, optErr := makeTermios2(options)\n\tif optErr != nil {\n\t\treturn nil, optErr\n\t}\n\n\t// Set our termios2 struct as the file descriptor's settings\n\terrno := ioctl(unix.TCSETS2, fd, uintptr(unsafe.Pointer(t2)))\n\tif errno != nil {\n\t\treturn nil, errno\n\t}\n\n\treturn NewPort(file, fd, options), nil\n}", "func SerialPortOpen(portName string) error {\n\toptions := serial.OpenOptions{\n\t\tPortName: portName,\n\t\tBaudRate: 115200,\n\t\tDataBits: 8,\n\t\tStopBits: 1,\n\t\tMinimumReadSize: 4,\n\t}\n\n\tfmt.Println(\"opening serial port \" + portName)\n\n\tvar err error\n\tport, err = serial.Open(options)\n\treturn err\n}", "func (o *OMXPlayer) Open(waiting chan string) error {\n\to.status = statusStarting\n\n\t// Close OMXPlayer if it's already running\n\tif err := o.Close(); err != nil {\n\t\t// TODO wrap error here with the new errors thing\n\t\treturn err\n\t}\n\n\tif err := o.cmd.Start(); err != nil {\n\t\to.status = statusError\n\t\treturn err\n\t}\n\n\to.status = statusPlaying\n\to.playbackRate = 1\n\n\t// Listen for when OMXPlayer ends in a new goroutine\n\tgo o.wait(waiting)\n\n\treturn nil\n}", "func OpenPort(c *Config) (io.ReadWriteCloser, error) {\n\n\tconfig = c\n\n\tvar err error\n\n\tc2 := &serial.Config{Name: config.Port, Baud: 9600}\n\ts, err = serial.OpenPort(c2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = initInverter()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error initializing inverter, %s\", err)\n\t}\n\n\treturn s, nil\n}", "func (i *in) Close() (err error) {\n\tif !i.IsOpen() {\n\t\treturn nil\n\t}\n\n\t//\ti.StopListening()\n\t//\ti.Lock()\n\terr = i.midiIn.Close()\n\ti.midiIn = nil\n\t//\ti.Unlock()\n\treturn\n}", "func Open(eventBufferSize uint64) (*Driver, error) {\n\tdevices := hid.Enumerate(vendorID, 
productID)\n\tif len(devices) == 0 {\n\t\treturn nil, errors.New(\"GM1356 device not found\")\n\t}\n\tdevice, err := devices[0].Open()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\teventBuffer := make(chan Event, eventBufferSize)\n\tlogFunc := func(a ...interface{}) {\n\t\tfmt.Println(a...)\n\t}\n\tdriver := &Driver{\n\t\tdevice: device,\n\t\teventBuffer: eventBuffer,\n\t\timporter: newImporter(eventBuffer),\n\t\tlog: logFunc,\n\t}\n\tdriver.ctx, driver.cancel = context.WithCancel(context.Background())\n\tgo driver.handleInput()\n\treturn driver, nil\n}", "func OpenSocket() error {\n\tsock, err := net.DialTimeout(\"unix\", GetIpcPath()+\"/discord-ipc-0\", time.Second*2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsocket = sock\n\treturn nil\n}", "func open(path string) {\n\tif err := exec.Command(\"/usr/bin/open\", path).Run(); err != nil {\n\t\tfmt.Printf(\"report.Open err: %x\\n\", err.Error())\n\t}\n}", "func (d *Discord) Open() (<-chan Message, error) {\n\tshards := d.Shards\n\tif shards < 1 {\n\t\tshards = 1\n\t}\n\n\td.Sessions = make([]*discordgo.Session, shards)\n\n\tfor i := 0; i < shards; i++ {\n\t\tsession, err := discordgo.New(d.args...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsession.State.TrackPresences = false\n\t\tsession.ShardCount = shards\n\t\tsession.ShardID = i\n\t\tsession.AddHandler(d.onMessageCreate)\n\t\tsession.AddHandler(d.onMessageUpdate)\n\t\tsession.AddHandler(d.onMessageDelete)\n\n\t\td.Sessions[i] = session\n\t}\n\n\td.Session = d.Sessions[0]\n\n\tfor i := 0; i < len(d.Sessions); i++ {\n\t\td.Sessions[i].Open()\n\t}\n\n\treturn d.messageChan, nil\n}", "func OpenMEI(meiPath string, guid ClientGUID) (*MEI, error) {\n\tvar m MEI\n\tfd, err := syscall.Open(meiPath, os.O_RDWR, 0o755)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata := [16]byte(guid)\n\tif _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), IoctlMEIConnectClient, uintptr(unsafe.Pointer(&data))); err != 0 {\n\t\treturn nil, fmt.Errorf(\"ioctl 
IOCTL_MEI_CONNECT_CLIENT failed: %v\", err)\n\t}\n\t// can be racy, unless protected by a mutex\n\tm.fd = &fd\n\tcopy(m.ClientProperties[:], data[:])\n\treturn &m, nil\n}", "func (lp *LP) Open() (err error) {\n\tgojs.CatchException(&err)\n\tlp.Component().Call(\"open\")\n\treturn err\n}", "func (n *Node) openProxy(in *NodeComm, out *NodeComm) {\n\tif (in != nil) && (out != nil) {\n\t\tgo func() {\n\t\t\tdefer func() { ChanPrintln <- fmt.Sprint(n.name, \": \", \"comm closed\\nDir: \", out.dir, \"\\n\"); }()\n\t\t\tfor !closed(in.in) && !closed(out.out) {\n\t\t\t\tselect {\n\t\t\t\t\tcase msg := <-in.in:\n\t\t\t\t\t\tif msg == nil {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\toutMsg, msgStr := n.process(msg)\n\t\t\t\t\t\t\tChanPrintln <- fmt.Sprint(n.name, \": \", msgStr)\n\t\t\t\t\t\t\tout.out <- outMsg\n\t\t\t\t\t\t}\n\t\t\t\t\tcase shutDown := <-n.shutDown:\n\t\t\t\t\t\tn.shutDown <- shutDown\n\t\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}", "func Open() (*Watch, error) {\n\tif watch != nil {\n\t\treturn watch, nil\n\t}\n\n\t// Configure the screen.\n\tmachine.OLED_LED_POW.Configure(machine.PinConfig{Mode: machine.PinOutput})\n\tmachine.OLED_LED_POW.Low()\n\tspi := machine.SPI0\n\tspi.Configure(machine.SPIConfig{\n\t\tMOSI: machine.OLED_MOSI,\n\t\tMISO: machine.NoPin,\n\t\tSCK: machine.OLED_SCK,\n\t\tFrequency: 8000000,\n\t})\n\tconst (\n\t\tresetPin = machine.OLED_RES\n\t\tdcPin = machine.OLED_DC\n\t\tcsPin = machine.OLED_CS\n\t)\n\tdisplay := ssd1331.New(spi, resetPin, dcPin, csPin)\n\tdisplay.Configure(ssd1331.Config{})\n\tdisplay.SetContrast(0x30, 0x20, 0x30)\n\n\twatch = &Watch{\n\t\tDevice: &display,\n\t}\n\n\treturn watch, nil\n}", "func open(uri string) error {\n\trun, ok := commands[runtime.GOOS]\n\tif !ok {\n\t\treturn fmt.Errorf(\"don't know how to open things on %s platform\", runtime.GOOS)\n\t}\n\n\tcmd := exec.Command(run, uri)\n\treturn cmd.Start()\n}", "func (clk *Clock) Start() {\n\tclk.midiOut <- midiStart\n}", "func (c 
*Client) Open() error {\n\t// TODO: auto-select transport based on BMC capabilities\n\treturn c.open()\n}", "func Open(ctx context.Context, name string) (control.DeviceHandle, error) {\n\treturn open(ctx, name)\n}", "func (m *ShowMeasurementsMapper) Open() error {\n\tif m.remote != nil {\n\t\treturn m.remote.Open()\n\t}\n\n\tvar measurements Measurements\n\n\tif m.shard != nil {\n\t\t// If a WHERE clause was specified, filter the measurements.\n\t\tif m.stmt.Condition != nil {\n\t\t\tvar err error\n\t\t\tmeasurements, err = m.shard.index.measurementsByExpr(m.stmt.Condition)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\t// Otherwise, get all measurements from the database.\n\t\t\tmeasurements = m.shard.index.Measurements()\n\t\t}\n\t\tsort.Sort(measurements)\n\t}\n\n\t// Create a channel to send measurement names on.\n\tch := make(chan string)\n\t// Start a goroutine to send the names over the channel as needed.\n\tgo func() {\n\t\tfor _, mm := range measurements {\n\t\t\tch <- mm.Name\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\t// Store the channel as the state of the mapper.\n\tm.state = ch\n\n\treturn nil\n}", "func Interface(ctx context.Context, i, ch uint8) (gate, key, vel control.CV, err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"midi.Interface: %v\", err)\n\t\t}\n\t}()\n\n\tdrv, err := portmididrv.New()\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tins, err := drv.Ins()\n\tif len(ins) <= int(i) {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tin := ins[i]\n\tif err := in.Open(); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\trd := reader.New(reader.NoLogger())\n\n\tgateCh := make(chan modular.V)\n\tkeyCh := make(chan modular.V)\n\tvelCh := make(chan modular.V)\n\n\trd.Channel.NoteOn = func(p *reader.Position, channel, key, vel uint8) {\n\t\tif channel != ch {\n\t\t\treturn\n\t\t}\n\t\tgateCh <- 1\n\t\tkeyCh <- modular.V(key)\n\t\tvelCh <- modular.V(vel) / 127\n\t}\n\n\trd.Channel.NoteOff = func(p 
*reader.Position, channel, key, vel uint8) {\n\t\tif channel != ch {\n\t\t\treturn\n\t\t}\n\t\tgateCh <- 0\n\t\tvelCh <- 0\n\t}\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\tclose(gateCh)\n\t\t\tclose(keyCh)\n\t\t\tclose(velCh)\n\t\t}()\n\t\tif err := rd.ListenTo(in); err != nil {\n\t\t\tpanic(fmt.Errorf(\"midi.Interface: %v\", err))\n\t\t}\n\t}()\n\n\treturn gateCh, keyCh, velCh, nil\n}", "func (ml *ManagedListener) Open() {\n\tif ml != nil {\n\t\tdefer trace.Tracer.ScopedTrace()()\n\t\tgo ml.Listening()\n\t\tgo ml.PipeMapHandler()\n\t\tml.SetExternalIP()\n\t}\n}", "func (s *Session) Open() error {\n\tvar err error\n\ts.session, err = mgo.Dial(\"127.0.0.1:27017\")\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.session.SetMode(mgo.Monotonic, true)\n\treturn nil\n}", "func (m *Mediator)Start(){\n\tserver := domainserver.UnixSocketServer{\"/tmp/test\", m.messageHandler}\n\tserver.OpenSocket()\n}", "func (t *Transport) Open() error {\n\treturn errors.New(\"TODO\")\n}", "func (s *Store) Open() error {\n\t// Open a TCP port.\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"listen: %s\", err)\n\t}\n\ts.Addr = ln.Addr()\n\ts.Listener = ln\n\n\t// Wrap listener in a muxer.\n\tmux := tcp.NewMux()\n\ts.RaftListener = mux.Listen(meta.MuxRaftHeader)\n\ts.ExecListener = mux.Listen(meta.MuxExecHeader)\n\tgo mux.Serve(ln)\n\n\t// Open store.\n\tif err := s.Store.Open(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (file *Remote) Open(p string, mode uint8) (*Remote, error) {\n\tnext, err := file.walk(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trsp, err := file.client.Send(&Topen{\n\t\tFID: next.fid,\n\t\tMode: mode,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\topen := rsp.(*Ropen)\n\n\tnext.qid = open.QID\n\n\treturn next, nil\n}", "func (s *Server) Open() error {\n\n ln, err := net.Listen(\"tcp\", s.Addr)\n if err != nil {\n return err\n }\n s.ln = ln\n\n //start a go routine that starts an http server\n 
go func() {http.Serve(s.ln, s.Handler)} ()\n\n return nil\n}", "func (d *Device) Open(ctx context.Context) (chan Event, error) {\n\tif d.Data.name == [128]byte{} || len(d.Data.descriptor) == 0 {\n\t\treturn nil, errors.New(\"device has not been initialized\")\n\t}\n\n\tvar err error\n\tif d.file, err = os.OpenFile(\"/dev/uhid\", os.O_RDWR, 0644); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed opening /dev/uhid file: %w\", err)\n\t}\n\n\t// Check if uniq is empty.\n\tif d.Data.uniq == [64]byte{} {\n\t\t_, err := rand.Read(d.Data.uniq[:])\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tif err = d.WriteEvent(d.Data.createRequest()); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed writing uhid create request: %w\", err)\n\t}\n\n\tgo d.dispatch(ctx)\n\tevt := <-d.eventChan\n\tif evt.Err != nil {\n\t\treturn nil, fmt.Errorf(\"kernel failed at creating the device: %w\", evt.Err)\n\t}\n\treturn d.eventChan, nil\n}", "func (s *RegularStateMachine) Open(stopc <-chan struct{}) (uint64, error) {\n\tpanic(\"Open() called on RegularStateMachine\")\n}", "func (c *DirectClient) Open(correlationId string) error {\n\tif c.Opened {\n\t\treturn nil\n\t}\n\n\tif c.Controller == nil {\n\t\terr := cerr.NewConnectionError(correlationId, \"NO_CONTROLLER\", \"Controller reference is missing\")\n\t\treturn err\n\t}\n\n\tc.Opened = true\n\n\tc.Logger.Info(correlationId, \"Opened direct client\")\n\treturn nil\n}", "func (config Hardware) Open(logger gopi.Logger) (gopi.Driver, error) {\n\tlogger.Debug(\"sys.mock.Hardware.Open{ }\")\n\n\tthis := new(hardware)\n\tthis.log = logger\n\n\t// Success\n\treturn this, nil\n}", "func open(port string) (jdh.DB, error) {\n\tdb := &DB{\n\t\tisClosed: false,\n\t\trequest: make(chan string),\n\t\tanswer: make(chan interface{}),\n\t}\n\tgo db.req()\n\treturn db, nil\n}", "func (i *in) IsOpen() (open bool) {\n\t//\ti.RLock()\n\topen = i.midiIn != nil\n\t//i.RUnlock()\n\treturn\n}", "func getMIDIPort(ctx context.Context) (string, error) {\n\tconst 
emptyPort = \"\"\n\tout, err := testexec.CommandContext(ctx, \"/usr/bin/arecordmidi\", \"-l\").Output()\n\tif err != nil {\n\t\treturn emptyPort, errors.Wrap(err, \"couldn't start arecordmidi\")\n\t}\n\n\tconst MIDIClientName = \"Midi Through\"\n\t// The output of arecordmidi is assumed to be of the following format:\n\t//\n\t// Port Client name Port name\n\t// 14:0 Midi Through Midi Through Port-0\n\t//\n\t// So, we parse the output string and search for the port associated\n\t// with \"Midi Through\" assuming the above.\n\tre := regexp.MustCompile(`(\\d+:\\d+)\\s{2,}(.+)\\s{2,}`)\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tfields := re.FindStringSubmatch(line)\n\t\tif fields == nil {\n\t\t\tcontinue\n\t\t}\n\t\tclient := strings.TrimSpace(fields[2])\n\t\tif client == MIDIClientName {\n\t\t\t// Return the port.\n\t\t\treturn strings.TrimSpace(fields[1]), nil\n\t\t}\n\t}\n\treturn emptyPort, errors.Errorf(\"%q client not found\", MIDIClientName)\n}", "func (c *Connection) Open(conn net.Conn) (err error) {\n\tc.handler = newHandler(c)\n\tc.pump, err = event.NewPump(conn,\n\t\tevent.NewMessagingDelegator(c.handler),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif c.Server {\n\t\tc.pump.Server()\n\t}\n\tgo c.pump.Run()\n\treturn nil\n}", "func (d *Discord) Open() (<-chan Message, error) {\n\tgateway, err := discordgo.New(d.args[0].(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts, err := gateway.GatewayBot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.Sessions = make([]*discordgo.Session, s.Shards)\n\n\tlog.Printf(\"%s opening with %d shards\\n\", d.Name(), s.Shards)\n\twg := sync.WaitGroup{}\n\tfor i := 0; i < s.Shards; i++ {\n\t\tlog.Printf(\"%s opening shard %d\\n\", d.Name(), i+1)\n\t\tsession, err := discordgo.New(d.args[0].(string))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif d.Session == nil {\n\t\t\td.Session = session\n\t\t}\n\t\td.Sessions[i] = session\n\t\tsession.ShardCount = 
s.Shards\n\t\tsession.ShardID = i\n\t\tsession.State.TrackPresences = false\n\t\twg.Add(1)\n\t\tgo func(session *discordgo.Session) {\n\t\t\tdefer wg.Done()\n\t\t\terr := session.Open()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error opening shard %s\", err)\n\t\t\t}\n\t\t}(d.Sessions[i])\n\t}\n\twg.Wait()\n\n\tfor _, session := range d.Sessions {\n\t\tsession.AddHandler(d.onMessageCreate)\n\t\tsession.AddHandler(d.onMessageUpdate)\n\t\tsession.AddHandler(d.onMessageDelete)\n\t}\n\n\treturn d.messageChan, nil\n}", "func (s *BufferSink) Open() error {\n\ts.open = true\n\treturn nil\n}", "func (config Service) Open(log gopi.Logger) (gopi.Driver, error) {\n\tlog.Debug(\"<grpc.service.mihome>Open{ server=%v mihome=%v }\", config.Server, config.MiHome)\n\n\tthis := new(service)\n\tthis.log = log\n\tthis.mihome = config.MiHome\n\tthis.pubsub = nil\n\n\t// Register service with server\n\tconfig.Server.Register(this)\n\n\t// Reset the radio\n\tif err := this.mihome.ResetRadio(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Start goroutine for capturing events from mihome\n\tthis.startCapture()\n\n\t// Success\n\treturn this, nil\n}", "func (r *Root) OpenImpl(opts *MountOpts) *Session {\n\n\t//gspt.SetProcTitle(\"sky-anchor\")\n\n\t// Return absolute URI to the created session\n\tif r.Sessions == nil {\n\t\t// TODO: this should be made already\n\t\tr.Sessions = inmem.NewObscuredFolder(\"sessions\")\n\t}\n\tsessionId := extras.GenerateId()\n\tsessionPath := fmt.Sprintf(\":9234/pub/sessions/%s\", sessionId)\n\tsessionUri, _ := toolbox.SelfURI(sessionPath)\n\n\tsession := &Session{\n\t\tfsPrefix: filepath.Clean(opts.FsRootPath),\n\t\tURI: sessionUri,\n\t}\n\tsession.FsRoot = session.getRoot()\n\tlog.Printf(\"built session %+v\", session)\n\n\tif ok := r.Sessions.Put(sessionId, session); !ok {\n\t\tlog.Println(\"Session store rejected us :(\")\n\t\treturn nil\n\t}\n\treturn session\n}", "func Open(location string) error {\n\treturn 
New(\"open\").WithArgs(location).Run()\n}", "func (ch *Channel) Open() {\n ch.Clients = new(sync.Map)\n}", "func open(port string) (jdh.DB, error) {\n\tif len(port) == 0 {\n\t\tport = Port\n\t}\n\tif i := strings.Index(port, \":\"); i == 0 {\n\t\tport = \"localhost\" + port\n\t} else if i < 0 {\n\t\tport = \"localhost:\" + port\n\t}\n\treturn &DB{port}, nil\n}", "func OpenServer(conn *net.UDPConn, processNodePtr *nd.Node) {\n\tfor {\n\t\t_, portLog := ListenOnPort(conn, processNodePtr)\n\n\t\tif len(portLog) > 0 {\n\t\t\t(*processNodePtr).Logger.Println(portLog)\n\t\t}\n\t}\n}", "func (jobs *Jobs) Open() {\n\tjobs.ch = channels.NewInfiniteChannel()\n}", "func (mb *serialPort) connect() error {\n\tif mb.port == nil {\n\t\tport, err := serial.OpenPort(&mb.Config)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmb.port = port\n\t}\n\treturn nil\n}", "func (client *StatsdClient) Open() {\n\tconnectionString := fmt.Sprintf(\"%s:%d\", client.Host, client.Port)\n\tconn, err := net.Dial(\"udp\", connectionString)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tclient.conn = conn\n}", "func (f *fakePortOpener) OpenLocalPort(lp *util.LocalPort) (util.Closeable, error) {\n\tf.openPorts = append(f.openPorts, lp)\n\treturn nil, nil\n}", "func (t *Tunnel) open() error {\n\tif t.nc == nil {\n\t\treturn nats.ErrInvalidConnection\n\t}\n\tt.mon.timer = globalTimerPool.Get(t.mon.readTimeout)\n\tt.inbox = nats.NewInbox()\n\t// TODO: we could use this directly below in subscribe instead of wildcard, but will consider later.\n\tt.reply = fmt.Sprintf(\"%s.%s\", t.inbox, t.randSuffix.Get())\n\tt.mux = sync.Mutex{}\n\n\tsub, err := t.nc.QueueSubscribe(fmt.Sprintf(\"%s.*\", t.inbox), emptyQ, t.recv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tt.sub = sub\n\treturn nil\n}", "func (info DeviceInfo) Open() (*Device, error) {\n\tpath := C.CString(info.Path)\n\tdefer C.free(unsafe.Pointer(path))\n\n\tdevice := C.hid_open_path(path)\n\tif device == nil {\n\t\treturn nil, fmt.Errorf(\"hidapi: 
failed to open device: %s\", C.GoString(path))\n\t}\n\treturn &Device{\n\t\tDeviceInfo: info,\n\t\tdevice: device,\n\t}, nil\n}", "func (c *ExampleClient) Open(info SessionInfo) (string, Error) {\n\tc.SessionInfo = info\n\t// Do what you need to do to open the session.\n\treturn \"Client has finished opening the session\", nil\n}", "func (s *replayService) OpenMultiCommandSession(config *SessionConfig) (MultiCommandSession, error) {\n\treturn NewReplayMultiCommandSession(s.shellPrompt, s.system, s.commands), nil\n}", "func (c *Client) Open(path string) error {\n\treturn c.Browser.Open(fmt.Sprintf(\"%s%s\", c.base, path))\n}", "func (controller *UIController) Open(ctx context.Context) error {\n\treturn nil\n}", "func Open(devnode string) (*InputDevice, error) {\n\tf, err := os.Open(devnode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdev := InputDevice{}\n\tdev.Fn = devnode\n\tdev.File = f\n\n\treturn &dev, nil\n}", "func NewMIDIIn(api API, name string, queueSize int) (MIDIIn, error) {\n\tp := C.CString(name)\n\tdefer C.free(unsafe.Pointer(p))\n\tin := C.rtmidi_in_create(C.enum_RtMidiApi(api), p, C.uint(queueSize))\n\tif !in.ok {\n\t\tdefer C.rtmidi_in_free(in)\n\t\treturn nil, errors.New(C.GoString(in.msg))\n\t}\n\treturn &midiIn{in: in, midi: midi{midi: C.RtMidiPtr(in)}}, nil\n}", "func OpenPort(proto string, port int) rules.Rule {\n\tswitch proto {\n\tcase \"tcp\":\n\tcase \"udp\":\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"invalid proto: %q\", proto))\n\t}\n\n\treturn rules.Rule(fmt.Sprintf(\n\t\t\"-I in-%s -j ACCEPT -p %s --dport %d\",\n\t\tproto, proto, port))\n}", "func NewMIDIOut(api API, name string) (MIDIOut, error) {\n\tp := C.CString(name)\n\tdefer C.free(unsafe.Pointer(p))\n\tout := C.rtmidi_out_create(C.enum_RtMidiApi(api), p)\n\tif !out.ok {\n\t\tdefer C.rtmidi_out_free(out)\n\t\treturn nil, errors.New(C.GoString(out.msg))\n\t}\n\treturn &midiOut{out: out, midi: midi{midi: C.RtMidiPtr(out)}}, nil\n}", "func (s *Server) Open() error {\n\ts.points, _ = 
influxDBClient.NewBatchPoints(s.BPConfig)\n\tif err := s.client.Open(); err != nil {\n\t\treturn fmt.Errorf(\"failed to open udpClient to read\", err)\n\t}\n\treturn nil\n}", "func (d *DuinoDispatcher) init() {\n\tc := &serial.Config{Name: \"COM7\", Baud: 9600}\n\ts, err := serial.OpenPort(c)\n\tif err != nil {\n\t\tfmt.Println(\"serial problem\")\n\t\tlog.Fatal(err)\n\t}\n\td.serialPort = s\n\tfmt.Println(\"serial established\")\n}", "func (mio *Mio) Open(id string, anySz ...uint64) error {\n if mio.obj != nil {\n return errors.New(\"object is already opened\")\n }\n err := mio.objNew(id)\n if err != nil {\n return err\n }\n\n C.m0_obj_init(mio.obj, &C.container.co_realm, &mio.objID, 1)\n rc := C.m0_open_entity(&mio.obj.ob_entity)\n if rc != 0 {\n mio.Close()\n return fmt.Errorf(\"failed to open object entity: %d\", rc)\n }\n\n for _, v := range anySz {\n mio.objSz = v\n }\n\n return mio.open(mio.objSz)\n}", "func (c *Client) Open(bufferSize int) error {\n\tvar (\n\t\taddr *net.UDPAddr\n\t\tok bool\n\t\terr error\n\t)\n\n\tif addr, ok = c.LocalAddr.(*net.UDPAddr); !ok {\n\t\treturn fmt.Errorf(\"Invalid local address: %v not a net.UDPAddr\", c.LocalAddr)\n\t}\n\n\t// prepare the socket to listen on for replies\n\tc.connection, err = net.ListenUDP(\"udp6\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.stopping = new(sync.WaitGroup)\n\tc.sendQueue = make(chan dhcpv6.DHCPv6, bufferSize)\n\tc.receiveQueue = make(chan dhcpv6.DHCPv6, bufferSize)\n\tc.packets = make(map[dhcpv6.TransactionID]*promise.Promise)\n\tc.packetsLock = sync.Mutex{}\n\tc.errors = make(chan error)\n\n\tvar ctx context.Context\n\tctx, c.cancel = context.WithCancel(context.Background())\n\tgo c.receiverLoop(ctx)\n\tgo c.senderLoop(ctx)\n\n\treturn nil\n}", "func Open() (*LaunchControl, error) {\n\tinput, output, err := discover()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := input.Open(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := output.Open(); err != nil {\n\t\treturn 
nil, err\n\t}\n\n\tlc := &LaunchControl{\n\t\tinputDriver: input,\n\t\toutputDriver: output,\n\t\terrorChan: make(chan error, 1),\n\t}\n\tfor ch := 0; ch < NumChannels; ch++ {\n\t\tfor cc := Control(0); cc < NumControls; cc++ {\n\t\t\tv0 := ValueUninitialized\n\t\t\tif cc.IsButton() {\n\t\t\t\tv0 = 0\n\t\t\t}\n\t\t\tlc.value[ch][cc] = v0\n\t\t}\n\t}\n\treturn lc, nil\n}", "func openPortCom5() {\n\t// ports, _ := serial.GetPortsList()\n\tvar err error\n\tactivePort, err = serial.Open(\n\t\t\"COM5\",\n\t\t&serial.Mode{\n\t\t\tBaudRate: 57600,\n\t\t\tParity: serial.NoParity,\n\t\t\tDataBits: 8,\n\t\t\tStopBits: serial.OneStopBit,\n\t\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(\"Connected to COM5\\n\")\n\n}", "func Open(addr string) (*bufio.ReadWriter, error) {\n\tlog.Println(\"Dial \" + addr)\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Dialing \"+addr+\" failed\")\n\t}\n\treturn bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil\n}", "func Open(moduleInterface string, port string, portConfig int) (context *Context, err error) {\n\tif moduleInterface == \"\" {\n\t\tmoduleInterface, _, _ = Defaults()\n\t}\n\n\tswitch moduleInterface {\n\tcase NotecardInterfaceSerial:\n\t\tcontext, err = OpenSerial(port, portConfig)\n\t\tcontext.isLocal = true\n\tcase NotecardInterfaceI2C:\n\t\tcontext, err = OpenI2C(port, portConfig)\n\t\tcontext.isLocal = true\n\tcase NotecardInterfaceRemote:\n\t\tcontext, err = OpenRemote(port, portConfig)\n\tcase NotecardInterfaceLease:\n\t\tcontext, err = OpenLease(port, portConfig)\n\tdefault:\n\t\terr = fmt.Errorf(\"unknown interface: %s\", moduleInterface)\n\t}\n\tif err != nil {\n\t\tcardReportError(nil, err)\n\t\terr = fmt.Errorf(\"error opening port: %s %s\", err, note.ErrCardIo)\n\t\treturn\n\t}\n\tcontext.iface = moduleInterface\n\treturn\n}", "func OpenPortOnInterface(proto string, port int, iface Link) rules.Rule {\n\tswitch proto {\n\tcase 
\"tcp\":\n\tcase \"udp\":\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"invalid proto: %q\", proto))\n\t}\n\n\treturn rules.Rule(fmt.Sprintf(\n\t\t\"-I in-%s -j ACCEPT -p %s --dport %d -i %s\",\n\t\tproto, proto, port, iface.Name()))\n}", "func (remote *SerialRemote) Connect() error {\n\tlog.Printf(\"serial:open uri=%v\\n\", remote.uri)\n\n\tport, err := serial.Open(remote.uri, &serial.Mode{})\n\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tmode := &serial.Mode{\n\t\tBaudRate: 19200,\n\t\tParity: serial.NoParity,\n\t\tDataBits: 8,\n\t\tStopBits: serial.OneStopBit,\n\t}\n\n\tif err := port.SetMode(mode); err != nil {\n\t\treturn err\n\t}\n\n\tremote.port = port\n\tremote.done = make(chan struct{}, 2)\n\tremote.channel = make(chan []byte, 256)\n\n\tgo remote.ioloop()\n\n\tif true == remote.flags.AutoConfigure {\n\t\t// configureGateway this, flags\n\t\tif err := ensureTinyMeshConfig(remote, remote.flags); nil != err {\n\t\t\treturn err\n\t\t}\n\t} else if true == remote.flags.Verify {\n\t\tif err := verifyTinyMeshConfig(remote, remote.flags); nil != err {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *LinuxOpener) Open(u string, windowType WindowType, autoRaise bool) error {\n\tif getEnv(\"DISPLAY\", o.Env) == \"\" {\n\t\treturn &RequireXError{}\n\t}\n\tcmd := exec.Command(\"/usr/bin/xdg-open\", u)\n\treturn cmd.Run()\n}", "func Open(config *Config) (*Tc, error) {\n\tvar tc Tc\n\n\tif config == nil {\n\t\tconfig = &Config{}\n\t}\n\n\tcon, err := netlink.Dial(unix.NETLINK_ROUTE, &netlink.Config{NetNS: config.NetNS})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttc.con = con\n\n\treturn &tc, nil\n}", "func (c *service) OpenMultiCommandSession(config *SessionConfig) (MultiCommandSession, error) {\n\treturn newMultiCommandSession(c, config, c.replayCommands, c.recordSession)\n}", "func (s *Session) Open() (str *Stream, err error) {\n\t// Wait for acks\n\tselect {\n\tcase s.opened <- struct{}{}:\n\tcase <-s.done:\n\t\treturn nil, ErrSessionClosed\n\t}\n\tsid := 
s.generateStreamID()\n\tif sid == 0 {\n\t\treturn nil, ErrMaxOpenStreams\n\t}\n\tstr = newStream(s, sid)\n\tif _, err = str.SendWindowUpdate(flagSYN, 0); err != nil {\n\t\treturn nil, err\n\t}\n\tif err = s.putStream(str); err != nil {\n\t\treturn nil, ErrSessionClosed\n\t}\n\treturn str, nil\n}", "func OpenTun() (*Tun, error) {\n\treturn NewTun(\"\")\n}" ]
[ "0.7422173", "0.7149176", "0.6414711", "0.639298", "0.6342888", "0.6203139", "0.60942465", "0.60482734", "0.5800341", "0.579854", "0.57300484", "0.5700226", "0.56920344", "0.5592236", "0.55540836", "0.5538995", "0.5504408", "0.5409996", "0.540787", "0.5380848", "0.5365199", "0.53480047", "0.53460544", "0.5285845", "0.5231384", "0.5223633", "0.5212628", "0.52064633", "0.5180777", "0.51571465", "0.5155369", "0.51424646", "0.5139481", "0.51339895", "0.5114273", "0.508025", "0.5075949", "0.50730425", "0.50321513", "0.50064945", "0.50045997", "0.49947825", "0.49935603", "0.4992233", "0.497985", "0.49507588", "0.4946179", "0.49438602", "0.4937381", "0.4923595", "0.4921086", "0.49155426", "0.4911552", "0.49088854", "0.49060398", "0.49000794", "0.48974502", "0.4896468", "0.4896326", "0.48908347", "0.48805627", "0.48733827", "0.4873148", "0.4841627", "0.48395494", "0.4838335", "0.48319528", "0.48317227", "0.48300025", "0.48153642", "0.4814222", "0.48058394", "0.4792657", "0.47922498", "0.47858357", "0.47753003", "0.47725463", "0.47679642", "0.47589782", "0.47540814", "0.47491622", "0.47202027", "0.47112605", "0.4706984", "0.47044408", "0.47009432", "0.46984664", "0.46983993", "0.4683264", "0.46787965", "0.46762663", "0.46748996", "0.46552402", "0.465517", "0.4646858", "0.46423066", "0.4641849", "0.4640354", "0.46386558", "0.46324414" ]
0.47644037
78
GetCommand returns the browser key for the rule
func (r *Rule) GetCommand() (command string, args []string) { command = r.Command args = r.Args return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Router) GetCommand(name string) (c *Command) {\n\tfor _, cmd := range r.Commands {\n\t\tif strings.ToLower(cmd.Name) == strings.ToLower(name) {\n\t\t\treturn cmd\n\t\t}\n\t\tfor _, a := range cmd.Aliases {\n\t\t\tif strings.ToLower(a) == strings.ToLower(name) {\n\t\t\t\treturn cmd\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func getCommand( )*string {\n\n\tvar inputCommand string \t\t// define variable to store inputCommand\n\n\tfmt.Scanln(&inputCommand) \t\t// stores character from user\n\n\treturn &inputCommand \t\t\t// returns users character\n}", "func GetCmd(key string) string {\n\tresult := storage.StringDb[key]\n\treturn result\n}", "func (m *CreateTabRequest) GetCommand() string {\n\tif m != nil && m.Command != nil {\n\t\treturn *m.Command\n\t}\n\treturn \"\"\n}", "func GetCommand() *cobra.Command {\n\treturn powerCmd\n}", "func (bot *Bot) GetCommand(msg string) string {\n\tif !strings.HasPrefix(msg, bot.config.CommandChar) {\n\t\treturn \"\"\n\t}\n\n\tfirstSpace := strings.Index(msg, \" \")\n\tif firstSpace != -1 {\n\t\treturn msg[1:firstSpace]\n\t} else {\n\t\treturn msg[1:]\n\t}\n}", "func GetCommand(input string) (string, string) {\n\n\t\tinputs := strings.Split(input, \"\\r\\n\")\n\t\tn1 := len(inputs)\n\t\tn := len(inputs[0])\n\n\t\tcom, rem := \"\", \"\"\n\t\tif n >= 3 && (inputs[0][0:3] == \"set\" || inputs[0][0:3] == \"cas\") {\n\t\t\t// start of a 2 line command\n\t\t\tif n1 < 3 { // includes \"\\r\\n\"\n\t\t\t\treturn \"\", input // if the command is not complete, wait for the rest of the command\n\t\t\t}\n\t\t\tvar in = strings.Index(input, \"\\r\\n\") + 2\n\t\t\tin += strings.Index(input[in:], \"\\r\\n\") + 2\n\t\t\tcom = input[:in]\n\t\t\trem = input[in:]\n\t\t} else if (n >= 3 && inputs[0][0:3] == \"get\") || (n >= 4 && inputs[0][0:4] == \"getm\") ||(n >= 6 && inputs[0][0:6] == \"delete\") {\n\t\t\t// start of a 1 line command\n\t\t\tif n1 < 2 { // includes \"\\r\\n\"\n\t\t\t\treturn \"\", input // if the command is not complete, 
wait for the rest of the command\n\t\t\t}\n\t\t\tvar in = strings.Index(input, \"\\r\\n\") + 2\n\t\t\tcom = input[:in]\n\t\t\trem = input[in:]\n\t\t} else {\n\t\t\treturn \"\", input\n\t\t}\n\t\treturn com, rem\n}", "func (c *Client) Get(_ context.Context, key string) *redis.StringCmd {\n\treturn c.cli.Get(key)\n}", "func GetCommand() *cobra.Command {\n\treturn baseCmd\n}", "func GetCommand(c *cli.Context) error {\n\targs := c.Args()\n\tif len(args) < 1 {\n\t\treturn fmt.Errorf(\"Invalid number of arguments\")\n\t}\n\tpwd, err := passwd.GetPassword(c, c.String(\"password\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tparsedData := make(map[string]interface{})\n\tsources := c.StringSlice(\"encrypted-input\")\n\tif len(sources) > 0 {\n\t\tfor _, src := range sources {\n\t\t\terr = uri.ReadMapFromURI(c, src, uri.ReadDataFromEncryptedStream(pwd), parsedData)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\terr = uri.ReadMapFromURI(c, \"\", uri.ReadDataFromEncryptedStream(pwd), parsedData)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tbuf := new(bytes.Buffer)\n\tfor _, key := range args {\n\t\tvalue, ok := parsedData[key]\n\t\tif !ok {\n\t\t\tfmt.Fprintf(os.Stderr, \"Key %s is not present in the archive\\n\", key)\n\t\t} else if _, err = fmt.Fprintf(buf, \"%s\\n\", value); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn uri.WriteDataToURI(c, c.String(\"output\"), buf.Bytes(), uri.WriteDataToPlaintextStream)\n}", "func GetCommand() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"mho get [args]\",\n\t\tShort: \"ONOS MHO subsystem commands\",\n\t}\n\n\tcli.AddConfigFlags(cmd, defaultAddress)\n\tcmd.AddCommand(getGetCommand())\n\tcmd.AddCommand(loglib.GetCommand())\n\treturn cmd\n}", "func GetCommandFunction(cr *CommandReference) interface{} {\n\tswitch cr.Type {\n\tcase CommandType_cmd_vkCmdBeginRenderPass:\n\t\treturn subDovkCmdBeginRenderPass\n\tcase CommandType_cmd_vkCmdEndRenderPass:\n\t\treturn 
subDovkCmdEndRenderPass\n\tcase CommandType_cmd_vkCmdNextSubpass:\n\t\treturn subDovkCmdNextSubpass\n\tcase CommandType_cmd_vkCmdBindPipeline:\n\t\treturn subDovkCmdBindPipeline\n\tcase CommandType_cmd_vkCmdBindDescriptorSets:\n\t\treturn subDovkCmdBindDescriptorSets\n\tcase CommandType_cmd_vkCmdBindVertexBuffers:\n\t\treturn subDovkCmdBindVertexBuffers\n\tcase CommandType_cmd_vkCmdBindIndexBuffer:\n\t\treturn subDovkCmdBindIndexBuffer\n\tcase CommandType_cmd_vkCmdPipelineBarrier:\n\t\treturn subDovkCmdPipelineBarrier\n\tcase CommandType_cmd_vkCmdWaitEvents:\n\t\treturn subDovkCmdWaitEvents\n\tcase CommandType_cmd_vkCmdBeginQuery:\n\t\treturn subDovkCmdBeginQuery\n\tcase CommandType_cmd_vkCmdBlitImage:\n\t\treturn subDovkCmdBlitImage\n\tcase CommandType_cmd_vkCmdClearAttachments:\n\t\treturn subDovkCmdClearAttachments\n\tcase CommandType_cmd_vkCmdClearColorImage:\n\t\treturn subDovkCmdClearColorImage\n\tcase CommandType_cmd_vkCmdClearDepthStencilImage:\n\t\treturn subDovkCmdClearDepthStencilImage\n\tcase CommandType_cmd_vkCmdCopyBuffer:\n\t\treturn subDovkCmdCopyBuffer\n\tcase CommandType_cmd_vkCmdCopyBufferToImage:\n\t\treturn subDovkCmdCopyBufferToImage\n\tcase CommandType_cmd_vkCmdCopyImage:\n\t\treturn subDovkCmdCopyImage\n\tcase CommandType_cmd_vkCmdCopyImageToBuffer:\n\t\treturn subDovkCmdCopyImageToBuffer\n\tcase CommandType_cmd_vkCmdCopyQueryPoolResults:\n\t\treturn subDovkCmdCopyQueryPoolResults\n\tcase CommandType_cmd_vkCmdDispatch:\n\t\treturn subDovkCmdDispatch\n\tcase CommandType_cmd_vkCmdDispatchIndirect:\n\t\treturn subDovkCmdDispatchIndirect\n\tcase CommandType_cmd_vkCmdDraw:\n\t\treturn subDovkCmdDraw\n\tcase CommandType_cmd_vkCmdDrawIndexed:\n\t\treturn subDovkCmdDrawIndexed\n\tcase CommandType_cmd_vkCmdDrawIndexedIndirect:\n\t\treturn subDovkCmdDrawIndexedIndirect\n\tcase CommandType_cmd_vkCmdDrawIndirect:\n\t\treturn subDovkCmdDrawIndirect\n\tcase CommandType_cmd_vkCmdEndQuery:\n\t\treturn subDovkCmdEndQuery\n\tcase 
CommandType_cmd_vkCmdExecuteCommands:\n\t\treturn subDovkCmdExecuteCommands\n\tcase CommandType_cmd_vkCmdFillBuffer:\n\t\treturn subDovkCmdFillBuffer\n\tcase CommandType_cmd_vkCmdPushConstants:\n\t\treturn subDovkCmdPushConstants\n\tcase CommandType_cmd_vkCmdResetQueryPool:\n\t\treturn subDovkCmdResetQueryPool\n\tcase CommandType_cmd_vkCmdResolveImage:\n\t\treturn subDovkCmdResolveImage\n\tcase CommandType_cmd_vkCmdSetBlendConstants:\n\t\treturn subDovkCmdSetBlendConstants\n\tcase CommandType_cmd_vkCmdSetDepthBias:\n\t\treturn subDovkCmdSetDepthBias\n\tcase CommandType_cmd_vkCmdSetDepthBounds:\n\t\treturn subDovkCmdSetDepthBounds\n\tcase CommandType_cmd_vkCmdSetEvent:\n\t\treturn subDovkCmdSetEvent\n\tcase CommandType_cmd_vkCmdResetEvent:\n\t\treturn subDovkCmdResetEvent\n\tcase CommandType_cmd_vkCmdSetLineWidth:\n\t\treturn subDovkCmdSetLineWidth\n\tcase CommandType_cmd_vkCmdSetScissor:\n\t\treturn subDovkCmdSetScissor\n\tcase CommandType_cmd_vkCmdSetStencilCompareMask:\n\t\treturn subDovkCmdSetStencilCompareMask\n\tcase CommandType_cmd_vkCmdSetStencilReference:\n\t\treturn subDovkCmdSetStencilReference\n\tcase CommandType_cmd_vkCmdSetStencilWriteMask:\n\t\treturn subDovkCmdSetStencilWriteMask\n\tcase CommandType_cmd_vkCmdSetViewport:\n\t\treturn subDovkCmdSetViewport\n\tcase CommandType_cmd_vkCmdUpdateBuffer:\n\t\treturn subDovkCmdUpdateBuffer\n\tcase CommandType_cmd_vkCmdWriteTimestamp:\n\t\treturn subDovkCmdWriteTimestamp\n\tcase CommandType_cmd_vkCmdDebugMarkerBeginEXT:\n\t\treturn subDovkCmdDebugMarkerBeginEXT\n\tcase CommandType_cmd_vkCmdDebugMarkerEndEXT:\n\t\treturn subDovkCmdDebugMarkerEndEXT\n\tcase CommandType_cmd_vkCmdDebugMarkerInsertEXT:\n\t\treturn subDovkCmdDebugMarkerInsertEXT\n\tdefault:\n\t\tx := fmt.Sprintf(\"Should not reach here: %T\", cr)\n\t\tpanic(x)\n\t}\n}", "func GetCommand(text string) Executable {\n\tfor _, cmd := range commandsDoc {\n\t\tregx := regexp.MustCompile(cmd.regexValidation)\n\t\tmatch := 
regx.FindStringSubmatch(text)\n\n\t\tif len(match) > 0 {\n\t\t\targs := make(map[string]string)\n\n\t\t\tfor i, label := range regx.SubexpNames() {\n\t\t\t\tif i > 0 && i <= len(match) {\n\t\t\t\t\targs[label] = match[i]\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Feel free to improve this solution for abstraction\n\t\t\treturn cmd.instance.buildCommand(args)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (p PeerRegistrationReplyPacket) GetCommand() Command {\r\n\treturn p.Command\r\n}", "func (p MasterRegistrationReplyPacket) GetCommand() Command {\r\n\treturn p.Command\r\n}", "func (o *WorkflowCliCommandAllOf) GetCommand() string {\n\tif o == nil || o.Command == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Command\n}", "func (c *Command) Key() string {\n\treturn c.Name\n}", "func (p *Payload) GetCommand() string {\n\treturn p.Command\n}", "func (handler commandHandler) get(name string) (*command, bool) {\n\tcmd, found := handler.Cmds[name]\n\treturn &cmd, found\n}", "func (handler commandHandler) get(name string) (*command, bool) {\n\tcmd, found := handler.Cmds[name]\n\treturn &cmd, found\n}", "func GetCommand(input string) (string, string) {\n\tinputs := strings.Split(input, \"\\r\\n\")\n\tn1 := len(inputs)\n\tn := len(inputs[0])\n//\t\tabc := input[0:3]\n//\t\tlog.Printf(\"**%s--%s--%s--%s-\", input, inputs[0], (inputs[0])[1:3], abc)\n\t\t\n\tcom, rem := \"\", \"\"\n\tif n >= 3 && (inputs[0][0:3] == \"set\" || inputs[0][0:3] == \"cas\") {\n\t\t// start of a 2 line command\n\t\tif n1 < 3 {\t\t\t\t\t\t// includes \"\\r\\n\"\n\t\t\treturn \"\", input\t\t\t// if the command is not complete, wait for the rest of the command\n\t\t}\n\t\tvar in = strings.Index(input, \"\\r\\n\") + 2\n\t\tin += strings.Index(input[in:], \"\\r\\n\") + 2\n\t\tcom = input[:in]\n\t\trem = input[in:]\n\t} else if (n >= 3 && inputs[0][0:3] == \"get\") ||\n\t\t(n >= 4 && inputs[0][0:4] == \"getm\") ||\n\t\t(n >= 6 && inputs[0][0:6] == \"delete\") {\n\t\t// start of a 1 line command\n\t\tif n1 < 2 
{\t\t\t\t\t\t// includes \"\\r\\n\"\n\t\t\treturn \"\", input\t\t\t// if the command is not complete, wait for the rest of the command\n\t\t}\n\t\tvar in = strings.Index(input, \"\\r\\n\") + 2\n\t\tcom = input[:in]\n\t\trem = input[in:]\n\t\t\n\t} else {\n\t\treturn \"\", input\n\t}\n\treturn com, rem\n}", "func getCommand(name string, cmds []*command) *command {\n\tfor _, cmd := range cmds {\n\t\tif cmd.name == name {\n\t\t\treturn cmd\n\t\t}\n\t}\n\treturn nil\n}", "func getCommandFromCommandArg(c client.Client, commandArg string) *model.Command {\n\tif checkSlash(commandArg) {\n\t\treturn nil\n\t}\n\n\tcmd := getCommandFromTeamTrigger(c, commandArg)\n\tif cmd == nil {\n\t\tcmd, _, _ = c.GetCommandById(commandArg)\n\t}\n\treturn cmd\n}", "func Get(key string) (string, error) {\n\treturn Cli.Get(Ctx, key).Result()\n}", "func GetCommand(client *redis.Client, arg1 string) {\n\tval, err := client.Get(arg1).Result()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tfmt.Println(\"Key doesnt exists\")\n\t}\n\tfmt.Println(val)\n\t//loop.LoopFunction(client)\n}", "func (c *Console) GetCommandGroup(cmd *flags.Command) string {\n\n\t// Sliver commands are searched for if we are in this menu\n\tfor _, group := range c.current.cmd.groups {\n\t\tfor _, c := range group.cmds {\n\t\t\tif c.Name == cmd.Name {\n\t\t\t\t// We don't return the name if the command is not generated\n\t\t\t\tif c.cmd != nil {\n\t\t\t\t\treturn group.Name\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}", "func (m moduleCommands) Get(name string) Command {\n\tc, ok := m[name]\n\tif ok {\n\t\treturn c\n\t}\n\treturn &doNothingCommand{}\n}", "func (repo *BaseRepository) GetCommand(botID int64, channel string, commandName string) *models.Command {\n\trepo.mutex.RLock()\n\tdefer repo.mutex.RUnlock()\n\n\t// Check if channel exists.\n\tchanInfo := repo.channelMap[channel]\n\tif chanInfo == nil {\n\t\treturn nil\n\t}\n\t// Check if the channel has command with the name and botID\n\tcommand, exists := 
chanInfo.Commands[commandName]\n\tif exists && command.BotID == botID {\n\t\treturn &command\n\t}\n\treturn nil\n}", "func (c *GetCommand) CommandName() string {\n\treturn commandName(\"get\")\n}", "func (o *WorkflowSshCmdAllOf) GetCommand() string {\n\tif o == nil || o.Command == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Command\n}", "func get_cmd() string {\n\tui := &input.UI{\n\t\tWriter: os.Stdout,\n\t\tReader: os.Stdin,\n\t}\n\tquery := \"Select option\"\n\tcmd, _ := ui.Select(query, []string{\"LIST\", \"INFO\", \"PLAY\", \"STOP\", \"QUIT\"}, &input.Options{\n\t\tLoop: true,\n\t})\n\treturn cmd\n}", "func GetCommand() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"ransim {get,set,create,delete,starts,stop,load,clear} [args]\",\n\t\tShort: \"ONOS RAN simulator commands\",\n\t}\n\n\tcli.AddConfigFlags(cmd, defaultAddress)\n\tcmd.AddCommand(cli.GetConfigCommand())\n\n\tcmd.AddCommand(getCreateCommand())\n\tcmd.AddCommand(getDeleteCommand())\n\tcmd.AddCommand(getGetCommand())\n\tcmd.AddCommand(getSetCommand())\n\n\tcmd.AddCommand(startNodeCommand())\n\tcmd.AddCommand(stopNodeCommand())\n\n\tcmd.AddCommand(loadCommand())\n\tcmd.AddCommand(clearCommand())\n\n\tcmd.AddCommand(loglib.GetCommand())\n\treturn cmd\n}", "func (m *DeviceAndAppManagementAssignmentFilter) GetRule()(*string) {\n val, err := m.GetBackingStore().Get(\"rule\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (c *CommandDescriptor) Command() interface{} {\n\treturn c.command\n}", "func GetQueryCmd(queryRoute string) *cobra.Command {\n\t// Group clerk queries under a subcommand\n\tcmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tcmd.AddCommand(\n\t\tGetStateRecord(),\n\t)\n\n\treturn cmd\n}", "func (cmd *baseCommand) Key() 
string {\n\treturn cmd.key\n}", "func GetCommandValidated(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"validated [txid]\",\n\t\tShort: \"check if a tx is previously validated\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\t\t\tdigest := args[0]\n\n\t\t\tres, _, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/validated/%s\", queryRoute, digest), nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"could not get validation - %s \\n\", digest)\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar out types.QueryResValidated\n\t\t\tcdc.MustUnmarshalJSON(res, &out)\n\t\t\treturn cliCtx.PrintOutput(out)\n\t\t},\n\t}\n}", "func (c *Controller) Get(_ context.Context, command string) (interface{}, error) {\n\tif !c.Config.AllowAll && !c.Commands.Lookup(command) {\n\t\treturn nil, errors.NewForbidden(fmt.Errorf(\"command %q is not allowed\", command))\n\t}\n\n\tcmd := exec.Command(\"sh\", \"-c\", command) // \"sh\", \"-c\", \"cd .. 
&& ls -la\"\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo func() {\n\t\tdefer stdin.Close()\n\t\tio.WriteString(stdin, \"values written to stdin are passed to cmd's standard input\")\n\t}()\n\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn err, nil\n\t}\n\n\treturn out, nil\n}", "func (o *InputEventWithModifiers) GetCommand() gdnative.Bool {\n\t//log.Println(\"Calling InputEventWithModifiers.GetCommand()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"InputEventWithModifiers\", \"get_command\")\n\n\t// Call the parent method.\n\t// bool\n\tretPtr := gdnative.NewEmptyBool()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewBoolFromPointer(retPtr)\n\treturn ret\n}", "func Getcommand(text string, separ byte) (command []string, err error) {\n\ttext = strings.Trim(text, \" \")\n\tif text[0] != separ {\n\t\treturn nil, fmt.Errorf(\"Commands must start with: '%c' \", separ)\n\t}\n\ts := strings.Split(text, \" \")\n\treturn s, nil\n}", "func (b *Bot) getCommand(update tgbotapi.Update) string {\n\tif update.Message != nil {\n\t\tif update.Message.IsCommand() {\n\t\t\treturn update.Message.Command()\n\t\t}\n\t} else if update.CallbackQuery != nil {\n\t\treturn update.CallbackQuery.Data\n\t}\n\n\treturn \"\"\n}", "func getShellCommand(rootCmd *cobra.Command) *cobra.Command {\n\tsubcommands := rootCmd.Commands()\n\tfor _, command := range subcommands {\n\t\tif command.Name() == \"shell\" {\n\t\t\treturn command\n\t\t}\n\t}\n\treturn nil\n}", "func GetConfig() cli.Command {\n\tr := routesCmd{}\n\treturn cli.Command{\n\t\tName: \"route\",\n\t\tUsage: \"Inspect configuration key for this route\",\n\t\tDescription: \"This command gets the configurations for a 
route.\",\n\t\tAliases: []string{\"routes\", \"r\"},\n\t\tCategory: \"MANAGEMENT COMMAND\",\n\t\tBefore: func(c *cli.Context) error {\n\t\t\tvar err error\n\t\t\tr.provider, err = client.CurrentProvider()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tr.client = r.provider.APIClient()\n\t\t\treturn nil\n\t\t},\n\t\tArgsUsage: \"<app-name> </path> <key>\",\n\t\tAction: r.getConfig,\n\t}\n}", "func GetCommand() *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"ui\",\n\t\tShort: \"display the game ui\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tlog.Println(\"starting UI server: 3000\")\n\t\t\tui.ServeUI()\n\t\t},\n\t}\n}", "func Get() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"get\",\n\t\tShort: \"Gets CA certificate for SSH engine.\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tengineName, _ := cmd.Flags().GetString(\"engine\")\n\n\t\t\tapi, err := vault.NewAPI()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpublicKey, err := controller.NewController(api).GetCACertificate(engineName)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tlog.Infoln(publicKey)\n\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tcmd.Flags().String(\"engine\", vault.SSHEngineDefaultName, \"SSH engine path\")\n\n\treturn cmd\n}", "func (c *CommandRegister) Get(command interface{}) (CommandHandle, error) {\n\tname := commandName(command)\n\n\thandler, ok := c.registry[name]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"can't find %s in registry\", name)\n\t}\n\treturn handler, nil\n}", "func (command *Command) GetCommand() cli.Command {\n\treturn cli.Command{\n\t\tName: \"cache\",\n\t\tUsage: \"Caching data for faster operation and autocompletion.\",\n\t\tSubcommands: []cli.Command{\n\t\t\t{\n\t\t\t\tName: \"clear\",\n\t\t\t\tUsage: \"Clear all cached data\",\n\t\t\t\tAction: command.ClearCacheAction,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"warmup\",\n\t\t\t\tUsage: \"Fetch data and cache them\",\n\t\t\t\tAction: 
command.WarmupCacheAction,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"dump\",\n\t\t\t\tUsage: \"Print current cache content\",\n\t\t\t\tAction: command.DumpCacheAction,\n\t\t\t},\n\t\t},\n\t}\n}", "func GetValidatorCmd(storeName string, cdc *wire.Codec) *cobra.Command {\n\tcmdr := commander{\n\t\tstoreName,\n\t\tcdc,\n\t}\n\treturn &cobra.Command{\n\t\tUse: \"validator\",\n\t\tShort: \"Query validator\",\n\t\tRunE: cmdr.getValidatorCmd,\n\t}\n}", "func (e exe) Command() string {\n\treturn e.command\n}", "func (cri *CmdReaderImpl) GetCommand(reader io.Reader) (string, error) {\n\tdefer cri.result.Reset()\n\tscanner := bufio.NewScanner(reader)\n\n\tfor scanner.Scan() {\n\t\tif err := scanner.Err(); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"scan string: %w\", err)\n\t\t}\n\n\t\tstr := strings.TrimSpace(scanner.Text())\n\n\t\tif err := cri.parseInputString(str); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"invalid string %s: %w\", str, err)\n\t\t}\n\n\t\tif cri.isEndOfCommand() {\n\t\t\tbreak\n\t\t}\n\n\t\tcri.result.WriteRune(' ')\n\t}\n\n\treturn cri.result.String(), nil\n}", "func GetQueryCmd(queryRoute string) *cobra.Command {\n\t// Group id queries under a subcommand\n\tcmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\tcmd.AddCommand(\n\t\tCmdGetGovernmentAddr(),\n\t)\n\treturn cmd\n}", "func GetQueryCmd(queryRoute string) *cobra.Command {\n\t// Group pki queries under a subcommand\n\tcmd := &cobra.Command{\n\t\tUse: pkitypes.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", pkitypes.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tcmd.AddCommand(CmdListApprovedCertificates()) // TODO: use store-based index of cert 
Ids\n\tcmd.AddCommand(CmdShowApprovedCertificates())\n\tcmd.AddCommand(CmdListProposedCertificate()) // TODO: use store-based index of cert Ids\n\tcmd.AddCommand(CmdShowProposedCertificate())\n\tcmd.AddCommand(CmdShowChildCertificates())\n\tcmd.AddCommand(CmdListProposedCertificateRevocation()) // TODO: use store-based index of cert Ids\n\tcmd.AddCommand(CmdShowProposedCertificateRevocation())\n\tcmd.AddCommand(CmdListRevokedCertificates()) // TODO: use store-based index of cert Ids\n\tcmd.AddCommand(CmdShowRevokedCertificates())\n\tcmd.AddCommand(CmdShowApprovedRootCertificates())\n\tcmd.AddCommand(CmdShowRevokedRootCertificates())\n\tcmd.AddCommand(CmdShowApprovedCertificatesBySubject())\n\tcmd.AddCommand(CmdListRejectedCertificate())\n\tcmd.AddCommand(CmdShowRejectedCertificate())\n\tcmd.AddCommand(CmdListPkiRevocationDistributionPoint())\n\tcmd.AddCommand(CmdShowPkiRevocationDistributionPoint())\n\tcmd.AddCommand(CmdShowPkiRevocationDistributionPointsByIssuerSubjectKeyID())\n\t// this line is used by starport scaffolding # 1\n\n\treturn cmd\n}", "func (msg *MsgGetAddr) Command() string {\n\treturn CmdGetAddr\n}", "func getCommand(args []string) (*command, []string, error) {\n\tif len(args) < 2 {\n\t\treturn nil, nil, fmt.Errorf(\"Too few arguments: %q\", args)\n\t}\n\n\tfor _, c := range commands {\n\t\tif c.flag == args[1] {\n\t\t\treturn &c, args[2:], nil\n\t\t}\n\t}\n\n\t// command not found\n\treturn nil, nil, fmt.Errorf(\"Command not found: %q\", args)\n}", "func (m *Win32LobAppRegistryRule) GetOperator()(*Win32LobAppRuleOperator) {\n return m.operator\n}", "func (m *AssignmentFilterEvaluateRequest) GetRule()(*string) {\n val, err := m.GetBackingStore().Get(\"rule\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func GetCommand(client Client) error{\n conn := client.Conn\n argv := client.CommandArgv\n err := checkCommandProtocol(&client)\n if err != nil {\n return err\n }\n resp := 
ServerInstance.Dict[argv[4]]\n\n //类型判定\n tmpValue, ok := (resp.Value).(string)\n if !ok {\n responseNil(conn)\n } else {\n responseValue(tmpValue, conn)\n }\n return nil\n}", "func GetQueryCmd(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\t// Group profile queries under a subcommand\n\tprofileQueryCmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tprofileQueryCmd.AddCommand(\n\t\tflags.GetCommands(\n\t\t\tGetCmdBalance(queryRoute, cdc),\n\t\t)...,\n\t)\n\n\treturn profileQueryCmd\n}", "func GetQueryCmd(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\t// Group did queries under a subcommand\n\tdidQueryCmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tdidQueryCmd.AddCommand(\n\t\tflags.GetCommands(\n\t\t\tGetCmdDidDocumentAll(queryRoute, cdc),\n\t\t\tGetCmdVerifiableCredentialAll(queryRoute, cdc),\n\t\t)...,\n\t)\n\n\treturn didQueryCmd\n}", "func (mc ModuleClient) GetQueryCmd() *cobra.Command {\n\tagentQueryCmd := &cobra.Command{\n\t\tUse: \"group\",\n\t\tShort: \"Querying commands for the group module\",\n\t}\n\n\tagentQueryCmd.AddCommand(client.GetCommands(\n\t\tagentcmd.GetCmdGetGroup(mc.storeKey, mc.cdc),\n\t\tagentcmd.GetCmdGetProposal(mc.storeKey, mc.cdc),\n\t)...)\n\n\treturn agentQueryCmd\n}", "func (s *ApplicationsService) GetResourceMatchingEvaluationOrderCommand(input *GetResourceMatchingEvaluationOrderCommandInput) (result *ResourceMatchingEvaluationOrderView, resp *http.Response, err error) {\n\tpath := \"/applications/{id}/resourceMatchingEvaluationOrder\"\n\tpath = strings.Replace(path, \"{id}\", input.Id, -1)\n\n\trel := &url.URL{Path: 
fmt.Sprintf(\"%s%s\", s.client.Context, path)}\n\treq, err := s.client.newRequest(\"GET\", rel, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err = s.client.do(req, &result)\n\tif err != nil {\n\t\treturn result, resp, err\n\t}\n\treturn result, resp, nil\n\n}", "func MGetCacheCmd(c Cacheable) string {\n\tif c.cs.s[0][0] == 'J' {\n\t\treturn \"JSON.GET\" + c.cs.s[len(c.cs.s)-1]\n\t}\n\treturn \"GET\"\n}", "func (mc ModuleClient) GetQueryCmd() *cobra.Command {\n\tstakingQueryCmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: \"Querying commands for the staking module\",\n\t}\n\n\tstakingQueryCmd.AddCommand(client.GetCommands(\n\t\tstakingcli.GetCmdQueryDelegation(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryDelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryUnbondingDelegation(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryUnbondingDelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryRedelegation(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryRedelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryValidator(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryValidators(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryValidatorDelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryValidatorUnbondingDelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryValidatorRedelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryParams(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryPool(mc.storeKey, mc.cdc))...)\n\n\treturn stakingQueryCmd\n\n}", "func getCommand(meta *meta, args []string, suggest string) *Command {\n\trawCommand := removeOptions(args)\n\tsuggestIsOption := argIsOption(suggest)\n\n\tif !suggestIsOption {\n\t\trawCommand = append(rawCommand, suggest)\n\t}\n\n\trawCommand = meta.CliConfig.Alias.ResolveAliases(rawCommand)\n\n\t// Find the closest command in case there is multiple positional arguments\n\tfor ; len(rawCommand) > 1; rawCommand = rawCommand[:len(rawCommand)-1] 
{\n\t\tcommand, foundCommand := meta.Commands.find(rawCommand...)\n\t\tif foundCommand {\n\t\t\treturn command\n\t\t}\n\t}\n\treturn nil\n}", "func GetQueryCmd(storeKey string, cdc *codec.Codec) *cobra.Command {\n\toracleCmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: \"Querying commands for the oracle module\",\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\toracleCmd.AddCommand(flags.GetCommands(\n\t\tGetQueryCmdParams(storeKey, cdc),\n\t\tGetQueryCmdCounts(storeKey, cdc),\n\t\tGetQueryCmdDataSource(storeKey, cdc),\n\t\tGetQueryCmdOracleScript(storeKey, cdc),\n\t\tGetQueryCmdRequest(storeKey, cdc),\n\t\tGetQueryCmdRequestSearch(storeKey, cdc),\n\t\tGetQueryCmdValidatorStatus(storeKey, cdc),\n\t\tGetQueryCmdReporters(storeKey, cdc),\n\t\tGetQueryActiveValidators(storeKey, cdc),\n\t\tGetQueryPendingRequests(storeKey, cdc),\n\t)...)\n\treturn oracleCmd\n}", "func GetQueryCmd(queryRoute string) *cobra.Command {\n\t// Group id queries under a subcommand\n\tcmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tcmd.AddCommand(\n\t\tCmdPoolFunds(),\n\t\tCmdTrustedServiceProviders(),\n\t\tCmdGetInvites(),\n\t\tCmdGetInvite(),\n\n\t\tCmdMembership(),\n\t\tCmdMemberships(),\n\t)\n\n\treturn cmd\n}", "func GetQueryCmd(queryRoute string) *cobra.Command {\n\t// Group topup queries under a subcommand\n\tcmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tcmd.AddCommand(\n\t\tGetSequenceCmd(),\n\t)\n\n\treturn cmd\n}", "func GetQueryCmd() *cobra.Command {\n\tqueryCmd := &cobra.Command{\n\t\tUse: 
\"ibc-router\",\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t}\n\n\tqueryCmd.AddCommand(\n\t\tGetCmdParams(),\n\t)\n\n\treturn queryCmd\n}", "func GetQueryCmd(queryRoute string) *cobra.Command {\n\t// Group bifrost queries under a subcommand\n\tcmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\t// this line is used by starport scaffolding # 1\n\n\tcmd.AddCommand(CmdListSendToTezosSigned())\n\tcmd.AddCommand(CmdShowSendToTezosSigned())\n\n\tcmd.AddCommand(CmdListReceivedFa12Txs())\n\tcmd.AddCommand(CmdShowReceivedFa12Txs())\n\n\tcmd.AddCommand(CmdListSendToTezos())\n\tcmd.AddCommand(CmdShowSendToTezos())\n\n\tcmd.AddCommand(CmdListReceivedTxs())\n\tcmd.AddCommand(CmdShowReceivedTxs())\n\n\treturn cmd\n}", "func GetQueryCmd(queryRoute string) *cobra.Command {\n\t// Group gentlemint queries under a subcommand\n\tcmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tAliases: []string{types.ModuleNameAlias},\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tcmd.AddCommand(CmdShowExchangeRate())\n\n\tcmd.AddCommand(CmdListLevelFee())\n\tcmd.AddCommand(CmdShowLevelFee())\n\tcmd.AddCommand(CmdListActionLevelFee())\n\tcmd.AddCommand(CmdShowActionLevelFee())\n\tcmd.AddCommand(CmdCheckFees())\n\n\tcmd.AddCommand(CmdBalances())\n\n\t// this line is used by starport scaffolding # 1\n\n\treturn cmd\n}", "func (s *ApplicationsService) GetResourceMatchingEvaluationOrderCommand(input *GetResourceMatchingEvaluationOrderCommandInput) (output *models.ResourceMatchingEvaluationOrderView, resp *http.Response, err error) {\n\tpath := \"/applications/{id}/resourceMatchingEvaluationOrder\"\n\tpath = strings.Replace(path, \"{id}\", 
input.Id, -1)\n\n\top := &request.Operation{\n\t\tName: \"GetResourceMatchingEvaluationOrderCommand\",\n\t\tHTTPMethod: \"GET\",\n\t\tHTTPPath: path,\n\t\tQueryParams: map[string]string{},\n\t}\n\toutput = &models.ResourceMatchingEvaluationOrderView{}\n\treq := s.newRequest(op, nil, output)\n\n\tif req.Send() == nil {\n\t\treturn output, req.HTTPResponse, nil\n\t}\n\treturn nil, req.HTTPResponse, req.Error\n}", "func GetQueryCmd(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\t// Group qac queries under a subcommand\n\tqacQueryCmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tqacQueryCmd.AddCommand(\n\t\tflags.GetCommands(\n\t\t\tGetCmdListQuestion(queryRoute, cdc),\n\t\t\tGetCmdGetQuestion(queryRoute, cdc),\n\t\t\tGetCmdGetOwnQuestion(queryRoute, cdc),\n\t\t\t)...,\n\t)\n\n\treturn qacQueryCmd\n}", "func GetQueryCmd(storeKey string, cdc *codec.Codec) *cobra.Command {\n\tburnQueryCommand := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: \"Querying commands for the burn module\",\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\tburnQueryCommand.AddCommand(client.GetCommands(\n\t\tGetCommandValidated(storeKey, cdc),\n\t)...)\n\treturn burnQueryCommand\n}", "func GetQueryCmd(queryRoute string) *cobra.Command {\n\t// Group model queries under a subcommand\n\tcmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tcmd.AddCommand(CmdShowVendorProducts())\n\tcmd.AddCommand(CmdListModel())\n\tcmd.AddCommand(CmdShowModel())\n\tcmd.AddCommand(CmdShowModelVersion())\n\tcmd.AddCommand(CmdShowModelVersions())\n\t// this line is used 
by starport scaffolding # 1\n\n\treturn cmd\n}", "func (rc *Cache) Get(command, key string) interface{} {\n\tif v, err := rc.do(command, key); err == nil {\n\n\t\treturn v\n\t}\n\treturn nil\n}", "func (AppModuleBasic) GetQueryCmd(cdc *codec.Codec) *cobra.Command {\n\t// return cli.GetQueryCmd(cdc)\n\tpanic(\"need to add cli.GetQueryCmd(cdc)\")\n}", "func getCommandFromTeamTrigger(c client.Client, teamTrigger string) *model.Command {\n\tarr := strings.Split(teamTrigger, \":\")\n\tif len(arr) != 2 {\n\t\treturn nil\n\t}\n\n\tteam, _, _ := c.GetTeamByName(arr[0], \"\")\n\tif team == nil {\n\t\treturn nil\n\t}\n\n\ttrigger := arr[1]\n\tif len(trigger) == 0 {\n\t\treturn nil\n\t}\n\n\tlist, _, _ := c.ListCommands(team.Id, false)\n\tif list == nil {\n\t\treturn nil\n\t}\n\n\tfor _, cmd := range list {\n\t\tif cmd.Trigger == trigger {\n\t\t\treturn cmd\n\t\t}\n\t}\n\treturn nil\n}", "func (hc *Hailconfig) Get(alias string) (string, error) {\n\treturn hc.Scripts[alias].Command, nil\n\n}", "func GetQueryCmd(key string, cdc *codec.Codec) *cobra.Command {\n\n\tcmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: \"Deployment query commands\",\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tcmd.AddCommand(flags.GetCommands(\n\t\tcmdDeployments(key, cdc),\n\t\tcmdDeployment(key, cdc),\n\t\tgetGroupCmd(key, cdc),\n\t)...)\n\n\treturn cmd\n}", "func (command *Command) GetCommand() cli.Command {\n\n\tlistHookCommand := ListHooksCommand{\n\t\tSettings: command.Settings,\n\t\tflags: &ListHooksCommandFlags{},\n\t}\n\n\teolHookCommand := EolHookCommand{\n\t\tSettings: command.Settings,\n\t}\n\n\tpubHookCommand := PubHookCommand{\n\t\tSettings: command.Settings,\n\t}\n\n\tyaccHookCommand := YaccHookCommand{\n\t\tSettings: command.Settings,\n\t}\n\n\trfpHookCommand := RfpHookCommand{\n\t\tSettings: command.Settings,\n\t}\n\n\treturn cli.Command{\n\t\tName: \"hooks\",\n\t\tUsage: \"Hooks 
operations\",\n\t\tSubcommands: []cli.Command{\n\t\t\tlistHookCommand.GetCommand(),\n\t\t\teolHookCommand.GetCommand(),\n\t\t\tpubHookCommand.GetCommand(),\n\t\t\tyaccHookCommand.GetCommand(),\n\t\t\trfpHookCommand.GetCommand(),\n\t\t},\n\t}\n}", "func (amb AppModuleBasic) GetQueryCmd(cdc *codec.Codec) *cobra.Command {\n\treturn cli.GetQueryCmd(cdc)\n}", "func (t Template) GetExecutableCommand(req ExecutionRequest) (string, error) {\n\tvar (\n\t\terr error\n\t\tresult bytes.Buffer\n\t)\n\n\t// Get the request's custom fields.\n\tcustomFields := *req.GetExecutionRequestCustom()\n\texecutionPayload, ok := customFields[TemplatePayloadKey]\n\tif !ok || executionPayload == nil {\n\t\treturn \"\", err\n\t}\n\n\texecutionPayload, err = t.compositeUserAndDefaults(executionPayload)\n\n\tschemaLoader := gojsonschema.NewGoLoader(t.Schema)\n\tdocumentLoader := gojsonschema.NewGoLoader(executionPayload)\n\n\t// Perform JSON schema validation to ensure that the request's template\n\t// payload conforms to the template's JSON schema.\n\tvalidationResult, err := gojsonschema.Validate(schemaLoader, documentLoader)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif validationResult != nil && validationResult.Valid() != true {\n\t\tvar res []string\n\t\tfor _, resultError := range validationResult.Errors() {\n\t\t\tres = append(res, resultError.String())\n\t\t}\n\t\treturn \"\", errors.New(strings.Join(res, \"\\n\"))\n\t}\n\n\t// Create a new template string based on the template.Template.\n\ttextTemplate, err := template.New(\"command\").Funcs(sprig.TxtFuncMap()).Parse(t.CommandTemplate)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Dump payload into the template string.\n\tif err = textTemplate.Execute(&result, executionPayload); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn result.String(), nil\n}", "func (r *MessageExecuteCommand) GetCommand() string {\n\treturn r.Command\n}", "func GetQueryCmd(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\t// Group pot 
queries under a subcommand\n\tpotQueryCmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tpotQueryCmd.AddCommand(\n\t\tflags.GetCommands(\n\t\t\tGetCmdQueryVolumeReport(queryRoute, cdc),\n\t\t)...,\n\t)\n\n\treturn potQueryCmd\n}", "func (AppModuleBasic) GetQueryCmd(cdc *codec.Codec) *cobra.Command {\n\treturn cli.GetQueryCmd(cdc)\n}", "func GetQueryCmd(cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: \"Querying commands for the relationships module\",\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\tcmd.AddCommand(flags.GetCommands(\n\t\tGetCmdQueryUserRelationships(cdc),\n\t\tGetCmdQueryRelationships(cdc),\n\t\tGetCmdQueryUserBlocks(cdc),\n\t)...)\n\treturn cmd\n}", "func (cmd *GetUserListCmd) Command() string {\n\treturn cmd.cmd\n}", "func getRequiredKeyName(key, commandName string) (requiredKeyName string, ok bool) {\n\tif !strings.HasPrefix(key, fmt.Sprintf(commandKeyPattern, commandName)) {\n\t\treturn\n\t}\n\n\tif requiredKeyName = strings.TrimPrefix(key, fmt.Sprintf(commandKeyPattern, commandName)); requiredKeyName == \"\" {\n\t\treturn\n\t}\n\n\tok = true\n\n\treturn\n}", "func getExportCmd(key, value string) string {\n\treturn fmt.Sprintf(\"export %s=%s\", key, value)\n}", "func GetQueryCmd(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\tqueryCmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tqueryCmd.AddCommand(flags.GetCommands(\n\t\t// committees\n\t\tGetCmdQueryCommittee(queryRoute, cdc),\n\t\tGetCmdQueryCommittees(queryRoute, cdc),\n\t\t// 
proposals\n\t\tGetCmdQueryProposal(queryRoute, cdc),\n\t\tGetCmdQueryProposals(queryRoute, cdc),\n\t\t// votes\n\t\tGetCmdQueryVotes(queryRoute, cdc),\n\t\t// other\n\t\tGetCmdQueryProposer(queryRoute, cdc),\n\t\tGetCmdQueryTally(queryRoute, cdc),\n\t\tGetCmdQueryRawParams(queryRoute, cdc))...)\n\n\treturn queryCmd\n}", "func (c *Commands) Get(name string) *Command {\n\tfor _, cmd := range c.list {\n\t\tif cmd.Name == name {\n\t\t\treturn cmd\n\t\t}\n\t\tfor _, a := range cmd.Aliases {\n\t\t\tif a == name {\n\t\t\t\treturn cmd\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func GetContribCmd(storeName string, cdc *wire.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"query [key]\",\n\t\tShort: \"Query contrib status\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\t// find the key to look up the contrib\n\t\t\tkey, err := hex.DecodeString(args[0])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// perform query\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\t\t\tres, err := cliCtx.QueryStore(key, storeName)\n\t\t\t// res, err := ctx.Query(storeName)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// parse out the value\n\t\t\tvar ctb contrib.Status\n\t\t\terr = cdc.UnmarshalBinaryBare(res, &ctb)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(res)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// print out whole contrib\n\t\t\toutput, err := json.MarshalIndent(ctb, \"\", \" \")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif viper.GetBool(flagScore) {\n\t\t\t\tfmt.Println(ctb.GetScore())\n\t\t\t} else {\n\t\t\t\tfmt.Println(string(output))\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().Bool(flagScore, false, \"bool of only showing score\")\n\treturn cmd\n}", "func (ui *UI) GetCommandSelect() string {\n\tprompt := promptui.Select{\n\t\tLabel: \"Select Commnad\",\n\t\tItems: []string{\"exec\", \"logs\", \"stop\"},\n\t}\n\n\t_, result, err := prompt.Run()\n\tif err 
!= nil {\n\t\tif !strings.Contains(err.Error(), \"^\") {\n\t\t\tui.logger.Errorf(\"Select containers dialog failed | %s\", err)\n\t\t}\n\t}\n\treturn result\n}", "func GetQueryCmd(cdc *amino.Codec) *cobra.Command {\n\tqueryCmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: \"Querying commands for the currencies module\",\n\t}\n\n\tqueryCmd.AddCommand(\n\t\tsdkClient.GetCommands(\n\t\t\tcli.GetIssue(types.ModuleName, cdc),\n\t\t\tcli.GetCurrency(types.ModuleName, cdc),\n\t\t\tcli.GetCurrencies(types.ModuleName, cdc),\n\t\t\tcli.GetWithdraw(types.ModuleName, cdc),\n\t\t\tcli.GetWithdraws(types.ModuleName, cdc),\n\t\t)...)\n\n\treturn queryCmd\n}", "func (AppModuleBasic) GetQueryCmd(cdc *amino.Codec) *cobra.Command {\n\treturn client.GetQueryCmd(cdc)\n}", "func GetQueryCmd(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\t// Group supplychain queries under a subcommand\n\tsupplychainQueryCmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: fmt.Sprintf(\"Querying commands for the %s module\", types.ModuleName),\n\t\tDisableFlagParsing: true,\n\t\tSuggestionsMinimumDistance: 2,\n\t\tRunE: client.ValidateCmd,\n\t}\n\n\tsupplychainQueryCmd.AddCommand(\n\t\tflags.GetCommands(\n // this line is used by starport scaffolding # 1\n\t\t\tGetCmdListShipment(queryRoute, cdc),\n\t\t\tGetCmdGetShipment(queryRoute, cdc),\n\t\t\tGetCmdListReceipt(queryRoute, cdc),\n\t\t\tGetCmdGetReceipt(queryRoute, cdc),\n\t\t)...,\n\t)\n\n\treturn supplychainQueryCmd\n}", "func GetCmd() (int){\n url := \"http://127.0.0.1:8080/\"\n resp, err := http.Get(url)\n if err != nil {\n //log.Fatalln(err)\n return 0\n }\n\n body, err := ioutil.ReadAll(resp.Body)\n\n if err != nil {\n //log.Fatalln(err)\n return 0\n }\n re := regexp.MustCompile(\"\\\\(cmd\\\\).*?\\\\(cmd\\\\)\")\n cmdParsed := re.FindStringSubmatch(string(body))\n cmd := strings.Join(cmdParsed, \" \")\n cmd = strings.ReplaceAll(cmd, \"(cmd)\", \"\")\n\n re = regexp.MustCompile(\"\\\\(arg\\\\).*?\\\\(arg\\\\)\")\n 
argParsed := re.FindStringSubmatch(string(body))\n arg := strings.Join(argParsed, \" \")\n arg = strings.ReplaceAll(arg, \"(arg)\", \"\")\n arg = html.UnescapeString(arg)\n\n\n // Debugging commmand input\n // fmt.Println(\"Command is: \" + cmd + \" \" + arg + \" \" + val)\n \n args, err := shellwords.Parse(arg)\n\n if err != nil{\n //log.Fatalln(err)\n return 0\n }\n\n var out []byte\n\n if cmd != \"\" && len(args) > 0 {\n out, err = exec.Command(cmd, args...).Output()\n\t} else if cmd != \"\" {\n out, err = exec.Command(cmd).Output()\n\t} \n\n if err != nil {\n //log.Fatalln(err)\n return 0\n }\n SendResponse(string(out))\n\n return 0\n}", "func GetCurlCommand(req *http.Request) (*CurlCommand, error) {\n\tcommand := CurlCommand{}\n\n\tcommand.append(\"curl\")\n\n\tcommand.append(\"-X\", bashEscape(req.Method))\n\n\tif req.Body != nil {\n\t\tvar buff bytes.Buffer\n\t\tbodyReader, err := req.GetBody()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"getCurlCommand: GetBody error: %w\", err)\n\t\t}\n\t\t_, err = buff.ReadFrom(bodyReader)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"getCurlCommand: buffer read from body erorr: %w\", err)\n\t\t}\n\t\tif len(buff.String()) > 0 {\n\t\t\tbodyEscaped := bashEscape(buff.String())\n\t\t\tcommand.append(\"-d\", bodyEscaped)\n\t\t}\n\t}\n\n\tvar keys []string\n\n\tfor k := range req.Header {\n\t\tkeys = append(keys, k)\n\t}\n\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\tcommand.append(\"-H\", appendHeader(req, k))\n\t}\n\n\tcommand.append(bashEscape(req.URL.String()))\n\n\treturn &command, nil\n}", "func (a *App) SelectCmd(value string) (Command, error) {\n\n\ta.DmnLogFile.Log.Println(\"Selecting \" + value)\n\n\tcmds, error := a.History.ReadCmdHistoryFile()\n\n\tif error != nil {\n\t\treturn Command{}, error\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tif strings.Index(cmd.CmdHash, value) == 0 {\n\t\t\treturn cmd, nil\n\t\t}\n\t}\n\n\treturn Command{}, nil\n}", "func GetQueryCmd() *cobra.Command 
{\n\tqueryCmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: \"Querying commands for the token module\",\n\t\tDisableFlagParsing: true,\n SuggestionsMinimumDistance: 2,\n RunE: client.ValidateCmd,\n\t}\n\n\tqueryCmd.AddCommand(\n\t\tGetCmdQueryParams(),\n\t\tGetCmdQueryTokens(),\n\t\tGetCmdQueryToken(),\n\t\tGetCmdQueryTokenFees(),\n\t\tGetCmdQueryBurntoken(),\n\t)\n\n\treturn queryCmd\n}" ]
[ "0.65028673", "0.62863606", "0.6265849", "0.60598564", "0.60506624", "0.59370685", "0.59200186", "0.58728564", "0.5871531", "0.5840316", "0.5814983", "0.57641596", "0.57635516", "0.5752387", "0.573877", "0.5720792", "0.5689586", "0.56780064", "0.56411", "0.56411", "0.56337404", "0.5632821", "0.5630021", "0.56019", "0.5596183", "0.55912006", "0.5578597", "0.5541174", "0.5527693", "0.5522142", "0.55102634", "0.5504914", "0.54944223", "0.546988", "0.54674006", "0.5456193", "0.5452394", "0.54521054", "0.54352885", "0.5410036", "0.5402752", "0.53959835", "0.53872776", "0.5382587", "0.5378245", "0.53725004", "0.5371337", "0.53705215", "0.5369852", "0.53674877", "0.5358686", "0.5357507", "0.5348681", "0.53483486", "0.5337048", "0.53369445", "0.5333186", "0.5327769", "0.53277624", "0.5322103", "0.53054345", "0.53042245", "0.5296336", "0.5293526", "0.5275346", "0.5274177", "0.5270424", "0.5263825", "0.5261091", "0.52571714", "0.5251205", "0.5245331", "0.5244502", "0.5234588", "0.52320826", "0.52314574", "0.52312607", "0.5211327", "0.5211002", "0.5189406", "0.5181691", "0.5177185", "0.5175552", "0.5164879", "0.5157829", "0.5150351", "0.51466274", "0.514649", "0.513726", "0.5126841", "0.51186085", "0.51157576", "0.51069945", "0.5083401", "0.50811934", "0.507655", "0.5075184", "0.5070454", "0.50685084", "0.5060392" ]
0.67667276
0
player and game instance in function is exist
func GeneralCheckSlotbacay(models *Models, data map[string]interface{}, playerId int64) ( *slotbacay.SlotbacayGame, *player.Player, error) { gameCode := slotbacay.SLOTBACAY_GAME_CODE currencyType := utils.GetStringAtPath(data, "currency_type") currencyType = currency.Money gameInstance := models.GetGameMini(gameCode, currencyType) if gameInstance == nil { return nil, nil, errors.New("err:invalid_currency_type") } slotbacayGame, isOk := gameInstance.(*slotbacay.SlotbacayGame) if !isOk { return nil, nil, errors.New("err:cant_happen") } player, err := models.GetPlayer(playerId) if err != nil { return nil, nil, err } return slotbacayGame, player, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (this *FreeRoom) DoUserEndGame(player *ScenePlayer) {\n\n}", "func (self *SinglePad) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func (self *GameObjectCreator) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func (self *TileSprite) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func (self *PhysicsP2) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func (g *Game) playing(p chat.Person) bool {\n\tfor _, q := range g.players {\n\t\tif p == q {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (self *Tween) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func (self *Graphics) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func simulateGame(p1 Player, p2 Player) Player {\n\trand.Seed(time.Now().UnixNano()) //forandrer seedet etter hva tiden er.\n\tvar rng int = rand.Intn(2)\n\tvar winner Player\n\n\tif rng == 0 {\n\t\twinner = p1\n\t} else {\n\t\twinner = p2\n\t}\n\treturn winner\n}", "func launch(p player) player {\n\tif !isAlreadyAchieved(p.Achievements, SharpShooter) && sharpShooter(p.getLastGame()) {\n\t\tp.Achievements = append(p.Achievements, SharpShooter)\n\t}\n\n\tif !isAlreadyAchieved(p.Achievements, Bruiser) && bruiser(p.getLastGame()) {\n\t\tp.Achievements = append(p.Achievements, Bruiser)\n\t}\n\n\tif !isAlreadyAchieved(p.Achievements, Veteran) && veteran(p.Games) {\n\t\tp.Achievements = append(p.Achievements, Veteran)\n\t}\n\n\tif !isAlreadyAchieved(p.Achievements, BigWinner) && bigWinner(p.NbWin) {\n\t\tp.Achievements = append(p.Achievements, BigWinner)\n\t}\n\n\tif !isAlreadyAchieved(p.Achievements, BigBro) && bigBro(p.Games) {\n\t\tp.Achievements = append(p.Achievements, BigBro)\n\t}\n\n\tif !isAlreadyAchieved(p.Achievements, Gandalf) && gandalf(p.getLastGame()) {\n\t\tp.Achievements = append(p.Achievements, Gandalf)\n\t}\n\treturn p\n}", "func (g *GameHub) createPlayer(conn *websocket.Conn) *Player {\n\tp1 := uuid.NewV4()\n\n\tp := 
&Player{\n\t\tuid: p1.String(),\n\t\ttest: make(chan []byte),\n\t\tpush: make(chan map[string]interface{}),\n\t\tghub: g,\n\t\tconn: conn,\n\t\tradius: 20,\n\t\txPos: 512,\n\t\tyPos: 512,\n\t}\n\tg.Lock()\n\tdefer g.Unlock()\n\tg.Players[p] = true\n\treturn p\n}", "func (firstWinGame) Play(player1, _ pig.Player) pig.Player {\n\treturn player1\n}", "func (g *Game) gameHandler() {\n\tfor true {\n\t\tif len(g.Players) > 1 && !g.isStarted {\n\t\t\tg.startGame()\n\t\t} else if len(g.Players) <= 1 {\n\t\t\tg.isStarted = false\n\t\t}\n\t}\n}", "func myGame(c *gin.Context, playerName string) {\n\tdata, err := game_data.ForPlayer(playerName)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, err)\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, data)\n}", "func (d *driver) newGame() func() {\n\tg := game{d}\n\tg.player.Start(g.sender)\n\treturn g.forward\n}", "func (room *Room) addPlayer(conn *Connection) bool {\n\tif room.done() {\n\t\treturn false\n\t}\n\troom.wGroup.Add(1)\n\tdefer func() {\n\t\troom.wGroup.Done()\n\t}()\n\n\tif room.lobby.Metrics() {\n\t\tmetrics.Players.WithLabelValues(room.ID, conn.User.Name).Inc()\n\t}\n\n\t// if room have already started\n\t// if room.Status != StatusPeopleFinding {\n\t// \treturn false\n\t// }\n\n\tconn.debug(\"Room(\" + room.ID + \") wanna connect you\")\n\n\t// if room hasnt got places\n\tif !room.Players.EnoughPlace() {\n\t\tconn.debug(\"Room(\" + room.ID + \") hasnt any place\")\n\t\treturn false\n\t}\n\n\troom.MakePlayer(conn, true)\n\n\tgo room.addAction(conn.ID(), ActionConnectAsPlayer)\n\tgo room.sendPlayerEnter(*conn, room.AllExceptThat(conn))\n\tgo room.lobby.sendRoomUpdate(*room, All)\n\n\tif !room.Players.EnoughPlace() {\n\t\troom.chanStatus <- StatusFlagPlacing\n\t}\n\n\treturn true\n}", "func (self *PhysicsP2PrismaticConstraint) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func (g *Game) getCurrentPlayer() *Player {\n\tfor _, p := range g.Players {\n\t\tif p.IsCurrent {\n\t\t\treturn 
p\n\t\t}\n\t}\n\tpanic(\"No current player!\")\n}", "func (g *game) addPlayer(p *player) {\n\t// If the player is first, make him admin.\n\tif len(g.Players) == 0 {\n\t\tp.Admin = true\n\t}\n\tg.Players = append(g.Players, p)\n\tp.State = \"online\"\n\tgo p.msgParser(g)\n\tg.broadcastGameInfo()\n}", "func makePlayer() {\n\tP = Player{name: \"Abraxas\", level: 1, health: 30, maxHealth: 30, defense: 4, attack: 11}\n}", "func updatePlayers(g *Game) {\n\tstate := g.gameState\n\n\tg.Team1.updatePlayers(state)\n\tg.Team2.updatePlayers(state)\n}", "func newPlayer(seat int, shoe *Shoe, cfg *Config, strategy Strategy, betAmount int) *Player {\n\tvar p Player\n\t// fmt.Println(\"in newPlayer\")\n\tp.seat = seat\n\tp.shoe = shoe\n\tp.cfg = cfg\n\tp.strategy = strategy\n\tp.betAmount = betAmount\n\treturn &p\n}", "func init() {\n\tplayer1 = &chessPlayer{\"Garry Kasparov\", \"White\", []piece{}}\n\tplayer2 = &chessPlayer{\"Deep Blue\", \"Black\", []piece{}}\n\tsessionPlayers = append(sessionPlayers, player1, player2)\n}", "func updateGame(game string, username string) bool {\n\tsuccess := false\n\tif success = checkID(game); success && gameStarted {\n\t\treturn true\n\t}\n\treturn false\n}", "func gameDestroy() {\n\n}", "func (g *Game) getStartPlayer() *Player {\n\tfor _, p := range g.Players {\n\t\tif p.TurnOrder == 0 {\n\t\t\treturn p\n\t\t}\n\t}\n\tpanic(\"No start player!\")\n}", "func populatePlayers(game *models.Game) (err config.ApiError) {\n\tif game.Player1Id.Valid() {\n\t\tgame.Player1, err = GetUserById(game.Player1Id)\n\t}\n\n\tif game.Player2Id.Valid() {\n\t\tgame.Player2, err = GetUserById(game.Player2Id)\n\t}\n\treturn\n}", "func startGame(request *pb.GameRequest) (*game.Game, error) {\n\tvar localGame *game.Game\n\n\tswitch request.Gametype {\n\tcase pb.GameRequest_RANDOM:\n\t\tlocalGame = game.New()\n\tcase pb.GameRequest_REPLAY:\n\t\tlocalGame = game.Replay(request.Seed)\n\tdefault:\n\t\treturn nil, errors.Errorf(\"invalid GameType %d\", 
request.Gametype)\n\t}\n\n\treturn localGame, nil\n}", "func (g *Game) startGame() {\n\tg.isStarted = true\n\tg.spawnRandomItems(len(g.Players) * 3)\n\tfor _, player := range g.Players {\n\t\tplayer.start()\n\t\tplayer.itemPos(*g)\n\t}\n}", "func (g *GoalScorer) Player() *Player {\n\treturn g.PlayerData.Data\n}", "func (poi *PlayerOperationsImpl) GetPlayer() *Player {\n\treturn poi.Player\n}", "func GetPlayer(cfg config.Cfg) SPlayer {\n\tif cfg.Exists(config.EXECPLAYER) && len(cfg.Val(config.EXECPLAYER)) > 0 {\n\t\treturn &eplayer{\n\t\t\tplayer: cfg.Val(config.EXECPLAYER)}\n\t}\n\tif cfg.Exists(config.USERBUFFERSIZE) {\n\t\treturn &mplayer{\n\t\t\tuserBufferSize: cfg.Intval(config.USERBUFFERSIZE)}\n\t}\n\treturn nil\n}", "func playTheGame(p myParameters, conn *net.Conn, KeyChannel chan string, clients map[int]net.Conn, nodeChan []chan myParameters) {\n\n\tworkingWorld := make([][]byte, p.ImageHeight)\n\tfor i := range p.world {\n\t\tworkingWorld[i] = make([]byte, p.ImageWidth)\n\t}\n\n\tfor i := 0; i < p.ImageHeight; i++ {\n\t\tfor j := 0; j < p.ImageWidth; j++ {\n\t\t\tval := p.world[i][j]\n\t\t\tworkingWorld[i][j] = val\n\t\t}\n\t}\n\t//Reports every 2 seconds the alive cells\n\tticker := time.NewTicker(2000 * time.Millisecond)\n\tturn := 0\n\tfor ; turn < p.Turns; turn++ {\n\t\tselect {\n\t\t//If any key is pressed\n\t\tcase key := <-KeyChannel:\n\t\t\t//The Pause key has been pressed\n\t\t\tif key == \"keypauseTheGame\\n\" {\n\t\t\t\tworldString := convertToString(workingWorld, p.ImageHeight, p.ImageWidth, p.Turns, p.Threads, 0)\n\t\t\t\tfmt.Fprintln(*conn, worldString)\n\t\t\t\tfmt.Fprintln(*conn, createStateChange(turn, \"p\"))\n\t\t\t\tfor {\n\t\t\t\t\t//The Pause key has been pressed again, so start executing\n\t\t\t\t\tkey2 := <-KeyChannel\n\t\t\t\t\tif key2 == \"keypauseTheGame\\n\" {\n\t\t\t\t\t\tfmt.Fprintln(*conn, createStateChange(turn, \"e\"))\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t//The Save key has been pressed\n\t\t\t} else if key == 
\"keysaveTheGame\\n\" {\n\t\t\t\t//Send the actual board to the controller in order to be saved as a PGM\n\t\t\t\tworldString := convertToString(workingWorld, p.ImageHeight, p.ImageWidth, p.Turns, p.Threads, 0)\n\t\t\t\tfmt.Fprintln(*conn, worldString)\n\t\t\t} else if key == \"keyquitTheGame\\n\" {\n\t\t\t\t//The Quit key has been pressed -> The actual board will be saved -> the controller disconnects\n\t\t\t\tworldString := convertToString(workingWorld, p.ImageHeight, p.ImageWidth, p.Turns, p.Threads, 0)\n\t\t\t\tfmt.Fprintln(*conn, worldString)\n\t\t\t\tfmt.Fprintln(*conn, createStateChange(turn, \"q\"))\n\t\t\t\treturn\n\t\t\t} else if key == \"keyshutDown\\n\" {\n\t\t\t\t//The ShutDown key has been pressed -> All the distributed components will shut down cleanly\n\t\t\t\tworldString := convertToString(workingWorld, p.ImageHeight, p.ImageWidth, p.Turns, p.Threads, 0)\n\t\t\t\tfmt.Fprintln(*conn, worldString)\n\t\t\t\tfmt.Fprintln(*conn, createStateChange(turn, \"q\"))\n\t\t\t\tmsg := \"keyshutDown\\n\"\n\t\t\t\tfor i := 0; i < 4; i++ {\n\t\t\t\t\tfmt.Fprintf(clients[i], msg)\n\t\t\t\t}\n\t\t\t\tos.Exit(3)\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\t//AliveCellsCount every 2 seconds\n\t\t\thowManyAreAlive := 0\n\t\t\tfor i := 0; i < p.ImageHeight; i++ {\n\t\t\t\tfor j := 0; j < p.ImageWidth; j++ {\n\t\t\t\t\tif workingWorld[i][j] != 0 {\n\t\t\t\t\t\thowManyAreAlive++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tmsg := createAliveCellsCount(turn, howManyAreAlive)\n\t\t\tfmt.Fprintln(*conn, msg)\n\t\tdefault:\n\t\t}\n\n\t\tnodes := 4\n\t\tworkerHeight := p.ImageHeight / nodes\n\n\t\t//Split and send the world between nodes in order to start computing\n\t\tfor node := 0; node < nodes; node++ {\n\t\t\tnodeWorldBytes := getWorkerWorld(0, workingWorld, workerHeight, p.ImageHeight, p.ImageWidth, node)\n\t\t\tstartNode(workerHeight+2, turn, p.ImageHeight, p.ImageWidth, nodeWorldBytes, p.Threads, node, conn, clients)\n\t\t}\n\t\t//Auxiliary world\n\t\tunifyWorld := make([][]byte, 
p.ImageHeight)\n\t\tfor i := range unifyWorld {\n\t\t\tunifyWorld[i] = make([]byte, p.ImageWidth)\n\t\t}\n\t\t//Receive computer worlds from AWS Nodes and start unifying them\n\t\tfor node := 0; node < nodes; node++ {\n\t\t\tunifyWorldHelper := make([][]byte, workerHeight)\n\t\t\tfor i := range unifyWorldHelper {\n\t\t\t\tunifyWorldHelper[i] = make([]byte, p.ImageWidth)\n\t\t\t}\n\t\t\tparams := <-nodeChan[node]\n\t\t\tfor i := 0; i < workerHeight; i++ {\n\t\t\t\tfor j := 0; j < params.ImageWidth; j++ {\n\t\t\t\t\tunifyWorldHelper[i][j] = params.world[i][j]\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor i := 0; i < workerHeight; i++ {\n\t\t\t\tfor j := 0; j < p.ImageWidth; j++ {\n\t\t\t\t\tunifyWorld[mod(node*workerHeight+i, p.ImageHeight)][j] = unifyWorldHelper[i][j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t//Rewrite the original World and send CellFlipped messages to controller where the cells change state\n\t\tfor i := 0; i < p.ImageHeight; i++ {\n\t\t\tfor j := 0; j < p.ImageWidth; j++ {\n\t\t\t\tif workingWorld[i][j] != unifyWorld[i][j] {\n\t\t\t\t\tmsg := createCellFlipped(i, j, turn)\n\t\t\t\t\tfmt.Fprintf(*conn, msg)\n\t\t\t\t}\n\t\t\t\tworkingWorld[i][j] = unifyWorld[i][j]\n\t\t\t}\n\t\t}\n\t\t//Reports to controller that a turn has been completed\n\t\tturnCompleteString := createTurnComplete(turn)\n\t\tfmt.Fprintln(*conn, turnCompleteString)\n\t\t//myVisualiseMatrix(workingWorld, p.ImageWidth, p.ImageHeight)\n\n\t}\n\t//Reports to controller that all turns have been completed and send the last board as well\n\tworldString := convertToString(workingWorld, p.ImageHeight, p.ImageWidth, p.Turns, p.Threads, 0)\n\tfmt.Fprintln(*conn, worldString)\n\tfinalTurnCompleteString := createFinalTurnComplete(turn, workingWorld, p.ImageHeight, p.ImageWidth)\n\tfmt.Fprintln(*conn, finalTurnCompleteString)\n\n}", "func PlayGame(p1 Player, p2 Player, simulation bool) Player {\n\n\tif simulation { //hvis turneringen er i simuleringsmodus\n\t\twinner = simulateGame(p1, p2)\n\t\treturn 
winner\n\t}\n\n\trunde = 1\n\tboard = map[int]string{ // Lagrer spillets trekk. Tomme felt forblir tall som representerer posisjonen dens på brettet.\n\t\t1: \"1\", 2: \"2\", 3: \"3\",\n\t\t4: \"4\", 5: \"5\", 6: \"6\",\n\t\t7: \"7\", 8: \"8\", 9: \"9\"}\n\n\tprintBoard()\n\tfmt.Println(\"\\n\" + p1.Name + \" Starter.\")\n\tnewRoundOrGameOver(p1, p2)\n\treturn winner\n}", "func (lb *Leaderboard) Game(embeds string) (*Game, *Error) {\n\t// we only have the game ID at hand\n\tasserted, okay := lb.GameData.(string)\n\tif okay {\n\t\treturn GameByID(asserted, embeds)\n\t}\n\n\treturn toGame(lb.GameData, true), nil\n}", "func PlayGame(agents []net.Conn, p1 Player, p2 Player) Player {\n\tagentList = agents\n\trunde = 1\n\tboard = map[int]string{ // Lagrer spillets trekk. Tomme felt forblir tall som representerer posisjonen dens på brettet.\n\t\t1: \"1\", 2: \"2\", 3: \"3\",\n\t\t4: \"4\", 5: \"5\", 6: \"6\",\n\t\t7: \"7\", 8: \"8\", 9: \"9\"}\n\n\tmultiplayerComm.PrintAll(agents, printBoard())\n\tmultiplayerComm.PrintAll(agents, \"\\n\"+p1.Name+\" Starter.\")\n\tnewRoundOrGameOver(agents, p1, p2)\n\treturn winner\n}", "func (self *PhysicsP2) SetGameA(member *Game) {\n self.Object.Set(\"game\", member)\n}", "func (a *AssistScorer) Player() *Player {\n\treturn a.PlayerData.Data\n}", "func newRoundOrGameOver(agents []net.Conn, p1 Player, p2 Player) {\nnewRound:\n\twinningCombos := [][]int{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}, {1, 4, 7}, {2, 5, 8}, {3, 6, 9}, {1, 5, 9}, {3, 5, 7}}\n\tp1, p2 = placeMove(p1, p2) // plasserer brikken i på brettet og oppdaterer hver spiller har brukt.\n\tmultiplayerComm.PrintAll(agents, printBoard())\n\n\tfor i := 0; i < len(winningCombos); i++ { //Sjekker om det er noen vinnerkombinasjoner på brettet.\n\t\tif board[winningCombos[i][0]] == board[winningCombos[i][1]] && board[winningCombos[i][1]] == board[winningCombos[i][2]] {\n\t\t\tif board[winningCombos[i][0]] == \"X\" {\n\t\t\t\twinner = p1\n\t\t\t\treturn\n\t\t\t}\n\t\t\twinner = 
p2\n\t\t\treturn\n\n\t\t}\n\t}\n\n\trunde++\n\tif runde > 9 { //Sjekker om brettet er fylt opp.\n\t\tmultiplayerComm.PrintAll(agents, \"\\nBrettet er fullt. Vinneren blir dermed avgjort på tid.\")\n\t\tif p1.TimeUsed <= p2.TimeUsed {\n\t\t\twinner = p1\n\t\t} else {\n\t\t\twinner = p2\n\t\t}\n\n\t\tmultiplayerComm.PrintAll(agents, \"\\n \"+p1.Name+\" brukte \"+strconv.Itoa(p1.TimeUsed)+\" millisekunder.\")\n\t\tmultiplayerComm.PrintAll(agents, \" \"+p2.Name+\" brukte \"+strconv.Itoa(p2.TimeUsed)+\" millisekunder.\")\n\n\t\treturn\n\t}\n\n\tgoto newRound\n}", "func (room *Room) addPlayer(conn *Connection, recover bool) bool {\n\tfmt.Println(\"addPlayer\", recover)\n\tif room.done() {\n\t\treturn false\n\t}\n\troom.wGroup.Add(1)\n\tdefer func() {\n\t\troom.wGroup.Done()\n\t}()\n\n\tif room.lobby.Metrics() {\n\t\tmetrics.Players.WithLabelValues(room.ID, conn.User.Name).Inc()\n\t}\n\n\tconn.debug(\"Room(\" + room.ID + \") wanna connect you\")\n\n\t// if room hasnt got places\n\tif !recover && !room.Players.EnoughPlace() {\n\t\tconn.debug(\"Room(\" + room.ID + \") hasnt any place\")\n\t\treturn false\n\t}\n\n\troom.MakePlayer(conn, recover)\n\n\tgo room.addAction(conn.ID(), ActionConnectAsPlayer)\n\tgo room.sendPlayerEnter(*conn, room.AllExceptThat(conn))\n\n\tif !recover {\n\t\troom.lobby.sendRoomUpdate(*room, All)\n\t\troom.lobby.sendRoomToOne(*room, *conn)\n\n\t\tif !room.Players.EnoughPlace() {\n\t\t\troom.chanStatus <- StatusFlagPlacing\n\t\t}\n\t}\n\n\treturn true\n}", "func addPlayerToGame(c socketio.Conn, roomID string) {\n\tctx := c.Context().(map[string]interface{})\n\tcurrentPlayer := ctx[ctxval.Player].(player.Player)\n\tactiveGames := ctx[ctxval.ActiveGames].(map[string]*Room)\n\tcurrentGame := activeGames[roomID]\n\tserver := ctx[ctxval.SocketServer].(*socketio.Server)\n\n\tcurrentGame.Players[currentPlayer.ID] = currentPlayer\n\tcurrentGame.PlayersOrder = append(currentGame.PlayersOrder, currentPlayer.ID)\n\n\tj, err := json.Marshal(currentPlayer)\n\n\tif 
err != nil {\n\t\tdelete(currentGame.Players, currentPlayer.ID)\n\t\tc.Emit(\"exception\", map[string]string{\n\t\t\t\"event\": \"invite_accepted\",\n\t\t\t\"error\": errcode.UnexpectedError,\n\t\t})\n\t\treturn\n\t}\n\n\tg, err := json.Marshal(currentGame)\n\n\tif err != nil {\n\t\tdelete(currentGame.Players, currentPlayer.ID)\n\t\tc.Emit(\"exception\", map[string]string{\n\t\t\t\"event\": \"invite_accepted\",\n\t\t\t\"error\": errcode.UnexpectedError,\n\t\t})\n\t\treturn\n\t}\n\n\tctx[ctxval.InGameRoomID] = roomID\n\n\tserver.BroadcastToRoom(\"/\", roomID, roomID+\":player_joined\", string(j))\n\tc.Join(roomID)\n\tc.Emit(\"game_joined\", string(g))\n}", "func (self *TileSprite) InWorld() bool{\n return self.Object.Get(\"inWorld\").Bool()\n}", "func (self *GameObjectCreator) SetGameA(member *Game) {\n self.Object.Set(\"game\", member)\n}", "func Game(players []Player, ante int, minBet int, maxBet int, dealerToken int)*Player{\r\n \t/* additional arguments might be added to function to denote display specifiations \r\n \t/* show() main game page\r\n \t/* create and shuffle deck of cards */\r\n \t//reader := bufio.NewReader(os.Stdin)\r\n \tcheat := true\r\n \tcardTypes, suites := Init_card_cat()\r\n \tpot := 0\r\n \tdeck := createDeck(cardTypes, suites)\r\n \tdeck = shuffle(deck)\r\n \tfmt.Printf(\"Deck test: \\n\")\r\n\tfor _, d := range deck{\r\n\t\tfmt.Printf(\"%s of %s \", d.Face, d.Suit)\r\n\t}\r\n \t/*maybe show() some shuffle gif animation\r\n \t/* each player pays the ante (may later swich to 'blind') */\r\n \tfor i := 0; i < len(players); i++{\r\n \t\tplayers[i].Money -= ante\r\n \t\tpot += ante\r\n \t\tfmt.Printf(\"%s pays %d for ante \\n\", players[i].Name, ante)\r\n\t}\r\n\tif cheat == true{\r\n\t\treader := bufio.NewReader(os.Stdin)\r\n\t\tfor i := 0; i < len(players); i++{\r\n\t\t\tfmt.Printf(\"Time for %s to chose cards...\\n\", players[i].Name)\r\n\t\t\tfor j := 0; j < 5; j++{\r\n\t\t\t\tfmt.Printf(\"Please enter face of next 
card:\\n\")\r\n\t\t\t\tface, _ := reader.ReadString('\\n')\r\n\t\t\t\tfmt.Printf(\"Please enter the suit of the next card:\\n\")\r\n\t\t\t\tsuit, _ := reader.ReadString('\\n')\r\n\t\t\t\tface = strings.Replace(face, \"\\r\\n\", \"\", -1)\r\n\t\t\t\tsuit = strings.Replace(suit, \"\\r\\n\", \"\", -1)\r\n\t\t\t\tcard := newCard(face, suit, cardTypes)\r\n\t\t\t\tplayers[i].Hand = append(players[i].Hand, *card)\r\n\t\t\t}\r\n\t\t}\r\n\t}else{\r\n \r\n \t\t/*first round dealing */\r\n\t \tfmt.Printf(\"The dealer suffles the cards and begins dealing... \\n\")\r\n\t \tbufio.NewReader(os.Stdin).ReadBytes('\\n')\r\n\r\n\t \td := 0\r\n\t \tfor d < 5{\r\n\t \t\tfor i := 0; i < len(players); i++{\r\n\t \t\t\tcard := draw(deck)\r\n\t \t\t\tdeck = deck[1:]\r\n\t \t\t\tplayers[i].Hand = append(players[i].Hand, card)\r\n\t \t\t\tfmt.Printf(\" %s is delt a %s of %s \\n \", players[i].Name, card.Face, card.Suit)\r\n\t \t\t}\r\n\t \t\td++\r\n\t \t}\r\n\t}\r\n \t/*first round betting */\r\n \tpot = betting_round(players, minBet, maxBet, pot)\r\n \tremaining := check_num_players_remaining(players)\r\n \tif remaining < 2 {\r\n \t\twinner := find_winner(players)\r\n \t\treturn winner\r\n \t}\r\n \t/* first draw */ \r\n \tdeck = redraw(players, deck)\r\n \t/* second round of betting */\r\n \tpot = betting_round(players, minBet, maxBet, pot)\r\n \tremaining = check_num_players_remaining(players)\r\n \tif remaining < 2 {\r\n \t\twinner := find_winner(players)\r\n \t\treturn winner\r\n \t}\r\n \t/* second draw */ \r\n \tdeck= redraw(players, deck)\r\n \t/* Third and final round of betting */\r\n \tpot = betting_round(players, minBet, maxBet, pot)\r\n \tremaining = check_num_players_remaining(players)\r\n \tif remaining < 2 {\r\n \t\twinner := find_winner(players)\r\n \t\treturn winner\r\n \t}\r\n \t/* sort hands by rank to prepare for hand comparisons */\r\n \tfor i := 0; i < len(players); i++{\r\n \t\tif players[i].Folded == false{\r\n \t\t\tplayers[i].sort_hand_by_rank()\r\n 
\t\t\tfmt.Printf(\"Sorted hand: \\n\")\r\n \t\t\tplayers[i].show_hand()\r\n \t\t\tplayers[i].card_histogram()\r\n \t\t\t}\r\n \t\t}\r\n \tscore_board :=rank_hands(players)\t\r\n \twinner := showdown(players, score_board)\r\n \tfmt.Printf(\"%s win a pot worth %d \\n\", winner.Name, pot)\r\n \twinner.Money += pot\r\n \treturn winner\r\n\r\n \t\t\r\n }", "func (g *GameSession) Run() {\n\twaiting := true\n\tfor {\n\t\ttimeout := time.Millisecond * 33\n\t\twaiting = true\n\t\tfor waiting {\n\t\t\tselect {\n\t\t\tcase <-time.After(timeout):\n\t\t\t\twaiting = false\n\t\t\t\tbreak\n\t\t\tcase msg := <-g.FromNetwork:\n\t\t\t\tswitch msg.mtype {\n\t\t\t\tcase messages.MovePlayerMsgType:\n\t\t\t\t\tg.MoveEntity(msg.client, msg.net.(*messages.MovePlayer))\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Printf(\"game.go:Run(): UNKNOWN MESSAGE TYPE: %T\\n\", msg)\n\t\t\t\t}\n\t\t\tcase imsg := <-g.FromGameManager:\n\t\t\t\tswitch timsg := imsg.(type) {\n\t\t\t\tcase AddPlayer:\n\t\t\t\t\tnewid := uint32(len(g.World.Entities))\n\t\t\t\t\tplayer := &Entity{\n\t\t\t\t\t\tID: newid,\n\t\t\t\t\t\tName: timsg.Entity.Name,\n\t\t\t\t\t\tEType: CreatureEType,\n\t\t\t\t\t\tSeed: timsg.Entity.Seed,\n\t\t\t\t\t\tBody: physics.NewRigidBody(newid, 22, 46, physics.Vect2{X: 5000, Y: 5000}, physics.Vect2{}, 0, 100),\n\t\t\t\t\t}\n\t\t\t\t\tg.World.Space.AddEntity(player.Body, false)\n\t\t\t\t\tg.World.Entities[newid] = player\n\t\t\t\t\tg.Clients[timsg.Client.ID] = &User{\n\t\t\t\t\t\tClient: timsg.Client,\n\t\t\t\t\t\tAccounts: []*Account{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tCharacter: &Character{\n\t\t\t\t\t\t\t\t\tID: newid,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\tcase RemovePlayer:\n\t\t\t\t\tid := g.Clients[timsg.Client.ID].Accounts[0].Character.ID\n\t\t\t\t\tent := g.World.Entities[id]\n\t\t\t\t\tif ent == nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// TODO: remove player from game after timeout?\n\t\t\t\t\tg.World.Space.RemoveEntity(ent.Body, 
false)\n\t\t\t\t\tdelete(g.Clients, timsg.Client.ID)\n\t\t\t\t\tif len(g.Clients) == 0 {\n\t\t\t\t\t\tfmt.Printf(\"All clients disconnected, closing game %d.\", g.ID)\n\t\t\t\t\t\tg.IntoGameManager <- GameMessage{\n\t\t\t\t\t\t\tnet: &messages.EndGame{},\n\t\t\t\t\t\t\tmtype: messages.EndGameMsgType,\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-g.Exit:\n\t\t\t\tfmt.Print(\"EXITING: Run in Game.go\\n\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tcollisions := g.World.Space.Tick(true)\n\t\tfor _, col := range collisions {\n\t\t\tvar ent *Entity\n\t\t\tfor _, e := range g.World.Entities {\n\t\t\t\tif e.ID == col.Body.ID {\n\t\t\t\t\tent = e\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ent.EType == ProjectileEType {\n\t\t\t\t// TODO: remove the projectile\n\t\t\t\t// TODO: resolve hit to target.\n\t\t\t}\n\t\t}\n\t\tif g.World.Space.TickID%20 == 0 {\n\t\t\tg.SendMasterFrame()\n\t\t}\n\t}\n}", "func (g *game) alivePlayers() int {\n\talivePlayers := 0\n\tfor i := 0; i < len(g.Players); i++ {\n\t\tif !g.Players[i].Dead && !g.Players[i].Spectator {\n\t\t\talivePlayers++\n\t\t}\n\t}\n\treturn alivePlayers\n}", "func gameServer(ws *websocket.Conn) {\n log.Printf(\"Booting game player...%v\\n\", game)\n\n obj := CreatePlayer()\n game.add <- obj\n\n c := &Connection{send: make(chan GameObject, 256), id: obj.Id, ws: ws}\n h.register <- c\n defer func() {\n h.unregister <- c\n game.remove <- obj\n }()\n\n go c.Writer()\n\n c.send <- *obj // First object is player, itself\n\n for _, obj := range game.objects {\n c.send <- *obj\n }\n\n c.Reader(chGame)\n // TODO: These reader/writers need to run all reads through processGameMessage\n // Writing will occur when the GameState is updated (possibly via a Tick)\n}", "func (self *Tween) SetGameA(member *Game) {\n self.Object.Set(\"game\", member)\n}", "func (b *base) getPlayers() []match2.Player {\n\tplayers := make([]match2.Player, len(b.players))\n\tfor i := range b.players {\n\t\tplayers[i].ID = 
b.players[i].userID\n\t\tplayers[i].Username = b.players[i].Username\n\t\tplayers[i].IsCPU = b.players[i].IsCPU\n\t}\n\treturn players\n}", "func (b *base) waitForPlayers() bool {\n\tlog.Printf(\"[Match] Waiting for all the players to register, match: %s\", b.info.ID)\n\tnbConnections := int64(len(b.connections))\n\n\tcnt := context.Background()\n\tvar cancel func()\n\tcnt, cancel = context.WithTimeout(cnt, time.Second*5)\n\tif b.readyMatch.Acquire(cnt, nbConnections) != nil {\n\t\tcancel()\n\n\t\tcancelMessage := socket.RawMessage{}\n\t\tcancelMessage.ParseMessagePack(byte(socket.MessageType.GameCancel), GameCancel{\n\t\t\tType: 1,\n\t\t})\n\t\tb.broadcast(&cancelMessage)\n\t\tcbroadcast.Broadcast(match2.BGameEnds, b.info.ID)\n\t\treturn false\n\t}\n\tcancel()\n\t//Send a message to all the clients to advise them that the game is starting\n\tmessage := socket.RawMessage{}\n\tmessage.MessageType = byte(socket.MessageType.GameStarting)\n\tb.broadcast(&message)\n\treturn true\n}", "func UserInGame(PlayerID string) bool {\n\tfor _, player := range Players {\n\t\t// If a match is found return true\n\t\tif player.PlayerID == PlayerID {\n\t\t\treturn true\n\t\t}\n\t}\n\t// Otherwise return false.\n\treturn false\n}", "func (self *Graphics) InWorld() bool{\n return self.Object.Get(\"inWorld\").Bool()\n}", "func (g *Game) getPlayer(id string) (*Player, error) {\n\tfor _, p := range g.Players {\n\t\tif p.ID == id {\n\t\t\treturn p, nil\n\t\t}\n\t}\n\treturn nil, errors.New(\n\t\tfmt.Sprintf(\"Unknown player ID: %s\", id))\n}", "func newRoundOrGameOver(p1 Player, p2 Player) {\nnewRound:\n\twinningCombos := [][]int{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}, {1, 4, 7}, {2, 5, 8}, {3, 6, 9}, {1, 5, 9}, {3, 5, 7}}\n\tp1, p2 = placeMove(p1, p2) // plasserer brikken i på brettet og oppdaterer hver spiller har brukt.\n\tprintBoard()\n\n\tfor i := 0; i < len(winningCombos); i++ { //Sjekker om det er noen vinner kombinasjoner på brettet.\n\t\tif board[winningCombos[i][0]] == 
board[winningCombos[i][1]] && board[winningCombos[i][1]] == board[winningCombos[i][2]] {\n\t\t\tif board[winningCombos[i][0]] == \"X\" {\n\t\t\t\twinner = p1\n\t\t\t\treturn\n\t\t\t}\n\t\t\twinner = p2\n\t\t\treturn\n\n\t\t}\n\t}\n\n\trunde++\n\tif runde > 9 { //Sjekker om brettet er fylt opp.\n\t\tfmt.Println(\"\\nBrettet er fullt. Vinneren blir dermed avgjort på tid.\")\n\t\tif p1.TimeUsed <= p2.TimeUsed {\n\t\t\twinner = p1\n\t\t} else {\n\t\t\twinner = p2\n\t\t}\n\n\t\tfmt.Println(\"\\n \"+p1.Name+\" brukte\", p1.TimeUsed, \"millisekunder.\")\n\t\tfmt.Println(\" \"+p2.Name+\" brukte\", p2.TimeUsed, \"millisekunder.\")\n\n\t\treturn\n\t}\n\n\tgoto newRound\n}", "func (self *TileSprite) SetGameA(member *Game) {\n self.Object.Set(\"game\", member)\n}", "func (b *Board) evaluate_static() Player {\n\tif newBoardEvaluator(b.Player1).isWin() {\n\t\treturn Player1\n\t} else if newBoardEvaluator(b.Player2).isWin() {\n\t\treturn Player2\n\t}\n\treturn NoPlayer\n}", "func (g *Game) Playable() bool {\n\treturn g.initialized && g.Stats.SunkShips < g.Stats.InitialShips\n}", "func (self *TileSprite) InCamera() bool{\n return self.Object.Get(\"inCamera\").Bool()\n}", "func (self *TileSprite) Alive() bool{\n return self.Object.Get(\"alive\").Bool()\n}", "func (inst *Instance) AddPlayer(plr player) error {\n\tplr.SetInstance(inst)\n\n\tfor _, other := range inst.players {\n\t\tother.Send(packetMapPlayerEnter(plr))\n\t\tplr.Send(packetMapPlayerEnter(other))\n\t}\n\n\tinst.lifePool.AddPlayer(plr)\n\n\t// show all the rooms\n\tfor _, v := range inst.rooms {\n\t\tif game, valid := v.(room.Game); valid {\n\t\t\tplr.Send(packetMapShowGameBox(game.DisplayBytes()))\n\t\t}\n\t}\n\n\t// Play map animations e.g. 
ship arriving to dock\n\n\tinst.players = append(inst.players, plr)\n\n\tif len(inst.players) == 1 {\n\t\tinst.startFieldTimer()\n\t}\n\n\treturn nil\n}", "func (p *Player) Next() { p.Player.Call(INTERFACE+\".Player.Next\", 0) }", "func (rout *router) serveGame(w http.ResponseWriter, r *http.Request,\n\tgameId, color string, minutes int, cleanup, switchColors func(),\n\tusername, userId string) {\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, \"Could not upgrade conn\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tplayerClock := time.NewTimer(time.Duration(minutes) * time.Minute)\n\tplayerClock.Stop()\n\tp := &player{\n\t\tcleanup: cleanup,\n\t\tclock: playerClock,\n\t\tcolor: color,\n\t\tconn: conn,\n\t\tgameId: gameId,\n\t\toppRanOut: make(chan bool, 1),\n\t\tdisconnect: make(chan bool),\n\t\tdrawOffer: make(chan bool, 1),\n\t\toppAcceptedDraw: make(chan bool, 1),\n\t\toppResigned: make(chan bool, 1),\n\t\trematchOffer: make(chan bool, 1),\n\t\toppAcceptedRematch: make(chan bool, 1),\n\t\toppReady: make(chan bool, 1),\n\t\toppDisconnected: make(chan bool, 1),\n\t\toppGone: make(chan bool, 1),\n\t\toppReconnected: make(chan bool, 1),\n\t\tsendMove: make(chan []byte, 2), // one for the clock, one for the move\n\t\tsendChat: make(chan message, 128),\n\t\tswitchColors: switchColors,\n\t\ttimeLeft: time.Duration(minutes) * time.Minute,\n\t\tuserId: userId,\n\t\tusername: username,\n\t}\n\tswitch minutes {\n\tcase 1:\n\t\trout.rm.registerPlayer1Min<- p\n\tcase 3:\n\t\trout.rm.registerPlayer3Min<- p\n\tcase 5:\n\t\trout.rm.registerPlayer5Min<- p\n\tcase 10:\n\t\trout.rm.registerPlayer10Min<- p\n\tdefault:\n\t\tlog.Println(\"Invalid clock time:\", minutes)\n\t\thttp.Error(w, \"Invalid clock time\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Allow collection of memory referenced by the caller by doing all work in\n\t// new goroutines.\n\tgo p.writePump()\n\tgo p.readPump()\n\n\trout.ldHub.joinPlayer<- 
userId\n}", "func (game *Game) hasPlayerGoneFirst(index int) bool {\n\treturn game.playerHasStarted[index]\n}", "func newGame() *game {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tg := &game{}\n\tg.State = \"lobby\"\n\tg.StateTime = time.Now()\n\tg.Name = \"MafiosoGame\"\n\tg.Winner = \"\"\n\tg.Players = make([]*player, 0)\n\tif g.Id != \"\" {\n\t\tgameList[g.Id] = g\n\t}\n\treturn g\n}", "func (deltaMergeImpl *DeltaMergeImpl) Player(delta interface{}) coreminer.Player {\n\tbaseGameObject := (*deltaMergeImpl).BaseGameObject(delta)\n\tif baseGameObject == nil {\n\t\treturn nil\n\t}\n\n\tasPlayer, isPlayer := baseGameObject.(coreminer.Player)\n\tif !isPlayer {\n\t\t(*deltaMergeImpl).CannotConvertDeltaTo(\"coreminer.Player\", delta)\n\t}\n\n\treturn asPlayer\n}", "func (self *Graphics) SetGameA(member *Game) {\n self.Object.Set(\"game\", member)\n}", "func getPlayersFromMatch(matchID int) (*Player, *Player) {\n\tvar player1, player2 *Player\n\n\tpStatement := `SELECT id, first_name, last_name, is_admin FROM player \n\tLEFT JOIN match_participant mp ON mp.player_id = player.id\n\tWHERE match_id = $1`\n\tprows, perr := db.Query(pStatement, matchID)\n\tif perr != nil {\n\t\tprintln(perr.Error())\n\t}\n\n\tp := true\n\tfor prows.Next() {\n\t\tvar scannedPlayer Player\n\t\tperr = prows.Scan(&scannedPlayer.Id, &scannedPlayer.FirstName, &scannedPlayer.LastName, &scannedPlayer.Admin)\n\t\tif perr != nil {\n\t\t\tprintln(perr.Error())\n\t\t}\n\n\t\tif p {\n\t\t\tplayer1 = &scannedPlayer\n\t\t} else {\n\t\t\tplayer2 = &scannedPlayer\n\t\t}\n\t\tp = !p\n\t}\n\treturn player1, player2\n}", "func getPlayer(params martini.Params, w http.ResponseWriter, r *http.Request) {\n\tid := params[\"id\"]\n\tplayer := models.NewPlayer(id)\n\tskue.Read(view, player, nil, w, r)\n}", "func (room *RoomRecorder) peoplePlayerEnter(msg synced.Msg) {\n\tif conn, recover, ok := room.peoplCheck(msg); ok {\n\t\troom.AddConnection(conn, true, recover)\n\t}\n}", "func (this *Game) oo(player player, king 
square) bool {\n\tif this.SquareIsCoveredBy(player.Other(), king) {\n\t\treturn false\n\t}\n\n\ttravelSquare := IntSquare(king.Int() + 1)\n\tif this.SquareIsCoveredBy(player.Other(), travelSquare) {\n\t\treturn false\n\t}\n\n\tlandingSquare := IntSquare(king.Int() + 2)\n\tif this.SquareIsCoveredBy(player.Other(), landingSquare) {\n\t\treturn false\n\t}\n\n\tif this.anyOccupied(travelSquare, landingSquare) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (self *Graphics) Alive() bool{\n return self.Object.Get(\"alive\").Bool()\n}", "func (g *Game) initPlayers(names []string) {\n\tg.addMessage(\"Initializing players...\")\n\n\tg.Players = make([]*Player, len(names))\n\n\tfor i, name := range names {\n\t\tid, err := uuid.GenUUID()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tp := &Player{\n\t\t\tID: id,\n\t\t\tName: name,\n\t\t\tFactories: make([]Factory, 1),\n\t\t\tMoney: 12,\n\t\t\tChatMessages: Queue{Capacity: 500},\n\t\t\tTurnOrder: i}\n\n\t\tp.Factories[0] = Factory{Key: \"p1\", Capacity: 1}\n\n\t\tg.Players[i] = p\n\t}\n\n\tp := g.Players[0]\n\tp.IsCurrent = true\n}", "func playGame(uci *uci.Engine, ai ai.Engine, e eval.Eval, board *chess.Board) {\n\t//setup new positions\n\tuci.SetPosition(board)\n\tai.SetPosition(board)\n\n\tfor {\n\t\t//white\n\t\tfor i := range uci.SearchDepth(5) {\n\t\t\tif m, ok := i.BestMove(); ok {\n\t\t\t\tfmt.Println(\"white move: \", m)\n\t\t\t\tboard = board.MakeMove(m)\n\t\t\t\tif _, mate := board.IsCheckOrMate(); mate {\n\t\t\t\t\tfmt.Println(\"WHITE WINS\")\n\t\t\t\t\tboard.PrintBoard(true)\n\t\t\t\t\tos.Exit(0)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\tboard.PrintBoard(true)\n\t\tuci.SetPosition(board)\n\t\tai.SetPosition(board)\n\n\t\t//black\n\t\tfor i := range ai.SearchDepth(5, e) {\n\t\t\tif m, ok := i.BestMove(); ok {\n\t\t\t\tfmt.Println(\"black move: \", m)\n\t\t\t\tboard = board.MakeMove(m)\n\t\t\t\tif _, mate := board.IsCheckOrMate(); mate {\n\t\t\t\t\tfmt.Println(\"BLACK 
WINS\")\n\t\t\t\t\tboard.PrintBoard(true)\n\t\t\t\t\tos.Exit(0)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\tuci.SetPosition(board)\n\t\tai.SetPosition(board)\n\t\tboard.PrintBoard(true)\n\t}\n\tuci.Quit()\n\tai.Quit()\n}", "func player(name string, table chan *ball) {\n\tfor {\n\t\tb := <-table // blocks until a ball is sent on table\n\t\tb.hits++\n\t\tfmt.Println(name, b.hits)\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\ttable <- b // blocks until someone receives on table\n\t}\n}", "func createPlayer(fname, lname, pos string) *Athelete {\n\n\tplayer1 := &Athelete{\n\n\t\tFirstname: fname,\n\t\tLastname: lname,\n\t\tLMType: \"player\",\n\t\tTeamName: \"Free Agent\",\n\t\tEligible: &Eligible{\n\t\t\tReason: \"\",\n\t\t\tSlips: make([]*Slip, 10, 30),\n\t\t\tLMActive: true,\n\t\t\tReturnDate: 0,\n\t\t},\n\t\tAtti: Attributes{\n\t\t\tPosition: pos,\n\t\t},\n\t}\n\n\treturn player1\n}", "func (g *Game) setCurrentPlayer(p *Player) {\n\tfor i, p0 := range g.Players {\n\t\tif p0 == p {\n\t\t\tg.ActivePlayer = i\n\t\t\tp0.IsCurrent = true\n\t\t} else {\n\t\t\tp0.IsCurrent = false\n\t\t}\n\t}\n}", "func startPendingMatch(seekerName string, matchID int) bool {\n\n\tvar game ChessGame\n\n\t//checking to make sure player's rating is in range, used as a backend rating check\n\terrMessage, bullet, blitz, standard, correspondence := GetRating(seekerName)\n\tif errMessage != \"\" {\n\t\tfmt.Println(\"Cannot get rating lobby.go startPendingMatch\")\n\t\treturn false\n\t}\n\n\tmatch := Pending.Matches[matchID]\n\tif match == nil {\n\t\treturn false\n\t}\n\t//isPlayersInGame function is located in socket.go\n\tif isPlayersInGame(match.Name, match.Opponent) {\n\t\treturn false\n\t}\n\n\tif match.Opponent == \"\" { //only use this case for public matches\n\t\tif match.GameType == \"bullet\" && (bullet < match.MinRating || bullet > match.MaxRating) {\n\t\t\t//fmt.Println(\"Bullet Rating not in range.\")\n\t\t\treturn false\n\t\t} else if match.GameType == \"blitz\" && (blitz < match.MinRating || 
blitz > match.MaxRating) {\n\t\t\t//fmt.Println(\"Blitz Rating not in range.\")\n\t\t\treturn false\n\t\t} else if match.GameType == \"standard\" && (standard < match.MinRating || standard > match.MaxRating) {\n\t\t\t//fmt.Println(\"Standard Rating not in range.\")\n\t\t\treturn false\n\t\t} else if match.GameType == \"correspondence\" && (correspondence < match.MinRating ||\n\t\t\tcorrespondence > match.MaxRating) {\n\t\t\t//fmt.Println(\"Correspondence Rating not in range.\")\n\t\t\treturn false\n\t\t}\n\t}\n\n\t//bullet, blitz, standard or correspondence game type\n\tgame.GameType = match.GameType\n\tgame.Type = \"chess_game\"\n\n\t//seting up the game info such as white/black player, time control, etc\n\trand.Seed(time.Now().UnixNano())\n\n\t//randomly selects both players to be white or black\n\tif rand.Intn(2) == 0 {\n\t\tgame.WhitePlayer = seekerName\n\t\tif game.GameType == \"bullet\" {\n\t\t\tgame.WhiteRating = bullet\n\n\t\t} else if game.GameType == \"blitz\" {\n\t\t\tgame.WhiteRating = blitz\n\n\t\t} else {\n\t\t\tgame.WhiteRating = standard\n\t\t}\n\n\t\tgame.BlackRating = match.Rating\n\t\tgame.BlackPlayer = match.Name\n\n\t} else {\n\t\tgame.WhitePlayer = match.Name\n\t\tif game.GameType == \"bullet\" {\n\t\t\tgame.BlackRating = bullet\n\n\t\t} else if game.GameType == \"blitz\" {\n\t\t\tgame.BlackRating = blitz\n\t\t} else {\n\t\t\tgame.BlackRating = standard\n\t\t}\n\n\t\tgame.WhiteRating = match.Rating\n\t\tgame.BlackPlayer = seekerName\n\t}\n\t//White for white to move or Black for black to move, white won, black won, stalemate or draw.\n\tgame.Status = \"White\"\n\n\t//no moves yet so nill/null\n\tgame.GameMoves = nil\n\tgame.StartMinutes = match.TimeControl\n\n\tgame.TimeControl = match.TimeControl\n\t//for simplicity we will only allow minutes\n\tgame.WhiteMinutes = match.TimeControl\n\tgame.WhiteSeconds = 0\n\tgame.BlackMinutes = match.TimeControl\n\tgame.BlackSeconds = 0\n\tgame.PendingDraw = false\n\tgame.Rated = 
match.Rated\n\tgame.Spectate = true\n\tgame.CountryWhite = GetCountry(game.WhitePlayer)\n\tgame.CountryBlack = GetCountry(game.BlackPlayer)\n\n\t// Guests should always be unrated games\n\tif strings.Contains(game.WhitePlayer, \"guest\") || strings.Contains(game.BlackPlayer, \"guest\") {\n\t\tgame.Rated = \"No\"\n\t}\n\n\tvar start int = 0\n\tfor {\n\t\tif _, ok := All.Games[start]; ok {\n\t\t\tstart++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tgame.ID = start\n\t//used in backend to keep track of all pending games waiting for a player to accept\n\tAll.Games[start] = &game\n\n\t//no longer need all the pending matches as game will be started\n\tfor key, value := range Pending.Matches {\n\t\t//deletes all pending matches for either players\n\t\tif value.Name == game.WhitePlayer || value.Name == game.BlackPlayer {\n\t\t\tdelete(Pending.Matches, key)\n\t\t}\n\t}\n\n\t//sending to front end for url redirection\n\tvar acceptmatch AcceptMatch\n\tacceptmatch.Type = \"accept_match\"\n\tacceptmatch.Name = game.WhitePlayer\n\tacceptmatch.TargetPlayer = game.BlackPlayer\n\tacceptmatch.MatchID = matchID\n\n\t//setting up the private chat between two players and send move connection\n\tPrivateChat[acceptmatch.Name] = acceptmatch.TargetPlayer\n\tPrivateChat[acceptmatch.TargetPlayer] = acceptmatch.Name\n\n\t//intitalizes all the variables of the game\n\tInitGame(game.ID, acceptmatch.Name, acceptmatch.TargetPlayer)\n\n\t// Redirects when players in the lobby\n\tfor _, cs := range Chat.Lobby {\n\t\tif err := websocket.JSON.Send(cs, &acceptmatch); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n\n\t// Redirects for players in the game room\n\tfor _, name := range All.Games[game.ID].observe.Names {\n\t\tif client, ok := Active.Clients[name]; ok {\n\t\t\tif err := websocket.JSON.Send(client, &game); err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t}\n\t}\n\n\t//starting white's clock first, this goroutine will keep track of both players clock for this game\n\t// the name of 
person passed in does not matter as long as its one of the two players\n\ttable := All.Games[game.ID]\n\tgo table.StartClock(game.ID, game.WhiteMinutes, game.WhiteSeconds, game.WhitePlayer)\n\n\treturn true\n}", "func newPlayer(s sender, tape tape, pm core.PulseManager, scheme core.PlatformCryptographyScheme) *player {\n\treturn &player{sender: s, tape: tape, pm: pm, scheme: scheme}\n}", "func Spawncheck(np models.Player) {\n\tcm := models.Chosenmonsterget()\n\tif cm.Spawn == false {\n\t\tswitch cm.Number {\n\t\tcase 1:\n\t\t\tif np.Position != 1 {\n\t\t\t\tMonsterspawn() //currently spawns when player is not in the living room\n\t\t\t\tfmt.Println(\"You hear the roar of a chainsaw. The crash and tearing of metal chewing wood.\\nIt's coming from the living room.\")\n\t\t\t\tfd := models.ItemGet(\"front door\")\n\t\t\t\tfd.Used = true\n\t\t\t\tnd := models.ItemGet(\"ruins of front door\")\n\t\t\t\tnd.Loc = 1\n\t\t\t\tmodels.Itemupdate(fd)\n\t\t\t\tmodels.Itemupdate(nd)\n\t\t\t\tcm = models.Chosenmonsterget() //to get fresh copy\n\t\t\t\tcm.Position = 1\n\t\t\t}\n\t\tcase 2:\n\t\t\tskillet := models.ItemGet(\"skillet\")\n\t\t\tif skillet.Toggle == false && np.Position != 1 {\n\t\t\t\tMonsterspawn() //currently spawns when player uses skillet\n\t\t\t\tfmt.Println(\"You hear a loud crack and crash. It sounds like something just tore down your front door.\")\n\t\t\t\tfd := models.ItemGet(\"front door\")\n\t\t\t\tfd.Used = true\n\t\t\t\tnd := models.ItemGet(\"ruins of front door\")\n\t\t\t\tnd.Loc = 1\n\t\t\t\tmodels.Itemupdate(fd)\n\t\t\t\tmodels.Itemupdate(nd)\n\t\t\t\tcm = models.Chosenmonsterget() //to get fresh copy\n\t\t\t\tcm.Position = 1\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Staying in this cabin alone is hungry work. 
Your stomach is growling.\")\n\t\t\t}\n\t\tcase 3:\n\t\t\tbell := models.ItemGet(\"tiny bell\")\n\t\t\tif bell.Toggle == false {\n\t\t\t\tMonsterspawn()\n\t\t\t\tcm = models.Chosenmonsterget()\n\t\t\t\tcm.Position = 3\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"With the snow falling outside, you hope it will be a white Christmas this year.\")\n\t\t\t}\n\t\tdefault:\n\t\t\tfmt.Println(\"Nothing evil lurks in these woods.\")\n\t\t}\n\t}\n\tmodels.Monsterupdate(cm)\n}", "func playerLoad(ctx hybs.Ctx) {\n\tvar hayabusaID = ctx.CtxString(\"HayabusaID\")\n\tvar player = &samplegame.Player{}\n\tif err := ctx.Mongo().Collection(\"player\").FindOne(\n\t\tctx.Context(),\n\t\tbson.M{\"hayabusa_id\": hayabusaID},\n\t).Decode(&player); err != nil {\n\t\tif err == mongo.ErrNoDocuments {\n\t\t\tctx.SysLogf(\"mongo find player failed:%s\", err)\n\t\t\tctx.StatusBadRequest()\n\t\t} else {\n\t\t\tctx.SysLogf(\"mongo find player failed:%s\", err)\n\t\t\tctx.StatusInternalServerError(\"DB Error\")\n\t\t}\n\t\treturn\n\t}\n\t// set response\n\tctx.SetCtxValue(\"Player\", player)\n\treturn\n}", "func gameManager() {\n\t//up to 100 game instances ;; should be made expanding, not fixed\n\tmax := 100\n\twidth := boardWidth //width of board\n\tboards := make([]Board, max)\n\n\tfor i := 0; i < max; i++ {\n\t\t(boards)[i].B = make([][]Spot, width)\n\t\tboards[i].Bp = 81\n\t\tboards[i].Wp = 81\n\t\tfor j := 0; j < width; j++ {\n\t\t\t((boards)[i]).B[j] = make([]Spot, width)\n\t\t}\n\t}\n\n\ta := true\n\tfor a {\n\t\tselect {\n\t\tcase inst := <- reqChan:\n\t\t\tselect {\n\t\t\tcase move := <- moveChan:\n\t\t\t\t/* rules checking occurs here ;; must add */\n\t\t\t\tif move.S == B {\n\t\t\t\t\tboards[inst].Bp--\n\t\t\t\t} else if move.S == W {\n\t\t\t\t\tboards[inst].Wp--\n\t\t\t\t}\n\t\t\t\t(boards[inst]).B[move.Y][move.X] = move.S\n\t\t\tcase act := <- activeChan:\n\t\t\t\tif boards[inst].A == Active && act == Inactive {\n\t\t\t\t\tfor i := 0; i < boardWidth; i++ {\n\t\t\t\t\t\tfor j := 0; j < 
boardWidth; j++ {\n\t\t\t\t\t\t\t(boards[inst]).B[i][j] = E\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\n\t\t\t\tboards[inst].A = act\n\t\t\t\tfmt.Printf(\"ID %d is now %v\\n\", inst, act)\n\t\t\t\t\n\t\t\tdefault: boardChan <- boards[inst]\n\t\t\t}\n\t\tcase a = <- killChan:\n\t\tcase <- getActiveChan:\t\n\t\t\tstr := \"\"\n\n\t\t\tfor i := 0; i < max; i++ {\n\t\t\t\tif(boards[i].A == Active) {\n\t\t\t\t\tstr += (sc.Itoa(i) + \",\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tstr += \"nil\"\n\n\t\t\tstrChan <- str\n\t\tdefault:\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t}\n}", "func (r Result) GameOver() bool {\n\tif r == InPlay {\n\t\treturn false\n\t}\n\treturn true\n}", "func (self *SinglePad) SetGameA(member *Game) {\n self.Object.Set(\"game\", member)\n}", "func (g *Game) AddTournamentPlayer(p *TournamentPlayer) error {\n\tswitch {\n\tcase isEmptyPlayer(g.RightPlayerOne):\n\t\tg.RightPlayerOne = *p\n\tcase isEmptyPlayer(g.LeftPlayerOne):\n\t\tg.LeftPlayerOne = *p\n\tcase isEmptyPlayer(g.RightPlayerTwo):\n\t\tg.RightPlayerTwo = *p\n\tcase isEmptyPlayer(g.LeftPlayerTwo):\n\t\tg.LeftPlayerTwo = *p\n\tdefault:\n\t\treturn errors.New(\"all players have been added\")\n\t}\n\treturn nil\n}", "func (gs *GameState) startGame(ulist []*users.User) error {\n\n\tgs.gamestarted = true\n\n\tmsg, err := json.Marshal(map[string]interface{}{\n\t\t\"action\": \"game-start\",\n\t\t\"players\": ulist,\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\"error marshalling JSON: %v\", err)\n\t\tselect {\n\t\tcase gs.quit <- 1:\n\t\tdefault:\n\t\t}\n\t}\n\tgs.notify(&gameEvent{\n\t\ttarget: nil,\n\t\tdata: msg,\n\t})\n\n\t// initial description phase\n\tif err := gs.handlePhaseOne(); err != nil {\n\t\treturn err\n\t}\n\n\t// while the game is less than the max rounds or 2 * number of playerCount -1\n\tfor gs.roundNumber < 2*gs.playerCount-1 && gs.roundNumber < maxRounds {\n\t\tif err := gs.beginphase(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := gs.beginphase(); err != nil {\n\t\t\treturn 
err\n\t\t}\n\t}\n\treturn nil\n}", "func (pt *ProtoUdp) ActPlayer(ring *sr.StreamRing) error {\n\tvar err error\n\tlog.Printf(\"%s\\n\", STR_UDP_PLAYER)\n\n\treturn err\n}", "func (p *Player) BossBattle(e *Enemy, l *Log, item *Item, d *Death) int {\n t := time.Now()\n fmt.Printf(\"%s has encountered a strong foe!\\n\\nName:%s\\n♥:%d\\nAtk:%d\\nDef:%d \\n\\nIt doesn't seem to notice. Want to come back another time[1] or fight[2]? \", p.Name, e.Name, e.Health, e.Attack, e.Armor)\n var choice int\n fmt.Scanln(&choice)\n var first bool\n switch choice {\n case 1:\n sucessString := fmt.Sprintf(\"%s snuck away from a %s without it noticing.\", p.Name, e.Name)\n l.AddAction(t.Format(\"3:04:05 \") + sucessString)\n default:\n TypedText(\"You failed to select one of the options given to you.\\n\", 50)\n TypedText(\"You're just going to have to fight it\", 50)\n TypedText(\"...\\n\", 200)\n fallthrough\n case 2:\n var simulate int\n fmt.Printf(\"Would you like to view battle[1] or simulate[2]? \")\n fmt.Scanln(&simulate)\n switch simulate {\n case 1:\n userI := RandomNumber(20)\n compI := RandomNumber(20)\n fmt.Printf(\"Rolling for initiative\")\n TypedText(\"...\", 300)\n fmt.Printf(\"%s rolled a %d\\n\", p.Name, userI)\n fmt.Printf(\"The %s rolled a %d\\n\", e.Name, compI)\n if compI > userI {\n fmt.Printf(\"The %s rolled higher, they will attack first.\\n\", e.Name)\n } else {\n fmt.Printf(\"%s rolled higher, %s will attack first.\\n\", p.Name, p.Name)\n first = true\n }\n for p.Health > 0 && e.Health > 0 {\n if first == true {\n p.UserAttack(e, l)\n time.Sleep(2500 * time.Millisecond)\n if e.Health > 0 {\n p.MobAttack(e, l)\n time.Sleep(2500 * time.Millisecond)\n } else {\n break\n }\n } else {\n p.MobAttack(e, l)\n time.Sleep(2500 * time.Millisecond)\n if p.Health > 0 {\n p.UserAttack(e, l)\n } else {\n break\n }\n }\n }\n default:\n TypedText(\"Since you didn't enter a valid selection the battle will be simulated.\", 50)\n fallthrough\n case 2:\n userI := 
RandomNumber(20)\n compI := RandomNumber(20)\n fmt.Printf(\"Rolling for initiative\")\n fmt.Printf(\".\")\n fmt.Printf(\".\")\n fmt.Printf(\".\\n\")\n fmt.Printf(\"%s rolled a %d\\n\", p.Name, userI)\n fmt.Printf(\"The %s rolled a %d\\n\", e.Name, compI)\n if compI > userI {\n fmt.Printf(\"The %s rolled higher, they will attack first.\\n\", e.Name)\n } else {\n fmt.Printf(\"%s rolled higher, %s will attack first.\\n\", p.Name, p.Name)\n first = true\n }\n for p.Health > 0 && e.Health > 0 {\n if first == true {\n p.UserAttack(e, l)\n if e.Health > 0 {\n p.MobAttack(e, l)\n } else {\n break\n }\n } else {\n p.MobAttack(e, l)\n if p.Health > 0 {\n p.UserAttack(e, l)\n } else {\n break\n }\n }\n }\n } \n }\n if p.Health <= 0 {\n d.Death()\n } else if e.Health <= 0 && p.Health >= 0 {\n winString := fmt.Sprintf(\"%s successfully defeated a %s!\", p.Name, e.Name)\n l.AddAction(t.Format(\"3:04:05 \")+winString)\n fmt.Printf(\"The %s dropped gold.\", e.Name)\n p.Gold += e.Gold\n lootRoll := RandomNumber(100)\n if lootRoll <= 25 {\n loot := RandomItem(p, item)\n fmt.Printf(\"The %s dropped loot [%s].\", e.Name, loot)\n itemString := fmt.Sprintf(\"%s obtained a %s and %d gold from a %s.\", p.Name, loot, e.Gold, e.Name)\n l.AddAction(t.Format(\"3:04:05 \")+itemString)\n } else {\n goldString := fmt.Sprintf(\"%s gained %d gold from a %s.\", p.Name, e.Gold, e.Name)\n l.AddAction(t.Format(\"3:04:05 \")+goldString)\n }\n if p.Key == false {\n fmt.Printf(\"The %s dropped a giant key.\", e.Name)\n l.AddAction(t.Format(\"3:04:05 \")+p.Name+\" found a giant key.\")\n p.Key = true\n }\n fmt.Printf(\"\\nPosting results\")\n TypedText(\"...\", 300)\n }\n return choice\n}", "func getAPlayer(theUsername string, thePassword string) (Player, string) {\n\treturnedError := \"all good\"\n\tvar returnedPlayer Player\n\n\t//Query DB for this Player\n\tplayerCollection := mongoClient.Database(\"superdbtest1\").Collection(\"players\") //here's our collection\n\tfilter := bson.D{{\"username\", 
theUsername}} //Here's our filter to look for\n\t//Here's how to find and assign multiple Documents using a cursor\n\t// Pass these options to the Find method\n\tfindOptions := options.Find()\n\tcur, err := playerCollection.Find(context.TODO(), filter, findOptions)\n\tif err != nil {\n\t\terrMsg := \"Error looking through Player db in getAPlayer: \" + err.Error()\n\t\tlogWriter(errMsg)\n\t\tfmt.Println(errMsg)\n\t\treturnedError = \"all bad\"\n\t}\n\t//Loop through results\n\tfor cur.Next(theContext) {\n\t\t// create a value into which the single document can be decoded\n\t\terr := cur.Decode(&returnedPlayer)\n\t\tif err != nil {\n\t\t\terrMsg := \"Issue writing the current element for Player in getAPlayer: \" + err.Error()\n\t\t\tfmt.Println(errMsg)\n\t\t\tlogWriter(errMsg)\n\t\t\treturnedError = \"all bad\"\n\t\t}\n\t}\n\tif err := cur.Err(); err != nil {\n\t\terrMsg := \"Issue looping through Players in getAPlayer: \" + err.Error()\n\t\tlogWriter(errMsg)\n\t\tfmt.Println(errMsg)\n\t\treturnedError = \"all bad\"\n\t}\n\n\t//Check if returned player has password\n\tif len(returnedPlayer.Username) <= 0 {\n\t\treturnedError = \"all bad\"\n\t} else if len(returnedPlayer.Password) <= 0 {\n\t\treturnedError = \"all bad\"\n\t} else {\n\t\t//Password and Username are good so far\n\t}\n\n\t//Check if password entered matches password in db\n\tpassOK := checkPassword(returnedPlayer.Password, thePassword)\n\tif passOK == true {\n\t\t//Password is good\n\t} else {\n\t\treturnedError = \"password bad\"\n\t}\n\n\treturn returnedPlayer, returnedError\n}", "func (c *CardScorer) Player() *Player {\n\treturn c.PlayerData.Data\n}", "func (g *GameHub) destroyPlayer(player *Player) {\n\tg.Lock()\n\tdefer g.Unlock()\n\tif _, exist := g.Players[player]; exist {\n\t\tdelete(g.Players, player)\n\t\tclose(player.push)\n\t\tclose(player.test)\n\t\tplayer = nil //clear pointer, garbage collector will free memory associated with player\n\t\treturn\n\t}\n}", "func playerInList(p Player, ls 
[]Player) bool {\n\tfor _, player := range ls {\n\t\tif p == player {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func positionplayer() {\n\ttry := 2\n\tfor (item[playerx][playery] != 0 || mitem[playerx][playery] != 0) && try != 0 {\n\t\tplayerx++\n\t\tif playerx >= MAXX-1 {\n\t\t\tplayerx = 1\n\t\t\tplayery++\n\t\t\tif playery >= MAXY-1 {\n\t\t\t\tplayery = 1\n\t\t\t\ttry--\n\t\t\t}\n\t\t}\n\t}\n\tif try == 0 {\n\t\tlprcat(\"Failure in positionplayer\\n\")\n\t}\n}", "func ExecutePlay() int {\n\tutil.Mainlog.Println(\"game.ExecutePlay()\")\n\t\n\t// get the current turn from end of turn list\n\tcurrTurn := turns.Back().Value.(*turn)\n\tutil.Mainlog.Println(\"currTurn: \", currTurn)\n\t\n\t// create a new play\n\tvar p = play{}\n\t\n\t// assign a sequence number\n\tplaySequenceSource++ // this will need sync'ing in multi-threaded world\n\tp.sequence = playSequenceSource\n\t\n\n\tvar pDefendTerr *Territory\n\tvar err error\n\tfor {\n\t\t// get the attacking territory\n\t\tp.attackFrom = SelectAttackingTerritory()\n\t\t// TODO check for zero-value territory.\n\t\t\n\t\t// get the defending territory\n\t\tpDefendTerr, err = SelectDefendingTerritory(p.attackFrom)\n\t\tif (err == nil) {\n\t\t\tp.attackTo = *pDefendTerr\n\t\t\tbreak\n\t\t} else {\n\t\t\tcurrentPlayer.PutMessage(err.Error() + \"\\n\")\n\t\t}\n\t}\n\t\n\t// append play to turn's play list - play is \"official\"\n\tcurrTurn.plays.PushBack(&p)\n\n\t// roll the die/dice for the attacker\n\tp.attackerRoll = util.Roll()\n\t\n\t// roll the die/dice for the defender\n\tp.defenderRoll = util.Roll()\n\t\n\tPutMessageAllPlayers(fmt.Sprintf(\"Attacker rolled %d, defender rolled %d \\n\", p.attackerRoll, p.defenderRoll))\n\t\n\t\n\t// determine outcome (win/loss (ties go to defender))\n\tif (p.attackerRoll > p.defenderRoll) {\n\t\t// Current player wins\n\t\tPutMessageAllPlayers(\"Attacker wins!\\n\")\n\t\tp.attackerWin = true\n\t\tpDefendTerr.Owner = currTurn.attacker\n\n\t\tcurrTurn.nbrWins++\n\t\tif 
(currTurn.streak < 0) {\n\t\t\tcurrTurn.streak = 1\n\t\t} else {\n\t\t\tcurrTurn.streak++\n\t\t}\n\n\t} else {\n\t\t// Current player loses\n\t\tPutMessageAllPlayers(\"Defender wins!\\n\")\n\t\tcurrTurn.nbrLosses++\n\t\tif (currTurn.streak > 0) {\n\t\t\tcurrTurn.streak = -1\n\t\t} else {\n\t\t\tcurrTurn.streak--\n\t\t}\n\t}\n\t\n\tutil.Mainlog.Println(\"game.executePlay() p:\", p)\n\t\n\tif (currTurn.nbrLosses == 3) {\n\t\tPutMessageAllPlayers(\"That was the third loss in the turn.\\n\")\n\t\treturn -1\n\t} else if (currTurn.streak == -2) {\n\t\tPutMessageAllPlayers(\"Two losses in a row. Too bad.\\n\")\n\t\treturn -1\n\t} else {\n\t\treturn 0\n\t}\n}", "func (p *Player) playStrategy(s StrPoint, h *Hand) bool {\n\tvar ret bool\n\tif p.strategy[s] {\n\t\tlog.Printf(\"act: %s YES\", &s)\n\t\th.hit()\n\t\tlog.Printf(\"play: HIT. Hand: %s\\n\", h)\n\t\tif h.isBusted {\n\t\t\tret = false\n\t\t} else {\n\t\t\tret = true\n\t\t}\n\t} else {\n\t\tlog.Printf(\"act: %s NO\", &s)\n\t\tret = false\n\t}\n\treturn ret\n}", "func (g *Game) getPlayerBySymbol(s byte) (*Player, error) {\n\tp := new(Player)\n\n\tfor i := range g.p {\n\t\tif g.p[i].s == s {\n\t\t\tp = &g.p[i]\n\t\t}\n\t}\n\n\tif p == nil {\n\t\te := errors.New(\"Player does not exist in the game\")\n\n\t\treturn p, e\n\t}\n\n\treturn p, nil\n}", "func toGame(data interface{}, isResponse bool) *Game {\n\tif data == nil {\n\t\treturn nil\n\t}\n\n\tif isResponse {\n\t\tdest := gameResponse{}\n\n\t\tif recast(data, &dest) == nil {\n\t\t\treturn &dest.Data\n\t\t}\n\t} else {\n\t\tdest := Game{}\n\n\t\tif recast(data, &dest) == nil {\n\t\t\treturn &dest\n\t\t}\n\t}\n\n\treturn nil\n}", "func (g *game) rmPlayer(p *player) {\n\t// If game is at lobby, remove the player completely (also freeing up the name).\n\tif g.State == \"lobby\" || p.Spectator {\n\t\tfor i := range g.Players {\n\t\t\tif g.Players[i] == p && !p.Admin {\n\t\t\t\tg.Players = append(g.Players[:i], 
g.Players[i+1:]...)\n\t\t\t\tg.broadcastGameInfo()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tp.State = \"offline\"\n\tp.Connection = nil\n\tg.broadcastGameInfo()\n}", "func (c *ConfigBridge) addGame() {\n\tc.config.AddGame()\n}", "func GetPlayer(lobby *game.Lobby, r *http.Request) *game.Player {\n\treturn lobby.GetPlayer(GetUserSession(r))\n}", "func getPlayers(c *gin.Context) {\n\n\tsqlStatement := `SELECT id, first_name, last_name, is_admin FROM player;`\n\tqueryPlayers(c, sqlStatement)\n}", "func (g *Controller) RegisterPlayer(conn model.Connection) {\n\tplayers := g.state.GetPlayers()\n\tpID := g.state.GetNewPlayerID()\n\tspawn := g.state.Map.GetRandomSpawn(players)\n\tplayer := model.NewPlayer(pID, spawn.X, spawn.Y, conn)\n\tg.networkManager.Register(player, g.state)\n\tg.state.AddPlayer(player)\n\tg.CheckStartConditions()\n\tif g.state.InProgress() { // game already started, new Player has to know about it\n\t\tg.networkManager.SendGameStartToClient(player.Client, g.state)\n\t}\n\tlog.Printf(\"GameManager: Player %d connected, %d connected Players\", player.ID, g.state.GetPlayerCount())\n}" ]
[ "0.63689643", "0.6351946", "0.63514185", "0.6286333", "0.62635595", "0.62445056", "0.6186175", "0.615802", "0.6033943", "0.59299517", "0.59293294", "0.5863469", "0.5795158", "0.57452166", "0.5714151", "0.5700817", "0.5668207", "0.56542397", "0.5643266", "0.5641598", "0.5608093", "0.56031585", "0.5594767", "0.55805254", "0.5537218", "0.553506", "0.550783", "0.5476285", "0.54449", "0.5442205", "0.54368764", "0.5416352", "0.54161495", "0.54120165", "0.54114515", "0.54089284", "0.54080766", "0.5396321", "0.5395977", "0.53955334", "0.5387318", "0.53870857", "0.5385919", "0.5372079", "0.5365686", "0.53652465", "0.53560984", "0.5356082", "0.5353946", "0.5347252", "0.53322685", "0.5329342", "0.53292096", "0.5323196", "0.53223705", "0.53122526", "0.5306198", "0.5304906", "0.5295581", "0.52901316", "0.5272812", "0.5267726", "0.5263722", "0.5260249", "0.5244534", "0.52422446", "0.52321744", "0.5221631", "0.521887", "0.5214218", "0.5214085", "0.5213526", "0.5212362", "0.5205069", "0.5200384", "0.5194123", "0.51922613", "0.5187821", "0.518361", "0.5165306", "0.51647365", "0.51642114", "0.5161556", "0.5156683", "0.5153977", "0.51507455", "0.51447797", "0.51419574", "0.51409864", "0.51395804", "0.5138747", "0.5127159", "0.5126954", "0.51218283", "0.51212114", "0.5119267", "0.5114617", "0.5105889", "0.510219", "0.5097926", "0.5077866" ]
0.0
-1
encodeKey returns a full storage key for a given network. e.g: 10.4.0.0/16 > /ipam/subnet/10.4.0.016
func encodeKey(network net.IPNet) string { return fmt.Sprintf( IPAMSubnetStorageKeyFormat, strings.Replace(network.String(), "/", "-", -1), ) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Network) MakeKey(prefix string) string {\n\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", prefix, \"/\", \"networks/\", m.Tenant, \"/\", m.Name)\n}", "func (k *Key) Encode() string {\n\te := make([]*pb.Path_Element, len(k.toks))\n\tfor i, t := range k.toks {\n\t\tt := t\n\t\te[i] = &pb.Path_Element{\n\t\t\tType: &t.Kind,\n\t\t}\n\t\tif t.StringID != \"\" {\n\t\t\te[i].Name = &t.StringID\n\t\t} else {\n\t\t\te[i].Id = &t.IntID\n\t\t}\n\t}\n\tvar namespace *string\n\tif ns := k.kc.Namespace; ns != \"\" {\n\t\tnamespace = &ns\n\t}\n\tr, err := proto.Marshal(&pb.Reference{\n\t\tApp: &k.kc.AppID,\n\t\tNameSpace: namespace,\n\t\tPath: &pb.Path{\n\t\t\tElement: e,\n\t\t},\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// trim padding\n\treturn strings.TrimRight(base64.URLEncoding.EncodeToString(r), \"=\")\n}", "func NetAddressKey(na *wire.NetAddress) string {\n\tport := strconv.FormatUint(uint64(na.Port), 10)\n\taddr := net.JoinHostPort(ipString(na), port)\n\treturn addr\n}", "func (kv *DisKV) encodeKey(key string) string {\n\treturn base32.StdEncoding.EncodeToString([]byte(key))\n}", "func EncodeKey(pk *[32]byte) string {\n\treturn base32.StdEncoding.EncodeToString(pk[:])\n}", "func encodeAddress(hash160 []byte, netID [2]byte) string {\n\t// Format is 2 bytes for a network and address class (i.e. 
P2PKH vs\n\t// P2SH), 20 bytes for a RIPEMD160 hash, and 4 bytes of checksum.\n\treturn base58.CheckEncode(hash160[:ripemd160.Size], netID)\n}", "func (b *Blockstore) StorageKey(dst []byte, cid cid.Cid) []byte {\n\th := cid.Hash()\n\treqsize := base32.RawStdEncoding.EncodedLen(len(h)) + b.prefixLen\n\tif reqsize > cap(dst) {\n\t\t// passed slice is smaller than required size; create new.\n\t\tdst = make([]byte, reqsize)\n\t} else if reqsize > len(dst) {\n\t\t// passed slice has enough capacity, but its length is\n\t\t// restricted, expand.\n\t\tdst = dst[:cap(dst)]\n\t}\n\n\tif b.prefixing { // optimize for branch prediction.\n\t\tcopy(dst, b.prefix)\n\t\tbase32.RawStdEncoding.Encode(dst[b.prefixLen:], h)\n\t} else {\n\t\tbase32.RawStdEncoding.Encode(dst, h)\n\t}\n\treturn dst[:reqsize]\n}", "func escapeKey(key string, isPrefix bool) string {\n\treturn escape.HexEscape(key, func(r []rune, i int) bool {\n\t\tc := r[i]\n\t\tswitch {\n\t\t// Azure does not work well with backslashes in blob names.\n\t\tcase c == '\\\\':\n\t\t\treturn true\n\t\t// Azure doesn't handle these characters (determined via experimentation).\n\t\tcase c < 32 || c == 34 || c == 35 || c == 37 || c == 63 || c == 127:\n\t\t\treturn true\n\t\t// Escape trailing \"/\" for full keys, otherwise Azure can't address them\n\t\t// consistently.\n\t\tcase !isPrefix && i == len(key)-1 && c == '/':\n\t\t\treturn true\n\t\t// For \"../\", escape the trailing slash.\n\t\tcase i > 1 && r[i] == '/' && r[i-1] == '.' 
&& r[i-2] == '.':\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n}", "func EncodeKey(ultKey *ULTKey) string {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.BigEndian, ultKey)\n\treturn b58.Encode(buf.Bytes())\n}", "func (pubKey PubKeySecp256k1Eth) KeyString() string {\n\treturn fmt.Sprintf(\"%X\", pubKey[:])\n}", "func encode(k Key) ([]byte, error) {\n\tver := k.version()\n\tpsize := ver.PayloadSize()\n\ttsize := 1 + psize + 4\n\traw := k.raw()\n\tif len(raw) > psize {\n\t\treturn nil, errors.New(\"tbd\")\n\t}\n\ttmp := make([]byte, tsize)\n\ttmp[0] = byte(ver)\n\tcopy(tmp[len(tmp)-4-len(raw):], raw)\n\tsum := doublehash.SumDoubleSha256(tmp[:1+psize])\n\tcopy(tmp[1+psize:], sum[:4])\n\treturn rippleEncoding.Encode(tmp)\n}", "func (k *CtKey4) ToNetwork() CtKey {\n\tn := *k\n\tn.SourcePort = byteorder.HostToNetwork16(n.SourcePort)\n\tn.DestPort = byteorder.HostToNetwork16(n.DestPort)\n\treturn &n\n}", "func GetRedisNetworkKey() string {\n\treturn GetRedisKey(\"network\")\n}", "func (key Key) Encode(cryptKey *Key) (SimpleBlob, error) {\n\tvar expKey C.HCRYPTKEY\n\tvar blobType C.DWORD = C.PUBLICKEYBLOB\n\tif cryptKey != nil {\n\t\texpKey = cryptKey.hKey\n\t\tblobType = C.SIMPLEBLOB\n\t}\n\tvar slen C.DWORD\n\tif C.CryptExportKey(key.hKey, expKey, blobType, 0, nil, &slen) == 0 {\n\t\treturn nil, getErr(\"Error getting length for key blob\")\n\t}\n\tbuf := make([]byte, slen)\n\tif C.CryptExportKey(key.hKey, expKey, blobType, 0, (*C.BYTE)(unsafe.Pointer(&buf[0])), &slen) == 0 {\n\t\treturn nil, getErr(\"Error exporting key blob\")\n\t}\n\treturn SimpleBlob(buf[0:int(slen)]), nil\n}", "func formatKey(key string, salt string) string {\n formatted := fmt.Sprintf(\"$argon2i$v=19$m=65536,t=1,p=4$%s$%s\", salt, key)\n\n return formatted\n}", "func (p *param) Key() string {\n\treturn \"conf.network.\" + p.Name()\n}", "func encodeTestKey(kvDB *client.DB, keyStr string) (roachpb.Key, error) {\n\tvar key []byte\n\ttokens := strings.Split(keyStr, \"/\")\n\n\tfor _, tok 
:= range tokens {\n\t\t// Encode the table ID if the token is a table name.\n\t\tif tableNames[tok] {\n\t\t\tdesc := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, tok)\n\t\t\tkey = encoding.EncodeUvarintAscending(key, uint64(desc.ID))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Interleaved sentinel.\n\t\tif tok == \"#\" {\n\t\t\tkey = encoding.EncodeNotNullDescending(key)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Assume any other value is an unsigned integer.\n\t\ttokInt, err := strconv.ParseUint(tok, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkey = encoding.EncodeUvarintAscending(key, tokInt)\n\t}\n\n\treturn key, nil\n}", "func EncodeKey(key []byte) string {\n\treturn hex.EncodeToString(key)\n}", "func GraphKey(base []string) (string, error) {\n var a IpvsVirtualServer\n a.Protocol = base[0]\n a.Schedule = base[2]\n VirtualServerInfo, err := Hex2IpvsServer(base[1])\n if (err != nil) {\n return \"\", err\n }\n a.IPAddress = VirtualServerInfo.IPAddress\n a.Port = VirtualServerInfo.Port\n var m = [...]string{\n strings.Replace(a.IPAddress,\".\",\"_\",-1),\n a.Port,\n a.Protocol,\n a.Schedule,\n }\n return strings.Replace(GraphNamePrefixTemplate, \"*\", strings.Join(m[:],\"_\"), 1), nil\n}", "func keyString(k ssh.PublicKey) string {\n\treturn k.Type() + \" \" + base64.StdEncoding.EncodeToString(k.Marshal()) // e.g. 
\"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTY....\"\n}", "func buildKey(prefix, txid []byte, outputIndex uint64) []byte {\n\tkey := make([]byte, len(prefix)+len(txid)+8)\n\tcopy(key, prefix)\n\tcopy(key[len(prefix):], txid)\n\tbinary.BigEndian.PutUint64(key[len(prefix)+len(txid):], outputIndex)\n\treturn key\n}", "func (broadcast *Broadcast) encodeForEncryption(w io.Writer) error {\n\terr := broadcast.bm.encodeBroadcast(w)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsigLength := uint64(len(broadcast.sig))\n\tif err = bmutil.WriteVarInt(w, sigLength); err != nil {\n\t\treturn err\n\t}\n\tif _, err = w.Write(broadcast.sig); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (k *CtKey4Global) ToNetwork() CtKey {\n\treturn &CtKey4Global{\n\t\tTupleKey4Global: *k.TupleKey4Global.ToNetwork().(*tuple.TupleKey4Global),\n\t}\n}", "func (w *Wallet) ToKey(address string) (keys.PublicKey, error) {\n\treturn keys.PublicKey{}, nil\n}", "func (esr *etcdSubnetRegistry) parseNetworkKey(s string) (string, bool) {\n\tif parts := esr.networkRegex.FindStringSubmatch(s); len(parts) == 3 {\n\t\treturn parts[1], parts[2] != \"\"\n\t}\n\n\treturn \"\", false\n}", "func MakePrefixFromNetwork(network string, netSize int) string {\n\tminPartsNeeded := netSize / 16\n\tparts := strings.Split(strings.TrimRight(network, \":\"), \":\")\n\thaveParts := len(parts)\n\n\tif haveParts > minPartsNeeded {\n\t\tparts[minPartsNeeded] = strings.TrimSuffix(parts[minPartsNeeded], \"00\")\n\t}\n\tfor haveParts < minPartsNeeded {\n\t\tparts = append(parts, \"0\")\n\t\thaveParts++\n\t}\n\tprefix := strings.Join(parts, \":\")\n\tif haveParts == minPartsNeeded {\n\t\tprefix += \":\"\n\t}\n\treturn prefix\n}", "func Key(keyType string) (string, error) {\n\tswitch keyType {\n\tcase \"aes:128\":\n\t\tkey := memguard.NewBufferRandom(16).Bytes()\n\t\treturn base64.StdEncoding.EncodeToString(key), nil\n\tcase \"aes:256\":\n\t\tkey := memguard.NewBufferRandom(32).Bytes()\n\t\treturn 
base64.StdEncoding.EncodeToString(key), nil\n\tcase \"secretbox\":\n\t\tkey := memguard.NewBufferRandom(32).Bytes()\n\t\treturn base64.StdEncoding.EncodeToString(key), nil\n\tcase \"fernet\":\n\t\t// Generate a fernet key\n\t\tk := &fernet.Key{}\n\t\tif err := k.Generate(); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn k.Encode(), nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"invalid keytype (%s) [aes:128, aes:256, secretbox, fernet]\", keyType)\n\t}\n}", "func (key PublicKey) String() string {\n\treturn base58.Encode(key[:])\n}", "func (api *nodeAPI) getFullKey(tenant, name string) string {\n\tif tenant != \"\" {\n\t\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"nodes\", \"/\", tenant, \"/\", name)\n\t}\n\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"nodes\", \"/\", name)\n}", "func (k *Keychain) Key() (string, error) {\n\tkey, err := k.BinKey()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hex.EncodeToString(key), nil\n}", "func (api *distributedservicecardAPI) getFullKey(tenant, name string) string {\n\tif tenant != \"\" {\n\t\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"distributedservicecards\", \"/\", tenant, \"/\", name)\n\t}\n\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"distributedservicecards\", \"/\", name)\n}", "func Key(typ, name, namespace string) string {\n\treturn fmt.Sprintf(\"%s/%s/%s\", typ, namespace, name)\n}", "func (pubKey PubKeySecp256k1) KeyString() string {\n\treturn Fmt(\"%X\", pubKey[:])\n}", "func keyToAddr(key *secp256k1.PrivateKey, net *chaincfg.Params) (dcrutil.Address, error) {\n\tpubKey := (*secp256k1.PublicKey)(&key.PublicKey)\n\tserializedKey := pubKey.SerializeCompressed()\n\tpubKeyAddr, err := dcrutil.NewAddressSecpPubKey(serializedKey, net)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pubKeyAddr.AddressPubKeyHash(), nil\n}", "func (e Aes128CtsHmacSha256128) StringToKey(secret string, 
salt string, s2kparams string) ([]byte, error) {\n\tsaltp := rfc8009.GetSaltP(salt, \"aes128-cts-hmac-sha256-128\")\n\treturn rfc8009.StringToKey(secret, saltp, s2kparams, e)\n}", "func (a *Address) Encode(w io.Writer) error {\n\tpk, ok := a.PubKey.(*secp256k1.PubKey)\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"wrong type: %T\", a.PubKey))\n\t}\n\treturn cio.WriteBytesUint16(w, pk.Key)\n}", "func getKey(data string) string {\n\tsign := md5.Sum([]byte(data))\n\tsignStr := fmt.Sprintf(\"%x\", sign)\n\treturn signStr[:7]\n}", "func (d *Disk) getKey(p *DiskParams) []byte {\n\treturn []byte(time_util.TimeToName(time.Unix(p.ExicutionTime, 0), fmt.Sprintf(\"%x\", d.hasher.Sum(nil))))\n}", "func GraphKey(graph string) []byte {\n\treturn bytes.Join([][]byte{graphPrefix, []byte(graph)}, []byte{0})\n}", "func (api *hostAPI) getFullKey(tenant, name string) string {\n\tif tenant != \"\" {\n\t\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"hosts\", \"/\", tenant, \"/\", name)\n\t}\n\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"hosts\", \"/\", name)\n}", "func escapeKey(key string) string {\n\treturn escape.HexEscape(key, func(r []rune, i int) bool {\n\t\tc := r[i]\n\t\tswitch {\n\t\t// S3 doesn't handle these characters (determined via experimentation).\n\t\tcase c < 32:\n\t\t\treturn true\n\t\t// For \"../\", escape the trailing slash.\n\t\tcase i > 1 && c == '/' && r[i-1] == '.' 
&& r[i-2] == '.':\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n}", "func PathToKey(path string) (string) {\n\treturn base64.StdEncoding.EncodeToString([]byte(path))\n}", "func (api *clusterAPI) getFullKey(tenant, name string) string {\n\tif tenant != \"\" {\n\t\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"cluster\", \"/\", tenant, \"/\", name)\n\t}\n\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"cluster\", \"/\", name)\n}", "func (k *PrivateKey) Encoded() ([]byte, error) {\n\treturn k.seckey, nil\n}", "func encodePubkey(pubkey *ecdsa.PublicKey) []byte {\n\treturn []byte(base64.RawURLEncoding.EncodeToString(ethcrypto.FromECDSAPub(pubkey)))\n}", "func keyToString(k []byte) string {\n\tif len(k) != lenHash {\n\t\tpanic(fmt.Sprintf(\"bad hash passed to hashToKey: %x\", k))\n\t}\n\treturn fmt.Sprintf(\"%s%x\", hashPrefix, k)[0:lenKey]\n}", "func Base58Encode(pkh string) string {\n\tpkhBytes, _ := hex.DecodeString(pkh)\n\tb58 := base58.NewBitcoinBase58()\n\taddr, _ := b58.EncodeToString(pkhBytes)\n\treturn addr\n}", "func shortenNetworkId(networkId string) string {\n\t// vde_switch has an internal limit on the length of this name from the\n\t// command line it seems. 
Base64 encoding isn't really enough to beat it,\n\t// so we truncate to docker cli length for convenience and ensure our own\n\t// commands can handle conflicts.\n\treturn networkId[:12]\n}", "func KeyAsString(key uint64) string {\n\treturn strconv.FormatUint(key, 36)\n}", "func (api *bucketAPI) getFullKey(tenant, name string) string {\n\tif tenant != \"\" {\n\t\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"objstore\", \"/\", \"buckets\", \"/\", tenant, \"/\", name)\n\t}\n\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"objstore\", \"/\", \"buckets\", \"/\", name)\n}", "func keyToString(pubKey ssh.PublicKey, comment string) string {\n\treturn fmt.Sprintf(\n\t\t\"%s %s %s\\n\",\n\t\tpubKey.Type(),\n\t\tbase64.StdEncoding.EncodeToString(pubKey.Marshal()),\n\t\tcomment,\n\t)\n}", "func MakeV4PrefixFromNetwork(ip string) string {\n\tparts := strings.Split(ip, \".\")\n\treturn fmt.Sprintf(\"%s.%s.%s.\", parts[0], parts[1], parts[2])\n}", "func EkgKey(domain, id []byte) []byte {\n return createKey(EKG,domain,id)\n}", "func DirectKey(domain,id []byte) []byte {\n return createKey(DIRECT,domain,id)\n}", "func ConnEkgKey(cid types.ConnId) []byte {\n return createKey(CONN,EKG,cid.Serialize())\n}", "func computeKey(cfg external.SharedConfig) string {\n\tvar a []string\n\t// keys must be sorted\n\tif cfg.RoleDurationSeconds != nil {\n\t\ta = append(a, fmt.Sprintf(`\"DurationSeconds\": %d`, int(cfg.RoleDurationSeconds.Seconds())))\n\t}\n\ta = append(a, `\"RoleArn\": `+strconv.Quote(cfg.RoleARN))\n\tif cfg.RoleSessionName != \"\" {\n\t\ta = append(a, `\"RoleSessionName\": `+strconv.Quote(cfg.RoleSessionName))\n\t}\n\ta = append(a, `\"SerialNumber\": `+strconv.Quote(cfg.MFASerial))\n\ts := sha1.Sum([]byte(fmt.Sprintf(\"{%s}\", strings.Join(a, \", \"))))\n\treturn hex.EncodeToString(s[:])\n}", "func (k *Key4) String() string { return k.ipv4.String() }", "func getNetwork() string {\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\treturn 
\"unknown\"\n\t}\n\n\taddrs, err := net.LookupIP(hostname)\n\tif err != nil {\n\t\treturn hostname\n\t}\n\n\tfor _, addr := range addrs {\n\t\tif ipv4 := addr.To4(); ipv4 != nil {\n\t\t\tip, err := ipv4.MarshalText()\n\t\t\tif err != nil {\n\t\t\t\treturn hostname\n\t\t\t}\n\t\t\thosts, err := net.LookupAddr(string(ip))\n\t\t\tif err != nil || len(hosts) == 0 {\n\t\t\t\treturn hostname\n\t\t\t}\n\t\t\tfqdn := hosts[0]\n\t\t\treturn strings.TrimSuffix(fqdn, \".\") // return fqdn without trailing dot\n\t\t}\n\t}\n\treturn hostname\n}", "func (pubKey PubKeyEd25519) KeyString() string {\n\treturn Fmt(\"%X\", pubKey[:])\n}", "func (pubKey PubKeyEd25519) KeyString() string {\n\treturn Fmt(\"%X\", pubKey[:])\n}", "func calculateServerResponseKey(key string) string {\n s := sha1.Sum([]byte(key + WS_SERVER_ID))\n b64 := base64.StdEncoding.EncodeToString(s[:])\n\n return b64\n}", "func keyToGOB64(k *key) (string, error) {\n\tb := bytes.Buffer{}\n\te := gob.NewEncoder(&b)\n\n\tif err := e.Encode(k); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn base64.StdEncoding.EncodeToString(b.Bytes()), nil\n}", "func (api *tenantAPI) getFullKey(tenant, name string) string {\n\tif tenant != \"\" {\n\t\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"tenants\", \"/\", tenant, \"/\", name)\n\t}\n\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"tenants\", \"/\", name)\n}", "func msgToKeyWithSubnet(m *dns.Msg, ecsIP net.IP, mask int) (key []byte) {\n\tq := m.Question[0]\n\tkeyLen := keyIPIndex + len(q.Name)\n\tmasked := mask != 0\n\tif masked {\n\t\tkeyLen += len(ecsIP)\n\t}\n\n\t// Initialize the slice.\n\tkey = make([]byte, keyLen)\n\n\t// Put DO.\n\topt := m.IsEdns0()\n\tkey[0] = mathutil.BoolToNumber[byte](opt != nil && opt.Do())\n\n\t// Put Qtype.\n\t//\n\t// TODO(d.kolyshev): We should put Qtype in key[1:].\n\tbinary.BigEndian.PutUint16(key[:], q.Qtype)\n\n\t// Put Qclass.\n\tbinary.BigEndian.PutUint16(key[1+packedMsgLenSz:], 
q.Qclass)\n\n\t// Add mask.\n\tkey[keyMaskIndex] = uint8(mask)\n\tk := keyIPIndex\n\tif masked {\n\t\tk += copy(key[keyIPIndex:], ecsIP)\n\t}\n\n\tcopy(key[k:], strings.ToLower(q.Name))\n\n\treturn key\n}", "func GetLocalNetworkIDKey() string {\n\treturn \"localNetworkID\"\n}", "func createKey(name string, version string, nodeID string) string {\n\treturn fmt.Sprintf(\"%s:%s:%s\", nodeID, name, version)\n}", "func packKey(field []byte, data []byte) ([]byte, error) {\n\treturn packKeys(field, [][]byte{data})\n\t//w := new(bytes.Buffer)\n\t//err := serialization.WriteVarBytes(w, data)\n\t//if err != nil {\n\t//\treturn nil, errors.NewDetailErr(err, errors.ErrNoCode, \"[AuthContract] packKey failed\")\n\t//}\n\t//key := append(field, w.Bytes()...)\n\t//return key, nil\n}", "func (pk *PublicKey) Key() string {\n\treturn string(pk.PublicKeyHex.Value)\n}", "func (api *snapshotrestoreAPI) getFullKey(tenant, name string) string {\n\tif tenant != \"\" {\n\t\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"config-restore\", \"/\", tenant, \"/\", name)\n\t}\n\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"config-restore\", \"/\", name)\n}", "func (k Key) String() (string, error) {\n\treturn hex.EncodeToString([]byte(k)), nil\n}", "func (codec codec[T]) EncodeKey(key dynamo.Thing) (map[string]types.AttributeValue, error) {\n\thashkey := key.HashKey()\n\tif hashkey == \"\" {\n\t\treturn nil, fmt.Errorf(\"invalid key of %T, hashkey cannot be empty\", key)\n\t}\n\n\tsortkey := key.SortKey()\n\tif sortkey == \"\" {\n\t\tsortkey = \"_\"\n\t}\n\n\tgen := map[string]types.AttributeValue{}\n\tgen[codec.pkPrefix] = &types.AttributeValueMemberS{Value: string(hashkey)}\n\tgen[codec.skSuffix] = &types.AttributeValueMemberS{Value: string(sortkey)}\n\n\treturn gen, nil\n}", "func (v ConstructionDeriveRequest) EncodeJSON(b []byte, network []byte) []byte {\n\tb = append(b, network...)\n\tif len(v.Metadata) > 0 {\n\t\tb = append(b, 
`\"metadata\":`...)\n\t\tb = append(b, v.Metadata...)\n\t\tb = append(b, \",\"...)\n\t}\n\tb = append(b, `\"public_key\":`...)\n\tb = v.PublicKey.EncodeJSON(b)\n\treturn append(b, \"}\"...)\n}", "func GenerateKey(net bitcoin.Network) (*wallet.Key, error) {\r\n\tkey, err := bitcoin.GenerateKey(net)\r\n\tif err != nil {\r\n\t\treturn nil, errors.Wrap(err, \"Failed to generate key\")\r\n\t}\r\n\r\n\tresult := wallet.Key{\r\n\t\tKey: key,\r\n\t}\r\n\r\n\tresult.Address, err = key.RawAddress()\r\n\tif err != nil {\r\n\t\treturn nil, errors.Wrap(err, \"Failed to create key address\")\r\n\t}\r\n\r\n\treturn &result, nil\r\n}", "func StringKey(k *msgs.PublicKey) string {\n\tkeyBytes, err := proto.Marshal(k)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(keyBytes)\n}", "func (key Key) PublicKeyStr() string {\n\taddr := types.AccAddress(key.k.PubKey().Address())\n\treturn addr.String()\n}", "func (v NetworkIdentifier) EncodeJSON(b []byte) []byte {\n\tb = append(b, `{\"blockchain\":`...)\n\tb = json.AppendString(b, v.Blockchain)\n\tb = append(b, `,\"network\":`...)\n\tb = json.AppendString(b, v.Network)\n\tb = append(b, \",\"...)\n\tif v.SubNetworkIdentifier.Set {\n\t\tb = append(b, '\"', 's', 'u', 'b', '_', 'n', 'e', 't', 'w', 'o', 'r', 'k', '_', 'i', 'd', 'e', 'n', 't', 'i', 'f', 'i', 'e', 'r', '\"', ':')\n\t\tb = v.SubNetworkIdentifier.Value.EncodeJSON(b)\n\t\tb = append(b, \",\"...)\n\t}\n\tb[len(b)-1] = '}'\n\treturn b\n}", "func HexToKey(s string) Key { return BytesToKey(common.FromHex(s)) }", "func (pk *PublicKey) String() string {\n\treturn pk.Algorithm.String() + \":\" + pk.Key.String()\n}", "func constructKargsFromNetworkConfig(s *machineScope) (string, error) {\n\toutKargs := \"\"\n\tnetworkConfigs := s.providerSpec.Network.Devices\n\tfor configIdx, networkConfig := range networkConfigs {\n\t\t// retrieve any IP addresses assigned by an IP address pool\n\t\taddressesFromPool, gatewayFromPool, err := getAddressesFromPool(configIdx, networkConfig, 
s)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"error getting addresses from IP pool: %w\", err)\n\t\t}\n\t\tvar gateway string\n\t\tif len(gatewayFromPool) > 0 {\n\t\t\tgateway = gatewayFromPool\n\t\t} else {\n\t\t\tgateway = networkConfig.Gateway\n\t\t}\n\n\t\tvar gatewayIp netip.Addr\n\t\tif len(gateway) > 0 {\n\t\t\tgatewayIp, err = netip.ParseAddr(gateway)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"error parsing gateway address: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tipAddresses := []string{}\n\t\tipAddresses = append(ipAddresses, networkConfig.IPAddrs...)\n\t\tipAddresses = append(ipAddresses, addressesFromPool...)\n\n\t\t// construct IP address network kargs for each IP address\n\t\tfor _, address := range ipAddresses {\n\t\t\tprefix, err := netip.ParsePrefix(address)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"error parsing prefix: %w\", err)\n\t\t\t}\n\t\t\tvar ipStr, gatewayStr, maskStr string\n\t\t\taddr := prefix.Addr()\n\t\t\t// IPv6 addresses must be wrapped in [] for dracut network kargs\n\t\t\tif addr.Is6() {\n\t\t\t\tmaskStr = fmt.Sprintf(\"%d\", prefix.Bits())\n\t\t\t\tipStr = fmt.Sprintf(\"[%s]\", addr.String())\n\t\t\t\tif len(gateway) > 0 && gatewayIp.Is6() {\n\t\t\t\t\tgatewayStr = fmt.Sprintf(\"[%s]\", gateway)\n\t\t\t\t}\n\t\t\t} else if addr.Is4() {\n\t\t\t\tmaskStr, err = getSubnetMask(prefix)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", fmt.Errorf(\"error getting subnet mask: %w\", err)\n\t\t\t\t}\n\t\t\t\tif len(gateway) > 0 && gatewayIp.Is4() {\n\t\t\t\t\tgatewayStr = gateway\n\t\t\t\t}\n\t\t\t\tipStr = addr.String()\n\t\t\t} else {\n\t\t\t\treturn \"\", errors.New(\"IP address must adhere to IPv4 or IPv6 format\")\n\t\t\t}\n\n\t\t\toutKargs = outKargs + fmt.Sprintf(\"ip=%s::%s:%s:::none \", ipStr, gatewayStr, maskStr)\n\t\t}\n\n\t\t// construct nameserver network karg for each defined nameserver\n\t\tfor _, nameserver := range networkConfig.Nameservers {\n\t\t\tip := 
net.ParseIP(nameserver)\n\t\t\tif ip.To4() == nil {\n\t\t\t\tnameserver = fmt.Sprintf(\"[%s]\", nameserver)\n\t\t\t}\n\t\t\toutKargs = outKargs + fmt.Sprintf(\"nameserver=%s \", nameserver)\n\t\t}\n\t}\n\treturn outKargs, nil\n}", "func (api *licenseAPI) getFullKey(tenant, name string) string {\n\tif tenant != \"\" {\n\t\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"licenses\", \"/\", tenant, \"/\", name)\n\t}\n\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"licenses\", \"/\", name)\n}", "func EncodeKey(sc *stmtctx.StatementContext, b []byte, v ...types.Datum) ([]byte, error) {\n\treturn encode(sc, b, v, true)\n}", "func KeyPrefixBytes(prefix uint8) []byte {\n\treturn []byte(fmt.Sprintf(\"%d/\", prefix))\n}", "func (k Key) String() string {\n\tvar keyString = k.LCCN\n\tif k.Year > 0 {\n\t\tkeyString += fmt.Sprintf(\"/%04d\", k.Year)\n\t}\n\tif k.Month > 0 {\n\t\tkeyString += fmt.Sprintf(\"%02d\", k.Month)\n\t}\n\tif k.Day > 0 {\n\t\tkeyString += fmt.Sprintf(\"%02d\", k.Day)\n\t}\n\tif k.Ed > 0 {\n\t\tkeyString += fmt.Sprintf(\"%02d\", k.Ed)\n\t}\n\n\treturn keyString\n}", "func (c *publicKey) Encode() (*pb.PublicKey, error) {\n\tif c.ki == nil {\n\t\treturn nil, ErrPublicKeyCannotBeNil()\n\t}\n\n\tblob, err := crypto.MarshalPublicKey(c.ki)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpbuf := pb.PublicKey{Blob: blob}\n\n\treturn &pbuf, nil\n}", "func (api *configurationsnapshotAPI) getFullKey(tenant, name string) string {\n\tif tenant != \"\" {\n\t\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"config-snapshot\", \"/\", tenant, \"/\", name)\n\t}\n\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"config-snapshot\", \"/\", name)\n}", "func key(item storage.Key) []byte {\n\treturn []byte(item.Namespace() + separator + item.ID())\n}", "func NetworkPolicyKeyFunc(obj interface{}) (string, error) {\n\tpolicy, ok := obj.(*types.NetworkPolicy)\n\tif !ok 
{\n\t\treturn \"\", fmt.Errorf(\"object is not *types.NetworkPolicy: %v\", obj)\n\t}\n\treturn policy.Name, nil\n}", "func (key PrivateKey) String() string {\n\treturn base58.Encode(key[:])\n}", "func Encode(n int64) string {\n\tif n == 0 {\n\t\treturn string(alphabet[0])\n\t}\n\n\ts := \"\"\n\tfor ; 0 < n; n = n / length {\n\t\ts = string(alphabet[n%length]) + s\n\t}\n\n\treturn s\n}", "func encodeSSHKey(public *rsa.PublicKey) ([]byte, error) {\n\tpublicKey, err := ssh.NewPublicKey(public)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ssh.MarshalAuthorizedKey(publicKey), nil\n}", "func (in *Database) SaveNetwork(netw *types.Network) error {\n\tif netw.ID == \"\" {\n\t\tid := stringid.GenerateRandomID()\n\t\tnetw.ID = id\n\t\tnetw.ShortID = stringid.TruncateID(id)\n\t\tnetw.Created = time.Now()\n\t}\n\treturn in.save(\"network\", netw)\n}", "func ToHostKey(l3Proto L3Protocol, l4Proto L4Protocol, port uint32) string {\n\tstrL3, ok := L3Protocol_name[int32(l3Proto)]\n\tif !ok {\n\t\tstrL3 = InvalidKeyPart\n\t}\n\tstrL4, ok := L4Protocol_name[int32(l4Proto)]\n\tif !ok {\n\t\tstrL3 = InvalidKeyPart\n\t}\n\tstrPort := strconv.Itoa(int(port))\n\tif port == 0 {\n\t\tstrPort = InvalidKeyPart\n\t}\n\tkey := strings.Replace(toHostTemplate, \"{l3}\", strL3, 1)\n\tkey = strings.Replace(key, \"{l4}\", strL4, 1)\n\tkey = strings.Replace(key, \"{port}\", strPort, 1)\n\n\treturn key\n}", "func EncryptKey(key *AccountKey, auth string, scryptN, scryptP int) ([]byte, error) {\n\tauthArray := []byte(auth)\n\n\tsalt := make([]byte, 32)\n\tif _, err := io.ReadFull(rand.Reader, salt); err != nil {\n\t\tpanic(\"reading from crypto/rand failed: \" + err.Error())\n\t}\n\tderivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tencryptKey := derivedKey[:16]\n\thdKeyBytesObject := hdKeyBytes{\n\t\tHdPath: []byte(key.HdPath),\n\t\tMnemonic: []byte(key.Mnemonic),\n\t\tPassphrase: 
[]byte(key.Passphrase),\n\t\tPrivKey: key.PrivateKey,\n\t}\n\thdKeyEncoded, err := msgpack.Marshal(hdKeyBytesObject)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tiv := make([]byte, aes.BlockSize) // 16\n\tif _, err := io.ReadFull(rand.Reader, iv); err != nil {\n\t\tpanic(\"reading from crypto/rand failed: \" + err.Error())\n\t}\n\tcipherText, err := aesCTRXOR(encryptKey, hdKeyEncoded, iv)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmac := crypto.Keccak256(derivedKey[16:32], cipherText)\n\n\tscryptParamsJSON := make(map[string]interface{}, 5)\n\tscryptParamsJSON[\"n\"] = scryptN\n\tscryptParamsJSON[\"r\"] = scryptR\n\tscryptParamsJSON[\"p\"] = scryptP\n\tscryptParamsJSON[\"dklen\"] = scryptDKLen\n\tscryptParamsJSON[\"salt\"] = hex.EncodeToString(salt)\n\n\tcipherParamsJSON := cipherparamsJSON{\n\t\tIV: hex.EncodeToString(iv),\n\t}\n\n\tcryptoStruct := cryptoJSON{\n\t\tCipher: \"aes-128-ctr\",\n\t\tCipherText: hex.EncodeToString(cipherText),\n\t\tCipherParams: cipherParamsJSON,\n\t\tKDF: keyHeaderKDF,\n\t\tKDFParams: scryptParamsJSON,\n\t\tMAC: hex.EncodeToString(mac),\n\t}\n\tencryptedKeyJSONV3 := encryptedKeyJSONV3{\n\t\thex.EncodeToString(key.Address[:]),\n\t\tkey.Name,\n\t\tcryptoStruct,\n\t\tkey.Id.String(),\n\t\tversion,\n\t}\n\treturn json.Marshal(encryptedKeyJSONV3)\n}", "func encode(n ipld.Node) (ipld.Node, ipld.Link) {\n\tlb := cidlink.LinkBuilder{cid.Prefix{\n\t\tVersion: 1,\n\t\tCodec: 0x0129,\n\t\tMhType: 0x17,\n\t\tMhLength: 4,\n\t}}\n\tlnk, err := lb.Build(context.Background(), ipld.LinkContext{}, n,\n\t\tfunc(ipld.LinkContext) (io.Writer, ipld.StoreCommitter, error) {\n\t\t\tbuf := bytes.Buffer{}\n\t\t\treturn &buf, func(lnk ipld.Link) error {\n\t\t\t\tstorage[lnk] = buf.Bytes()\n\t\t\t\treturn nil\n\t\t\t}, nil\n\t\t},\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn n, lnk\n}", "func (api *credentialsAPI) getFullKey(tenant, name string) string {\n\tif tenant != \"\" {\n\t\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", 
\"cluster\", \"/\", \"credentials\", \"/\", tenant, \"/\", name)\n\t}\n\treturn fmt.Sprint(globals.ConfigRootPrefix, \"/\", \"cluster\", \"/\", \"credentials\", \"/\", name)\n}", "func MakeKey(sk StorageKey, id string) []byte {\n\tkey := []byte(id)\n\tkey = append([]byte{byte(sk)}, key...)\n\treturn key\n}", "func encodeKeys(privateKeyRaw *ecdsa.PrivateKey, publicKeyRaw *ecdsa.PublicKey) (string, string) {\n\n\ts := \"START encodeKeys() - Encodes privateKeyRaw & publicKeyRaw to privateKeyHex & publicKeyHex\"\n\tlog.Debug(\"WALLET: GUTS \" + s)\n\n\tprivateKeyx509Encoded, _ := x509.MarshalECPrivateKey(privateKeyRaw)\n\tprivateKeyPEM := pem.EncodeToMemory(\n\t\t&pem.Block{\n\t\t\tType: \"PRIVATE KEY\",\n\t\t\tBytes: privateKeyx509Encoded,\n\t\t})\n\tprivateKeyHex := hex.EncodeToString(privateKeyPEM)\n\n\tpublicKeyx509Encoded, _ := x509.MarshalPKIXPublicKey(publicKeyRaw)\n\tpublicKeyPEM := pem.EncodeToMemory(\n\t\t&pem.Block{\n\t\t\tType: \"PUBLIC KEY\",\n\t\t\tBytes: publicKeyx509Encoded,\n\t\t})\n\tpublicKeyHex := hex.EncodeToString(publicKeyPEM)\n\n\ts = \"END encodeKeys() - Encodes privateKeyRaw & publicKeyRaw to privateKeyHex & publicKeyHex\"\n\tlog.Debug(\"WALLET: GUTS \" + s)\n\n\treturn privateKeyHex, publicKeyHex\n\n}", "func (app *adapter) ToEncoded(pk PrivateKey) string {\n\tbytes := app.ToBytes(pk)\n\treturn base64.StdEncoding.EncodeToString(bytes)\n}", "func encodeExpireKey(key []byte) []byte {\n\texpireKey := []byte{ValuePrefix}\n\texpireKey = append(expireKey, ExpireKey...)\n\texpireKey = append(expireKey, Seperator)\n\texpireKey = append(expireKey, key...)\n\treturn expireKey\n}", "func convertKey(key []byte) string {\n\treturn fmt.Sprintf(\"%x\", key)\n}" ]
[ "0.6027767", "0.5856475", "0.5842569", "0.5747398", "0.5699967", "0.56682754", "0.5556975", "0.54912746", "0.54875445", "0.54775286", "0.54382294", "0.5423966", "0.5418173", "0.53774774", "0.5373105", "0.5369733", "0.5362149", "0.5285002", "0.52529335", "0.52497816", "0.5235867", "0.52046454", "0.5124089", "0.5107828", "0.51050115", "0.50997317", "0.50657904", "0.50544643", "0.5034588", "0.5029709", "0.5022108", "0.5014662", "0.5013887", "0.5009686", "0.5003955", "0.4983961", "0.49678597", "0.49671355", "0.49654177", "0.49601218", "0.49598926", "0.49543086", "0.49463883", "0.49372208", "0.4929824", "0.49291807", "0.49236745", "0.4921922", "0.49208897", "0.49109256", "0.49057266", "0.48982143", "0.48775604", "0.48561543", "0.48473826", "0.48470262", "0.48431674", "0.484091", "0.48272118", "0.48272118", "0.48240507", "0.48234057", "0.48208946", "0.48147616", "0.48067585", "0.48038036", "0.48017678", "0.4794833", "0.4778183", "0.4776314", "0.477323", "0.47700155", "0.47662193", "0.47621238", "0.4761766", "0.4761439", "0.4761397", "0.4756666", "0.47565985", "0.47545764", "0.475347", "0.47528288", "0.47454846", "0.4744731", "0.47397017", "0.47374043", "0.47270912", "0.47201163", "0.4697451", "0.46951416", "0.469439", "0.46898797", "0.46871647", "0.46826828", "0.4681857", "0.46805915", "0.46802828", "0.46782178", "0.46775776", "0.4674633" ]
0.81983703
0
decodeRelativeKey returns a CIDR string, given a relative storage key. e.g: 10.4.0.016 > 10.4.0.0/16
func decodeRelativeKey(key string) string { return strings.Replace(key, "-", "/", -1) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func encodeKey(network net.IPNet) string {\n\treturn fmt.Sprintf(\n\t\tIPAMSubnetStorageKeyFormat,\n\t\tstrings.Replace(network.String(), \"/\", \"-\", -1),\n\t)\n}", "func decodeSubscriptionKey(key string) ([]byte, error) {\n\t// \"=\" padding\n\tbuf := bytes.NewBufferString(key)\n\tif rem := len(key) % 4; rem != 0 {\n\t\tbuf.WriteString(strings.Repeat(\"=\", 4-rem))\n\t}\n\n\tbytes, err := base64.StdEncoding.DecodeString(buf.String())\n\tif err == nil {\n\t\treturn bytes, nil\n\t}\n\n\treturn base64.URLEncoding.DecodeString(buf.String())\n}", "func (pkv PublicKeyValidator) getPeerID(key string) (peer.ID, error) {\n\tif len(key) == 0 || key[0] != '/' {\n\t\treturn \"\", errors.New(ErrInvalidKeyFormat)\n\t}\n\n\tkey = key[1:]\n\n\ti := strings.IndexByte(key, '/')\n\tif i <= 0 {\n\t\treturn \"\", errors.New(ErrInvalidKeyFormat)\n\t}\n\n\tns := key[:i]\n\tif ns != EncryptionNamespace {\n\t\treturn \"\", errors.New(ErrInvalidNamespace)\n\t}\n\n\tpeerID, err := peer.IDB58Decode(key[i+1:])\n\tif err != nil {\n\t\treturn \"\", errors.WithStack(err)\n\t}\n\n\treturn peerID, nil\n}", "func getKeyRelHash(k string, r string) int {\n\th1 := sha1.New()\n\th1.Write([]byte(k))\n\tb1 := h1.Sum(nil)\n\tdata1 := b1[0]\n\tid1 := data1 % 8\n\tid1 = id1 * 4\n\n\th2 := sha1.New()\n\th2.Write([]byte(r))\n\tb2 := h2.Sum(nil)\n\tdata2 := b2[0]\n\tid2 := data2 % 4\n\tretid := int(id1 + id2)\n\t//fmt.Println(\"Hash for key-rel=\", retid)\n\treturn retid\n}", "func KeyToPath(key string) (string) {\n\tpath, err := base64.StdEncoding.DecodeString(key)\n\tif err != nil {\n\t\treturn \"\"\n\t} else {\n\t\treturn string(path)\n\t}\n}", "func (s *Store) ResolveKey(key string) (string, error) {\n\tif !strings.HasPrefix(key, hashPrefix) {\n\t\treturn \"\", fmt.Errorf(\"wrong key prefix\")\n\t}\n\tif len(key) < minlenKey {\n\t\treturn \"\", fmt.Errorf(\"image ID too short\")\n\t}\n\tif len(key) > lenKey {\n\t\tkey = key[:lenKey]\n\t}\n\n\tvar aciInfos []*ACIInfo\n\terr := s.db.Do(func(tx *sql.Tx) 
error {\n\t\tvar err error\n\t\taciInfos, err = GetACIInfosWithKeyPrefix(tx, key)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn \"\", errwrap.Wrap(errors.New(\"error retrieving ACI Infos\"), err)\n\t}\n\n\tkeyCount := len(aciInfos)\n\tif keyCount == 0 {\n\t\treturn \"\", ErrKeyNotFound\n\t}\n\tif keyCount != 1 {\n\t\treturn \"\", fmt.Errorf(\"ambiguous image ID: %q\", key)\n\t}\n\treturn aciInfos[0].BlobKey, nil\n}", "func decodePubkey(val []byte) (*ecdsa.PublicKey, error) {\n\tdata, err := base64.RawURLEncoding.DecodeString(string(val))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ethcrypto.UnmarshalPubkey(data)\n}", "func readKeyFromExtensionManifest(path string) ([]byte, error) {\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tj := make(map[string]interface{})\n\tif err = json.Unmarshal(b, &j); err != nil {\n\t\treturn nil, err\n\t}\n\tif enc, ok := j[\"key\"].(string); ok {\n\t\treturn base64.StdEncoding.DecodeString(enc)\n\t}\n\treturn nil, nil\n}", "func getIDFromKeyARN(arn string) string {\n\tif ss := strings.Split(arn, \"/\"); len(ss) > 0 {\n\t\tarn = ss[len(ss)-1]\n\t}\n\treturn arn\n}", "func DecodeKey(s string) ([]byte, error) {\n\tkey, err := hex.DecodeString(s)\n\tif err == nil && len(key) != KeyLen {\n\t\terr = fmt.Errorf(\"length is %d, expected %d\", len(key), KeyLen)\n\t}\n\treturn key, err\n}", "func (r *Key) urlNormalized() *Key {\n\tnormalized := dcl.Copy(*r).(Key)\n\tnormalized.Name = dcl.SelfLinkToName(r.Name)\n\tnormalized.DisplayName = dcl.SelfLinkToName(r.DisplayName)\n\tnormalized.Project = dcl.SelfLinkToName(r.Project)\n\treturn &normalized\n}", "func DecodeKey(raw []byte) (parsedKey []byte, err error) {\n\tblock, _ := pem.Decode(raw)\n\tif block == nil {\n\t\terr = errors.New(\"failed to decode PEM data\")\n\t\treturn\n\t}\n\tvar keytype certv1.PrivateKeyEncoding\n\tvar key interface{}\n\tif key, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {\n\t\tif key, err = 
x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {\n\t\t\treturn\n\t\t}\n\t\tkeytype = certv1.PKCS8\n\t} else {\n\t\tkeytype = certv1.PKCS1\n\t}\n\trsaKey := key.(*rsa.PrivateKey)\n\tif keytype == certv1.PKCS1 {\n\t\tparsedKey = x509.MarshalPKCS1PrivateKey(rsaKey)\n\t} else {\n\t\tparsedKey, _ = x509.MarshalPKCS8PrivateKey(rsaKey)\n\t}\n\treturn\n}", "func convertKey(key []byte) string {\n\treturn fmt.Sprintf(\"%x\", key)\n}", "func decodeBase58(b []byte) int64 {\n\tvar id int64\n\tfor p := range b {\n\t\tid = id*58 + int64(decodeBase58Map[b[p]])\n\t}\n\treturn id\n}", "func (rc *RedisClient) GetEncodedPath(media *models.Media, ipAddr string) (string, error) {\n\tconn := rc.Get()\n\tdefer conn.Close()\n\tencodedPath, err := GenerateKey()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tkey := []byte(rc.secret + \":encoded:\" + ipAddr + \":\" + encodedPath)\n\tmediaBytes, err := easyjson.Marshal(media)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t_, err = conn.Do(\"SETEX\", key, strconv.Itoa(rc.userExpiry), mediaBytes)\n\treturn encodedPath, err\n}", "func decodeKeyId(keyId string) (uint64, error) {\n\tbs, err := hex.DecodeString(keyId)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif len(bs) != 8 {\n\t\treturn 0, fmt.Errorf(\"keyId is not 8 bytes as expected, got %d\", len(bs))\n\t}\n\treturn binary.BigEndian.Uint64(bs), nil\n}", "func PathForKey(raw string) paths.Unencrypted {\n\treturn paths.NewUnencrypted(strings.TrimSuffix(raw, \"/\"))\n}", "func (esr *etcdSubnetRegistry) parseNetworkKey(s string) (string, bool) {\n\tif parts := esr.networkRegex.FindStringSubmatch(s); len(parts) == 3 {\n\t\treturn parts[1], parts[2] != \"\"\n\t}\n\n\treturn \"\", false\n}", "func keyToEtcdPath(key string) string {\n var path string\n \n // do it the easy way for now\n parts := strings.Split(key, \".\")\n \n for i, p := range parts {\n if i > 0 { path += \"/\" }\n path += url.QueryEscape(p)\n }\n \n return path\n}", "func readKey(key string, path string) (string, error) 
{\n\tdata, err := ioutil.ReadFile(filepath.Join(path, key))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}", "func keyToAddr(key *secp256k1.PrivateKey, net *chaincfg.Params) (dcrutil.Address, error) {\n\tpubKey := (*secp256k1.PublicKey)(&key.PublicKey)\n\tserializedKey := pubKey.SerializeCompressed()\n\tpubKeyAddr, err := dcrutil.NewAddressSecpPubKey(serializedKey, net)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pubKeyAddr.AddressPubKeyHash(), nil\n}", "func parseAccountKey(ctx *cli.Context, args cli.Args) ([]byte, error) {\n\tvar (\n\t\tacctKeyStr string\n\t)\n\tswitch {\n\tcase ctx.IsSet(\"acct_key\"):\n\t\tacctKeyStr = ctx.String(\"acct_key\")\n\tcase args.Present():\n\t\tacctKeyStr = args.First()\n\t\targs = args.Tail()\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"acct_key argument missing\")\n\t}\n\tif len(acctKeyStr) != hex.EncodedLen(33) {\n\t\treturn nil, fmt.Errorf(\"acct_key in invalid format. \" +\n\t\t\t\"must be hex encoded 33 byte public key\")\n\t}\n\treturn hex.DecodeString(acctKeyStr)\n}", "func (dr *Resolver) ResolveFromERPC(pathKey model.PathKey, cache bool) (string, error) {\n\tvar segment string\n\tvar resolutionErr error\n\tdepth := int64(0)\n\n\tentry := counterEntry{\n\t\tresolutionType: metrics.ERPCTag,\n\t\tresolution: metrics.PathResolutionTag,\n\t}\n\n\t// create eRPC request and send using the ioctl syscall\n\tchallenge, err := dr.requestResolve(erpc.ResolvePathOp, pathKey)\n\tif err != nil {\n\t\tdr.missCounters[entry].Inc()\n\t\treturn \"\", fmt.Errorf(\"unable to resolve the path of mountID `%d` and inode `%d` with eRPC: %w\", pathKey.MountID, pathKey.Inode, err)\n\t}\n\n\tvar keys []model.PathKey\n\tvar entries []*PathEntry\n\n\tfilenameParts := make([]string, 0, 128)\n\n\ti := 0\n\t// make sure that we keep room for at least one pathID + character + \\0 => (sizeof(pathID) + 1 = 17)\n\tfor i < dr.erpcSegmentSize-17 {\n\t\tdepth++\n\n\t\t// parse the path_key_t structure\n\t\tpathKey.Inode = 
model.ByteOrder.Uint64(dr.erpcSegment[i : i+8])\n\t\tpathKey.MountID = model.ByteOrder.Uint32(dr.erpcSegment[i+8 : i+12])\n\n\t\t// check challenge\n\t\tif challenge != model.ByteOrder.Uint32(dr.erpcSegment[i+12:i+16]) {\n\t\t\tif depth >= model.MaxPathDepth {\n\t\t\t\tresolutionErr = errTruncatedParentsERPC\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdr.missCounters[entry].Inc()\n\t\t\treturn \"\", errERPCRequestNotProcessed\n\t\t}\n\n\t\t// skip PathID\n\t\ti += 16\n\n\t\tif dr.erpcSegment[i] == 0 {\n\t\t\tif depth >= model.MaxPathDepth {\n\t\t\t\tresolutionErr = errTruncatedParentsERPC\n\t\t\t} else {\n\t\t\t\tresolutionErr = errERPCResolution\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tif dr.erpcSegment[i] != '/' {\n\t\t\tsegment = model.NullTerminatedString(dr.erpcSegment[i:])\n\t\t\tfilenameParts = append(filenameParts, segment)\n\t\t\ti += len(segment) + 1\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\n\t\tif !IsFakeInode(pathKey.Inode) && cache {\n\t\t\tkeys = append(keys, pathKey)\n\n\t\t\tentry := newPathEntry(model.PathKey{}, segment)\n\t\t\tentries = append(entries, entry)\n\t\t}\n\t}\n\n\tif resolutionErr == nil && len(keys) > 0 {\n\t\tresolutionErr = dr.cacheEntries(keys, entries)\n\n\t\tif depth > 0 {\n\t\t\tdr.hitsCounters[entry].Add(depth)\n\t\t}\n\t}\n\n\tif resolutionErr != nil {\n\t\tdr.missCounters[entry].Inc()\n\t}\n\n\treturn computeFilenameFromParts(filenameParts), resolutionErr\n}", "func BaseDER(path string) string {\n dat, _ := ioutil.ReadFile(path)\n block, _ := pem.Decode(dat)\n if block == nil {\n return \"No Key Found.\"\n } // end if.\n\n pubout := base64.StdEncoding.EncodeToString(block.Bytes)\n return pubout\n\n}", "func RecoverECDSAPrivKeyFromLinearRelationship(pub *ecdsa.PublicKey, digest []byte, sig1, sig2 *ecSignature, a, b *big.Int) *ecdsa.PrivateKey {\n\tc := pub.Curve\n\tN := c.Params().N\n\n\te := hashToInt(digest, c)\n\tw1 := new(big.Int).ModInverse(sig1.S, N) // s1^-1\n\tw2 := new(big.Int).ModInverse(sig2.S, N) // s2^-1\n\tw1.Mul(a, w1) // a * 
s1^-1\n\te.Mul(e, new(big.Int).Sub(w2, w1)) // m * (s2^-1 - a * s1^-1)\n\td1 := new(big.Int).Sub(b, e) // b - m * (s2^-1 - a * s1^-1)\n\td1.Mod(d1, N)\n\n\td2 := new(big.Int).Sub(new(big.Int).Mul(sig2.R, w2), new(big.Int).Mul(sig1.R, w1)) // r2 * s2^-1 - r1 * a * s1^-1\n\tif d2.Sign() == 0 {\n\t\treturn nil\n\t}\n\td2.ModInverse(d2, N)\n\n\tD := new(big.Int).Mul(d1, d2) // (b - m * (s2^-1 - a * s1^-1)) / (r2 * s2^-1 - r1 * a * s1^-1)\n\tD.Mod(D, N)\n\n\tpriv := new(ecdsa.PrivateKey)\n\tpriv.PublicKey = *pub\n\tpriv.D = D\n\treturn priv\n}", "func TestEncodeDecodeKey(t *testing.T) {\n\tt.Parallel()\n\n\tns := \"some-namespace\"\n\tdomain := \"example.google.com\"\n\tnn := types.NamespacedName{\n\t\tNamespace: ns,\n\t\tName: domain,\n\t}\n\n\tactualNs, actualDomain, err := cache.SplitMetaNamespaceKey(nn.String())\n\ttestutil.AssertNil(t, \"err\", err)\n\ttestutil.AssertEqual(t, \"namespace\", ns, actualNs)\n\ttestutil.AssertEqual(t, \"domain\", domain, actualDomain)\n}", "func cidrtoRfc1035(cidr string) string {\n\tcidr = strings.ReplaceAll(cidr, \":\", \"\")\n\tcidr = strings.ReplaceAll(cidr, \".\", \"\")\n\tcidr = strings.ReplaceAll(cidr, \"/\", \"\")\n\treturn cidr\n}", "func PathToKey(path string) (string) {\n\treturn base64.StdEncoding.EncodeToString([]byte(path))\n}", "func (b *BtcWallet) deriveKeyByBIP32Path(path []uint32) (*btcec.PrivateKey,\n\terror) {\n\n\t// Make sure we get a full path with exactly 5 elements. 
A path is\n\t// either custom purpose one with 4 dynamic and one static elements:\n\t// m/1017'/coinType'/keyFamily'/0/index\n\t// Or a default BIP49/89 one with 5 elements:\n\t// m/purpose'/coinType'/account'/change/index\n\tconst expectedDerivationPathDepth = 5\n\tif len(path) != expectedDerivationPathDepth {\n\t\treturn nil, fmt.Errorf(\"invalid BIP32 derivation path, \"+\n\t\t\t\"expected path length %d, instead was %d\",\n\t\t\texpectedDerivationPathDepth, len(path))\n\t}\n\n\t// Assert that the first three parts of the path are actually hardened\n\t// to avoid under-flowing the uint32 type.\n\tif err := assertHardened(path[0], path[1], path[2]); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid BIP32 derivation path, \"+\n\t\t\t\"expected first three elements to be hardened: %w\", err)\n\t}\n\n\tpurpose := path[0] - hdkeychain.HardenedKeyStart\n\tcoinType := path[1] - hdkeychain.HardenedKeyStart\n\taccount := path[2] - hdkeychain.HardenedKeyStart\n\tchange, index := path[3], path[4]\n\n\t// Is this a custom lnd internal purpose key?\n\tswitch purpose {\n\tcase keychain.BIP0043Purpose:\n\t\t// Make sure it's for the same coin type as our wallet's\n\t\t// keychain scope.\n\t\tif coinType != b.chainKeyScope.Coin {\n\t\t\treturn nil, fmt.Errorf(\"invalid BIP32 derivation \"+\n\t\t\t\t\"path, expected coin type %d, instead was %d\",\n\t\t\t\tb.chainKeyScope.Coin, coinType)\n\t\t}\n\n\t\treturn b.deriveKeyByLocator(keychain.KeyLocator{\n\t\t\tFamily: keychain.KeyFamily(account),\n\t\t\tIndex: index,\n\t\t})\n\n\t// Is it a standard, BIP defined purpose that the wallet understands?\n\tcase waddrmgr.KeyScopeBIP0044.Purpose,\n\t\twaddrmgr.KeyScopeBIP0049Plus.Purpose,\n\t\twaddrmgr.KeyScopeBIP0084.Purpose,\n\t\twaddrmgr.KeyScopeBIP0086.Purpose:\n\n\t\t// We're going to continue below the switch statement to avoid\n\t\t// unnecessary indentation for this default case.\n\n\t// Currently, there is no way to import any other key scopes than the\n\t// one custom purpose or 
three standard ones into lnd's wallet. So we\n\t// shouldn't accept any other scopes to sign for.\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid BIP32 derivation path, \"+\n\t\t\t\"unknown purpose %d\", purpose)\n\t}\n\n\t// Okay, we made sure it's a BIP49/84 key, so we need to derive it now.\n\t// Interestingly, the btcwallet never actually uses a coin type other\n\t// than 0 for those keys, so we need to make sure this behavior is\n\t// replicated here.\n\tif coinType != 0 {\n\t\treturn nil, fmt.Errorf(\"invalid BIP32 derivation path, coin \" +\n\t\t\t\"type must be 0 for BIP49/84 btcwallet keys\")\n\t}\n\n\t// We only expect to be asked to sign with key scopes that we know\n\t// about. So if the scope doesn't exist, we don't create it.\n\tscope := waddrmgr.KeyScope{\n\t\tPurpose: purpose,\n\t\tCoin: coinType,\n\t}\n\tscopedMgr, err := b.wallet.Manager.FetchScopedKeyManager(scope)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error fetching manager for scope %v: \"+\n\t\t\t\"%w\", scope, err)\n\t}\n\n\t// Let's see if we can hit the private key cache.\n\tkeyPath := waddrmgr.DerivationPath{\n\t\tInternalAccount: account,\n\t\tAccount: account,\n\t\tBranch: change,\n\t\tIndex: index,\n\t}\n\tprivKey, err := scopedMgr.DeriveFromKeyPathCache(keyPath)\n\tif err == nil {\n\t\treturn privKey, nil\n\t}\n\n\t// The key wasn't in the cache, let's fully derive it now.\n\terr = walletdb.View(b.db, func(tx walletdb.ReadTx) error {\n\t\taddrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)\n\n\t\taddr, err := scopedMgr.DeriveFromKeyPath(addrmgrNs, keyPath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error deriving private key: %w\", err)\n\t\t}\n\n\t\tprivKey, err = addr.(waddrmgr.ManagedPubKeyAddress).PrivKey()\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error deriving key from path %#v: %w\",\n\t\t\tkeyPath, err)\n\t}\n\n\treturn privKey, nil\n}", "func parsePathKey(path []byte) (k string, rem []byte, err error) {\n\tvar parsed = make([]byte, 
0, SHORT_STRING_OPTIMIZED_CAP)\n\tvar c byte\n\n\trem = path\n\n\tfor {\n\t\tswitch {\n\t\tcase rem[0] == '\\\\' && rem[1] == 'u':\n\t\t\tutf8str := make([]byte, 0, SHORT_STRING_OPTIMIZED_CAP)\n\t\t\tutf8str, rem, err = parseUnicode(rem)\n\t\t\tfor _, c := range utf8str {\n\t\t\t\tparsed = append(parsed, c)\n\t\t\t}\n\t\t\tcontinue\n\n\t\tcase rem[0] == '\\\\' && rem[1] == '.':\n\t\t\tparsed = append(parsed, '.')\n\t\t\trem = rem[2:]\n\t\t\tcontinue\n\n\t\tcase rem[0] == '\\\\' && rem[1] == '[':\n\t\t\tparsed = append(parsed, '[')\n\t\t\trem = rem[2:]\n\t\t\tcontinue\n\n\t\tcase rem[0] == '\\\\' && rem[1] == ']':\n\t\t\tparsed = append(parsed, ']')\n\t\t\trem = rem[2:]\n\t\t\tcontinue\n\n\t\tcase rem[0] == '\\\\' && rem[1] == ';':\n\t\t\tparsed = append(parsed, ';')\n\t\t\trem = rem[2:]\n\t\t\tcontinue\n\n\t\tcase rem[0] == '\\\\' && rem[1] != 'u':\n\t\t\tc, rem, err = parseEscaped(rem)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tparsed = append(parsed, c)\n\t\t\tcontinue\n\n\t\tcase rem[0] == '.' 
|| rem[0] == '[' || rem[0] == ';':\n\t\t\tgoto End\n\n\t\tdefault:\n\t\t\tparsed = append(parsed, rem[0])\n\t\t\tpos.col += 1\n\t\t\trem = rem[1:]\n\t\t\tcontinue\n\t\t}\n\t}\nEnd:\n\treturn string(parsed), rem, nil\n}", "func rewriteRawKey(key []byte, rewriteRules *RewriteRules) ([]byte, *import_sstpb.RewriteRule) {\n\tif rewriteRules == nil {\n\t\treturn codec.EncodeBytes([]byte{}, key), nil\n\t}\n\tif len(key) > 0 {\n\t\trule := matchOldPrefix(key, rewriteRules)\n\t\tret := bytes.Replace(key, rule.GetOldKeyPrefix(), rule.GetNewKeyPrefix(), 1)\n\t\treturn codec.EncodeBytes([]byte{}, ret), rule\n\t}\n\treturn nil, nil\n}", "func clientKey(filepath string) string {\n\tkey, err := ioutil.ReadFile(filepath)\n\tif err != nil {\n\t\tfmt.Println(\"Couldn't read key.pem:\", err)\n\t\tos.Exit(1)\n\t}\n\treturn string(key)\n}", "func readKey(key string) (string, error) {\n\tvar env Config\n\tif err := envconfig.Process(\"\", &env); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdata, err := ioutil.ReadFile(filepath.Join(env.NSXSecretPath, key))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}", "func stripKvKeyPrefix(prefix string, full_key string) string {\n\tuse_key := full_key\n\t// Strip leading slash first.\n\tif use_key[0:1] == \"/\" {\n\t\tuse_key = use_key[1:]\n\t}\n\t//log.Printf(\"stripKvKeyPrefix(%s, %s) use_key slice: %s\\n\", prefix, full_key, use_key[0:len(prefix)])\n\tif use_key[0:len(prefix)] == prefix {\n\t\tuse_key = use_key[len(prefix):]\n\t}\n\t//log.Printf(\"stripKvKeyPrefix(%s, %s) new use_key 1: %s\\n\", prefix, full_key, use_key)\n\t// Strip leading slash again if required.\n\tif len(use_key) > 0 && use_key[0:1] == \"/\" {\n\t\tuse_key = use_key[1:]\n\t}\n\t//log.Printf(\"stripKvKeyPrefix(%s, %s) new use_key 2: %s\\n\", prefix, full_key, use_key)\n\treturn use_key\n}", "func (this *Codec) decode(shortUrl string) string {\n\tindex := strings.LastIndex(shortUrl, \"/\")\n\tkey, _ := strconv.Atoi(shortUrl[index+1:])\n\treturn 
this.urls[key]\n\n}", "func parseRawKey(s string) (string, string, error) {\n\tfor i := 0; i < len(s); i++ {\n\t\tif s[i] == '\"' {\n\t\t\t// Fast path.\n\t\t\treturn s[:i], s[i+1:], nil\n\t\t}\n\t\tif s[i] == '\\\\' {\n\t\t\t// Slow path.\n\t\t\treturn parseRawString(s)\n\t\t}\n\t}\n\treturn s, \"\", fmt.Errorf(`missing closing '\"'`)\n}", "func getKey(data string) string {\n\tsign := md5.Sum([]byte(data))\n\tsignStr := fmt.Sprintf(\"%x\", sign)\n\treturn signStr[:7]\n}", "func ReadKey(key string) (string, error) {\n\tdata, err := ioutil.ReadFile(filepath.Join(MountPath, key))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}", "func FormatKey(key string) (registry.Key, string) {\n\tfor _, p := range hklmPrefixes {\n\t\tif strings.HasPrefix(key, p) {\n\t\t\treturn registry.LocalMachine, subkey(key, p)\n\t\t}\n\t}\n\tfor _, p := range hkcrPrefixes {\n\t\tif strings.HasPrefix(key, p) {\n\t\t\treturn registry.ClassesRoot, subkey(key, p)\n\t\t}\n\t}\n\n\tonce.Do(func() { initKeys() })\n\n\tif root, k := findSIDKey(key); root != registry.InvalidKey {\n\t\treturn root, k\n\t}\n\tfor _, p := range hkuPrefixes {\n\t\tif strings.HasPrefix(key, p) {\n\t\t\treturn registry.Users, subkey(key, p)\n\t\t}\n\t}\n\n\tif strings.HasPrefix(key, hive) {\n\t\treturn registry.Hive, key\n\t}\n\n\treturn registry.InvalidKey, key\n}", "func KeyByFilename(keyFname string) (*[32]byte, error) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfp := filepath.Join(cwd, keyFname)\n\tb, err := ioutil.ReadFile(fp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn DecodeKey(string(b[:64])), nil\n}", "func BridgeDomainKey(bdName string) string {\n\treturn BdPrefix + bdName\n}", "func runtimeDecodeBase58(ic *interop.Context) error {\n\tsrc := ic.VM.Estack().Pop().String()\n\tresult, err := base58.Decode(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tic.VM.Estack().PushVal(result)\n\treturn nil\n}", "func ReadKey(r io.Reader) ([]byte, 
error) {\n\tbr := bufio.NewReader(io.LimitReader(r, 100))\n\tline, err := br.ReadString('\\n')\n\tif err == io.EOF {\n\t\terr = nil\n\t}\n\tif err == nil {\n\t\t// Check that we're at EOF.\n\t\t_, err = br.ReadByte()\n\t\tif err == io.EOF {\n\t\t\terr = nil\n\t\t} else if err == nil {\n\t\t\terr = fmt.Errorf(\"file contains more than one line\")\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tline = strings.TrimSuffix(line, \"\\n\")\n\treturn DecodeKey(line)\n}", "func GraphKey(base []string) (string, error) {\n var a IpvsVirtualServer\n a.Protocol = base[0]\n a.Schedule = base[2]\n VirtualServerInfo, err := Hex2IpvsServer(base[1])\n if (err != nil) {\n return \"\", err\n }\n a.IPAddress = VirtualServerInfo.IPAddress\n a.Port = VirtualServerInfo.Port\n var m = [...]string{\n strings.Replace(a.IPAddress,\".\",\"_\",-1),\n a.Port,\n a.Protocol,\n a.Schedule,\n }\n return strings.Replace(GraphNamePrefixTemplate, \"*\", strings.Join(m[:],\"_\"), 1), nil\n}", "func escapeKey(key string, isPrefix bool) string {\n\treturn escape.HexEscape(key, func(r []rune, i int) bool {\n\t\tc := r[i]\n\t\tswitch {\n\t\t// Azure does not work well with backslashes in blob names.\n\t\tcase c == '\\\\':\n\t\t\treturn true\n\t\t// Azure doesn't handle these characters (determined via experimentation).\n\t\tcase c < 32 || c == 34 || c == 35 || c == 37 || c == 63 || c == 127:\n\t\t\treturn true\n\t\t// Escape trailing \"/\" for full keys, otherwise Azure can't address them\n\t\t// consistently.\n\t\tcase !isPrefix && i == len(key)-1 && c == '/':\n\t\t\treturn true\n\t\t// For \"../\", escape the trailing slash.\n\t\tcase i > 1 && r[i] == '/' && r[i-1] == '.' 
&& r[i-2] == '.':\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n}", "func Base58Decode(input []byte) []byte {\n\tdecode, err := base58.Decode(string(input[:]))\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn decode\n}", "func Base58Decode(input []byte) []byte {\n\tdecode, err := base58.Decode(string(input[:]))\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn decode\n}", "func Decode(key string) int64 {\n\tres, err := strconv.ParseInt(key, 16, 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\treturn res\n}", "func ResourceKey(c Codec, resourceName, pk string) string {\n\treturn path.Join(c.Key(), resourceName, pk)\n}", "func loadConfigStringEncKey(confReader *config.Config, group, field string) (string, error) {\n\t//Read value from config reader\n\tvalue, err := confReader.String(group, field)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn value, nil\n}", "func getKeyFromVault() ([]byte, error) {\n\tif key == nil {\n\t\t// TODO: read key from Vault\n\t\tkey = []byte(\"LOOKMEUPINEXTERNALSYSTEM\")\n\t}\n\treturn key, nil\n}", "func etcdKeyToMapKey(eKey string) string {\n\tfields := strings.Split(eKey, \"/\")\n\tif l := len(fields); l > 2 {\n\t\treturn fields[l-2] + \"/\" + fields[l-1]\n\t}\n\treturn \"\"\n}", "func toValidImageRefName(name string) string {\n\tif len(name) == 0 {\n\t\treturn name\n\t}\n\n\t// in some certain situations, ref name might contain slashes or other symbols\n\t// k8s naming refers to DNS1123, which states that only alphanumeric characters\n\t// dashes are allowed in a subdomain name\n\t// bug example: https://bugzilla.redhat.com/show_bug.cgi?id=1970805\n\tresult := dnsSubdomainAllowedCharacters.ReplaceAllString(name, \"-\")\n\n\t// Also should check allowed subdomain name length and trim accordingly\n\tif len(result) > kvalidation.DNS1123SubdomainMaxLength {\n\t\tresult = result[:kvalidation.DNS1123SubdomainMaxLength]\n\t}\n\n\t// if after the trim name ends with a dash, trim it, too, must end with an 
alphanumeric\n\tresult = strings.Trim(result, \"-\")\n\n\treturn result\n}", "func escapeKey(key string) string {\n\treturn escape.HexEscape(key, func(r []rune, i int) bool {\n\t\tc := r[i]\n\t\tswitch {\n\t\t// S3 doesn't handle these characters (determined via experimentation).\n\t\tcase c < 32:\n\t\t\treturn true\n\t\t// For \"../\", escape the trailing slash.\n\t\tcase i > 1 && c == '/' && r[i-1] == '.' && r[i-2] == '.':\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n}", "func (repo *RemoteRepo) RefKey() []byte {\n\treturn []byte(\"E\" + repo.UUID)\n}", "func decrypt(encoded string, key []byte) (string, error) {\n\tcipherText, err := base64.URLEncoding.DecodeString(encoded)\n\tif err != nil {\n\t\treturn encoded, err\n\t}\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn encoded, err\n\t}\n\tif len(cipherText) < aes.BlockSize {\n\t\terr = errors.New(\"ciphertext block size is too short\")\n\t\treturn encoded, err\n\t}\n\tiv := cipherText[:aes.BlockSize]\n\tcipherText = cipherText[aes.BlockSize:]\n\tstream := cipher.NewCFBDecrypter(block, iv)\n\tstream.XORKeyStream(cipherText, cipherText)\n\tdecoded := string(cipherText)\n\n\t// By design decrypt with incorrect key must end up with the value\n\tif strings.Index(decoded, anchor) != 0 {\n\t\treturn encoded, nil\n\t}\n\n\tdecoded = strings.Replace(decoded, anchor, \"\", 1) // remove anchor from string\n\treturn decoded, nil\n}", "func readPublicKey(path string) (key string, err error) {\n\tbytes, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpubKey, _, _, _, err := ssh.ParseAuthorizedKey(bytes)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(ssh.MarshalAuthorizedKey(pubKey)[:]), nil\n}", "func DecodeDedupKey(data datamodel.Node) (string, error) {\n\treturn data.AsString()\n}", "func loadOverlayCidrFromKubeadm(client kubernetes.Interface) (string, error) {\n\tconfig, err := client.CoreV1().ConfigMaps(\"kube-system\").Get(\"kubeadm-config\", 
metav1.GetOptions{})\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\n\tclusterConfig, ok := config.Data[\"ClusterConfiguration\"]\n\tif !ok {\n\t\treturn \"\", trace.BadParameter(\"kubeadm configmap is missing ClusterConfiguration\")\n\t}\n\n\tvar parsedConfig kubeadmClusterConfiguration\n\terr = yaml.Unmarshal([]byte(clusterConfig), &parsedConfig)\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\n\tif cidr, ok := parsedConfig.Networking[\"podSubnet\"]; ok {\n\t\t_, _, err := net.ParseCIDR(cidr)\n\t\tif err != nil {\n\t\t\treturn \"\", trace.Wrap(err)\n\t\t}\n\t\treturn cidr, nil\n\t}\n\n\treturn \"\", trace.BadParameter(\"Unable to locate networking.podSubnet in kubeadm config: %v\", clusterConfig)\n}", "func decodePublicKey(hexKey string) (crypto.PublicKey, error) {\n\tbz, err := hex.DecodeString(hexKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn crypto.DecodePublicKey(crypto.ECDSAP256, bz)\n}", "func key2mid(key string) uint16 {\n\tmid, _ := strconv.Atoi(key[2:])\n\treturn uint16(mid)\n}", "func calculateServerResponseKey(key string) string {\n s := sha1.Sum([]byte(key + WS_SERVER_ID))\n b64 := base64.StdEncoding.EncodeToString(s[:])\n\n return b64\n}", "func CalcBase64() string {\n\tClient := redis.NewClient(&redis.Options{\n\t\tAddr: os.Getenv(\"REDIS_URL_HOST\"),\n\t\tPassword: os.Getenv(\"REDIS_PASSWORD\"),\t\t\n\t\tDB: 0, \n\t})\n\tvar ShortLinkRedisVal int64\n\t\n\tResult, Err := Client.Get(os.Getenv(\"URL_COUNTER_KEY\")).Result()\n\tif Err != nil {\n\t\t// leave this one as the value for key is not defined yet. \n\t\tfmt.Println(\"Key value is not defined yet. 
\")\n\t}\n\tif Result == \"\" {\n\t\tErr := Client.Set(os.Getenv(\"URL_COUNTER_KEY\"), os.Getenv(\"COUNTER_START_VALUE\"), 0).Err()\n\t\tif Err != nil {\n\t\t\tpanic(Err)\n\t\t}\t\t \n\t\tIntVal, Err := strconv.ParseInt(os.Getenv(\"COUNTER_START_VALUE\"), 10, 64)\n\t\tShortLinkRedisVal = IntVal\n\t} else {\n\t\tResult, Err := Client.Incr(os.Getenv(\"URL_COUNTER_KEY\")).Result()\n\t\tif Err != nil {\n\t\t\t\tpanic(Err)\n\t\t}\t\n\t\tShortLinkRedisVal = Result\n\t}\t\n\tfmt.Println(ShortLinkRedisVal)\n\tCount := 0\n\tconst Base62 string = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\tBaseArray := strings.SplitAfter(Base62, \"\")\t\n\tDividend := ShortLinkRedisVal\n\tShortLinkResponse := make([]string, 10)\t\n\tvar Remainder int64\n\tfor (Dividend > 0) {\n\t\tRemainder = Dividend % 62\t\t\n\t\tDividend = (Dividend / 62)\t\t\n\t\tShortLinkResponse[Count] = BaseArray[Remainder]\n\t\tCount = Count + 1\n\t}\t\n\tResponse := strings.Join(ShortLinkResponse, \"\")\n\treturn Response\n}", "func keyRecvMarkerPath(key *coretypes.Key) string {\n\treturn fmt.Sprintf(\"%s/%s.%s\", recvMarkerDirectory, key.String(), keyRecvExt)\n}", "func decode(k Key, text []byte) (Key, error) {\n\ttmp, err := rippleEncoding.Decode(text)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tv := version(tmp[0])\n\tif !v.valid() {\n\t\treturn nil, errors.New(\"rkey decode: bad version\") // XXX\n\t}\n\tif k != nil && v != k.version() {\n\t\treturn nil, errors.New(\"rkey decode: version mismatch\") // XXX\n\t}\n\tif k == nil {\n\t\tk = v.newKey()\n\t\tif k == nil {\n\t\t\treturn nil, errors.New(\"rkey decode: version not supported\") // XXX\n\t\t}\n\t}\n\tpsize := v.PayloadSize()\n\ttsize := 1 + psize + 4\n\ttmp = padbytes(tmp, tsize) // XXX\n\tif len(tmp) != tsize {\n\t\treturn nil, errors.New(\"rkey decode: bad size\") // XXX\n\t}\n\tsum := doublehash.SumDoubleSha256(tmp[:1+psize])\n\tif !bytes.Equal(tmp[1+psize:], sum[:4]) {\n\t\treturn nil, errors.New(\"rkey decode: bad 
checksum bytes\") // XXX\n\t}\n\terr = k.setraw(tmp[1 : 1+v.PayloadSize()])\n\treturn k, err\n}", "func keyToProto(defaultAppID string, k *Key) *pb.Reference {\n\tappID := k.appID\n\tif appID == \"\" {\n\t\tappID = defaultAppID\n\t}\n\tn := 0\n\tfor i := k; i != nil; i = i.parent {\n\t\tn++\n\t}\n\te := make([]*pb.Path_Element, n)\n\tfor i := k; i != nil; i = i.parent {\n\t\tn--\n\t\te[n] = &pb.Path_Element{\n\t\t\tType: &i.kind,\n\t\t}\n\t\t// At most one of {Name,Id} should be set.\n\t\t// Neither will be set for incomplete keys.\n\t\tif i.stringID != \"\" {\n\t\t\te[n].Name = &i.stringID\n\t\t} else if i.intID != 0 {\n\t\t\te[n].Id = &i.intID\n\t\t}\n\t}\n\treturn &pb.Reference{\n\t\tApp: proto.String(appID),\n\t\tPath: &pb.Path{\n\t\t\tElement: e,\n\t\t},\n\t}\n}", "func GetAddressStringFromHDKey(hdKey *hdkeychain.ExtendedKey) (string, error) {\r\n\taddress, err := GetAddressFromHDKey(hdKey)\r\n\tif err != nil {\r\n\t\treturn \"\", err\r\n\t}\r\n\treturn address.String(), nil\r\n}", "func parseRef(k8sRef string) (string, string, error) {\n\ts := strings.Split(strings.TrimPrefix(k8sRef, keyReference), \"/\")\n\tif len(s) != 2 {\n\t\treturn \"\", \"\", errors.New(\"kubernetes specification should be in the format k8s://<namespace>/<secret>\")\n\t}\n\treturn s[0], s[1], nil\n}", "func getKey(keyFilePath, keyHandle string) (string, string, error) {\n\tvar rKey string\n\tvar rKeyInfo string\n\tvar rErr error\n\tif keyHandle == \"\" || encryptContainerImage {\n\t\tencryptContainerImage = true\n\t\tlogrus.Debugf(\"secureoverlay2: getting key for encryption: %s \", keyHandle)\n\t\tif keyFilePath != \"\" {\n\n\t\t\tunwrappedKey, err := exec.Command(\"wpm\", \"unwrap-key\", \"-i\", keyFilePath).CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", fmt.Errorf(\"secureoverlay2: Could not get unwrapped key from the wrapped key %v\", err)\n\t\t\t}\n\t\t\tif len(unwrappedKey) == 0 {\n\t\t\t\treturn \"\", \"\", fmt.Errorf(\"secureoverlay2: unwrapped key is 
empty\")\n\t\t\t}\n\t\t\tkey := string(unwrappedKey)\n\t\t\tkey = strings.TrimSuffix(key, \"\\n\")\n\t\t\tkeyInfo := strings.Split(keyFilePath, \"_\")\n\t\t\trKey, rKeyInfo, rErr = key, keyInfo[1], nil\n\t\t} else {\n\t\t\trKey, rKeyInfo, rErr = \"\", \"\", fmt.Errorf(\"secureoverlay2: keyFilePath empty\")\n\t\t}\n\n\t} else {\n\t\t//fetch the key for encrypting/decrypting the image\n\t\tlogrus.Debugf(\"secureoverlay2: getting key for decryption on : %s \", keyHandle)\n\t\trKey, rKeyInfo, rErr = getKmsKeyFromKeyCache(keyHandle)\n\t}\n\n\treturn rKey, rKeyInfo, rErr\n}", "func ParseBDNameFromKey(key string) (name string, err error) {\n\tlastSlashPos := strings.LastIndex(key, \"/\")\n\tif lastSlashPos > 0 && lastSlashPos < len(key)-1 {\n\t\treturn key[lastSlashPos+1:], nil\n\t}\n\n\treturn key, fmt.Errorf(\"wrong format of the key %s\", key)\n}", "func AddrPubkeyRelationKey(address []byte) []byte {\n\treturn append(AddrPubkeyRelationKeyPrefix, address...)\n}", "func keyToID(key []byte) uint64 {\n\treturn binary.BigEndian.Uint64(key)\n}", "func (dr *Resolver) ResolveNameFromERPC(pathKey model.PathKey) (string, error) {\n\tentry := counterEntry{\n\t\tresolutionType: metrics.ERPCTag,\n\t\tresolution: metrics.SegmentResolutionTag,\n\t}\n\n\tchallenge, err := dr.requestResolve(erpc.ResolveSegmentOp, pathKey)\n\tif err != nil {\n\t\tdr.missCounters[entry].Inc()\n\t\treturn \"\", fmt.Errorf(\"unable to get the name of mountID `%d` and inode `%d` with eRPC: %w\", pathKey.MountID, pathKey.Inode, err)\n\t}\n\n\tif challenge != model.ByteOrder.Uint32(dr.erpcSegment[12:16]) {\n\t\tdr.missCounters[entry].Inc()\n\t\treturn \"\", errERPCRequestNotProcessed\n\t}\n\n\tseg := model.NullTerminatedString(dr.erpcSegment[16:])\n\tif len(seg) == 0 || len(seg) > 0 && seg[0] == 0 {\n\t\tdr.missCounters[entry].Inc()\n\t\treturn \"\", fmt.Errorf(\"couldn't resolve segment (len: %d)\", len(seg))\n\t}\n\n\tdr.hitsCounters[entry].Inc()\n\treturn seg, nil\n}", "func dnsDecodeString(raw string) 
([]byte, error) {\n\tpad := 8 - (len(raw) % 8)\n\tnb := []byte(raw)\n\tif pad != 8 {\n\t\tnb = make([]byte, len(raw)+pad)\n\t\tcopy(nb, raw)\n\t\tfor index := 0; index < pad; index++ {\n\t\t\tnb[len(raw)+index] = '='\n\t\t}\n\t}\n\treturn sliverBase32.DecodeString(string(nb))\n}", "func (pk PublicKey) PublicKeyBase58() string {\n\treturn stringEntry(pk[PublicKeyBase58Property])\n}", "func DecodeKey(key string) (*ULTKey, error) {\n\tif key == \"\" {\n\t\treturn nil, ErrInvalidKey\n\t}\n\n\tb, err := b58.Decode(key)\n\tif err != nil {\n\t\treturn nil, ErrInvalidKey\n\t}\n\n\tvar ultKey ULTKey\n\tr := bytes.NewReader(b)\n\terr = binary.Read(r, binary.BigEndian, &ultKey)\n\tif err != nil {\n\t\treturn nil, ErrInvalidKey\n\t}\n\n\tswitch ultKey.Code {\n\tcase KeyTypeAccountID:\n\t\tfallthrough\n\tcase KeyTypeSeed:\n\t\tfallthrough\n\tcase KeyTypeTx:\n\t\tfallthrough\n\tcase KeyTypeTxSet:\n\t\tfallthrough\n\tcase KeyTypeLedgerHeader:\n\t\tfallthrough\n\tcase KeyTypeNodeID:\n\t\treturn &ultKey, nil\n\t}\n\treturn nil, ErrInvalidKey\n}", "func (id *UUID) DecodeString(src []byte) {\n\tconst srcBase = 62\n\tconst dstBase = 0x100000000\n\n\tparts := [StringMaxLen]byte{}\n\n\tpartsIndex := 21\n\tfor i := len(src); i > 0; {\n\t\t// offsets into base62Characters\n\t\tconst offsetUppercase = 10\n\t\tconst offsetLowercase = 36\n\n\t\ti--\n\t\tb := src[i]\n\t\tswitch {\n\t\tcase b >= '0' && b <= '9':\n\t\t\tb -= '0'\n\t\tcase b >= 'A' && b <= 'Z':\n\t\t\tb = offsetUppercase + (b - 'A')\n\t\tdefault:\n\t\t\tb = offsetLowercase + (b - 'a')\n\t\t}\n\t\tparts[partsIndex] = b\n\t\tpartsIndex--\n\t}\n\n\tn := len(id)\n\tbp := parts[:]\n\tbq := make([]byte, 0, len(src))\n\n\tfor len(bp) > 0 {\n\t\tquotient := bq[:0]\n\t\tremainder := uint64(0)\n\n\t\tfor _, c := range bp {\n\t\t\tvalue := uint64(c) + uint64(remainder)*srcBase\n\t\t\tdigit := value / dstBase\n\t\t\tremainder = value % dstBase\n\n\t\t\tif len(quotient) != 0 || digit != 0 {\n\t\t\t\tquotient = append(quotient, 
byte(digit))\n\t\t\t}\n\t\t}\n\n\t\tid[n-4] = byte(remainder >> 24)\n\t\tid[n-3] = byte(remainder >> 16)\n\t\tid[n-2] = byte(remainder >> 8)\n\t\tid[n-1] = byte(remainder)\n\t\tn -= 4\n\t\tbp = quotient\n\t}\n\n\tvar zero [16]byte\n\tcopy(id[:n], zero[:])\n}", "func MustParseVMStorageKey(key []byte) *dvmTypes.VMAccessPath {\n\t// Key length is expected to be correct: {address_20bytes}:{path_at_least_1byte}\n\texpectedMinLen := VMAddressLength + len(KeyDelimiter) + 1\n\tif len(key) < expectedMinLen {\n\t\tpanic(fmt.Errorf(\"VMKey (%s): invalid key length: expected / actual: %d / %d\", string(key), expectedMinLen, len(key)))\n\t}\n\n\t// Calc indices\n\taddressStartIdx := 0\n\taddressEndIdx := addressStartIdx + VMAddressLength\n\tdelimiterStartIdx := addressEndIdx\n\tdelimiterEndIdx := delimiterStartIdx + len(KeyDelimiter)\n\tpathStartIdx := delimiterEndIdx\n\n\t// Split key\n\taddressValue := key[addressStartIdx:addressEndIdx]\n\tdelimiterValue := key[delimiterStartIdx:delimiterEndIdx]\n\tpathValue := key[pathStartIdx:]\n\n\t// Validate\n\tif !bytes.Equal(delimiterValue, KeyDelimiter) {\n\t\tpanic(fmt.Errorf(\"VMKey (%s): 1st delimiter value is invalid\", string(key)))\n\t}\n\tif len(addressValue) < VMAddressLength {\n\t\tpanic(fmt.Errorf(\"VMKey (%s): address length is invalid: expected / actual: %d / %d\", string(key), VMAddressLength, len(addressValue)))\n\t}\n\tif len(pathValue) == 0 {\n\t\tpanic(fmt.Errorf(\"VMKey (%s): path length is invalid: expected / actual: GT 1 / %d\", string(key), len(pathValue)))\n\t}\n\n\treturn &dvmTypes.VMAccessPath{\n\t\tAddress: addressValue,\n\t\tPath: pathValue,\n\t}\n}", "func fixDesKey(key string) []byte {\n\ttmp := []byte(key)\n\tbuf := make([]byte, 8)\n\tif len(tmp) <= 8 {\n\t\tcopy(buf, tmp)\n\t} else {\n\t\tcopy(buf, tmp[:8])\n\t}\n\tfor i := 0; i < 8; i++ {\n\t\tbuf[i] = fixDesKeyByte(buf[i])\n\t}\n\treturn buf\n}", "func Decode(key uint32) (x, y, level uint32) {\n\tx = Undilate((key >> 4) & 0x05555555)\n\ty = 
Undilate((key >> 5) & 0x55555555)\n\tlevel = key & 0xF\n\treturn\n}", "func decode(src []byte) ([]byte, int, error) {\n\tb := string(src)\n\tanswer := big.NewInt(0)\n\tj := big.NewInt(1)\n\n\tfor i := len(b) - 1; i >= 0; i-- {\n\t\ttmp := strings.IndexAny(base58table, string(b[i]))\n\t\tif tmp == -1 {\n\t\t\tfmt.Println(b)\n\t\t\treturn []byte(\"\"), 0,\n\t\t\t\terrors.New(\"encoding/base58: invalid character found: ~\" +\n\t\t\t\t\tstring(b[i]) + \"~\")\n\t\t}\n\t\tidx := big.NewInt(int64(tmp))\n\t\ttmp1 := big.NewInt(0)\n\t\ttmp1.Mul(j, idx)\n\n\t\tanswer.Add(answer, tmp1)\n\t\tj.Mul(j, big.NewInt(radix))\n\t}\n\n\ttmpval := answer.Bytes()\n\n\tvar numZeros int\n\tfor numZeros = 0; numZeros < len(b); numZeros++ {\n\t\tif b[numZeros] != base58table[0] {\n\t\t\tbreak\n\t\t}\n\t}\n\tflen := numZeros + len(tmpval)\n\tval := make([]byte, flen, flen)\n\tcopy(val[numZeros:], tmpval)\n\treturn val, len(val), nil\n}", "func deriveFromKeyLoc(scopedMgr *waddrmgr.ScopedKeyManager,\n\taddrmgrNs walletdb.ReadWriteBucket,\n\tkeyLoc keychain.KeyLocator) (*btcec.PrivateKey, error) {\n\n\tpath := waddrmgr.DerivationPath{\n\t\tInternalAccount: uint32(keyLoc.Family),\n\t\tAccount: uint32(keyLoc.Family),\n\t\tBranch: 0,\n\t\tIndex: keyLoc.Index,\n\t}\n\taddr, err := scopedMgr.DeriveFromKeyPath(addrmgrNs, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn addr.(waddrmgr.ManagedPubKeyAddress).PrivKey()\n}", "func GetAddressFromPublicKey(publicKey []byte) string {\n\tpublicKeyHash := sha256.Sum256(publicKey)\n\n\treturn GetBigNumberStringFromBytes(GetFirstEightBytesReversed(publicKeyHash[:sha256.Size])) + \"L\"\n}", "func GraphKeyParse(key []byte) string {\n\ttmp := bytes.Split(key, []byte{0})\n\tgraph := string(tmp[1])\n\treturn graph\n}", "func decodeCoinID(coinID dex.Bytes) (*chainhash.Hash, uint32, error) {\n\tif len(coinID) != 36 {\n\t\treturn nil, 0, fmt.Errorf(\"coin ID wrong length. 
expected 36, got %d\", len(coinID))\n\t}\n\tvar txHash chainhash.Hash\n\tcopy(txHash[:], coinID[:32])\n\treturn &txHash, binary.BigEndian.Uint32(coinID[32:]), nil\n}", "func decodeCoinID(coinID dex.Bytes) (*chainhash.Hash, uint32, error) {\n\tif len(coinID) != 36 {\n\t\treturn nil, 0, fmt.Errorf(\"coin ID wrong length. expected 36, got %d\", len(coinID))\n\t}\n\tvar txHash chainhash.Hash\n\tcopy(txHash[:], coinID[:32])\n\treturn &txHash, binary.BigEndian.Uint32(coinID[32:]), nil\n}", "func decodeCoinID(coinID dex.Bytes) (*chainhash.Hash, uint32, error) {\n\tif len(coinID) != 36 {\n\t\treturn nil, 0, fmt.Errorf(\"coin ID wrong length. expected 36, got %d\", len(coinID))\n\t}\n\tvar txHash chainhash.Hash\n\tcopy(txHash[:], coinID[:32])\n\treturn &txHash, binary.BigEndian.Uint32(coinID[32:]), nil\n}", "func fingerprintKey(s string) (fingerprint string, err error) {\n\tdata, err := base64.StdEncoding.DecodeString(s)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Can't base64 decode original key\")\n\t}\n\tsha256 := sha256.New()\n\tsha256.Write(data)\n\tb64 := base64.StdEncoding.EncodeToString(sha256.Sum(nil))\n\treturn strings.TrimRight(b64, \"=\"), nil\n}", "func GetValueWithKey(key, url string) (string, error) {\n\tclient := &http.Client{}\n\tres, err := client.Get(url + key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(body) == 0 {\n\t\treturn \"\", errors.New(\"can not get satisfied value\")\n\t}\n\tret := []ConsulValue{}\n\terr = json.Unmarshal(body, &ret)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(ret) == 0 {\n\t\treturn \"\", errors.New(\"Can not get value\")\n\t}\n\tdecoded, err := base64.StdEncoding.DecodeString(ret[0].Value)\n\n\treturn string(decoded), err\n}", "func LookupDomainKey(selector string, parentDomain string) ([]byte, error) {\n\tselector = strings.ToLower(selector)\n\tnonce := dnsNonce(nonceStdSize)\n\tdomain := 
fmt.Sprintf(\"_%s.%s.%s.%s\", nonce, selector, domainKeyMsg, parentDomain)\n\n\ttxt, err := dnsLookup(domain)\n\tif err != nil {\n\t\t// {{if .Debug}}\n\t\tlog.Printf(\"Error fetching server certificate %v\", err)\n\t\t// {{end}}\n\t\treturn nil, err\n\t}\n\tcertPEM, err := base64.RawStdEncoding.DecodeString(txt)\n\tif err != nil {\n\t\t// {{if .Debug}}\n\t\tlog.Printf(\"Error decoding certificate %v\", err)\n\t\t// {{end}}\n\t\treturn nil, err\n\t}\n\treturn certPEM, nil\n}", "func decodeBase62(encStr string) string {\n\treturn string(noPad62Encoding.DecodeToBigInt(encStr).Bytes())\n}", "func ParseRecoveryKey(s string) (out RecoveryKey, err error) {\n\tfor i := 0; i < 8; i++ {\n\t\tif len(s) < 5 {\n\t\t\treturn RecoveryKey{}, errors.New(\"incorrectly formatted: insufficient characters\")\n\t\t}\n\t\tx, err := strconv.ParseUint(s[0:5], 10, 16)\n\t\tif err != nil {\n\t\t\treturn RecoveryKey{}, xerrors.Errorf(\"incorrectly formatted: %w\", err)\n\t\t}\n\t\tbinary.LittleEndian.PutUint16(out[i*2:], uint16(x))\n\n\t\t// Move to the next 5 digits\n\t\ts = s[5:]\n\t\t// Permit each set of 5 digits to be separated by an optional '-', but don't allow the formatted key to end or begin with one.\n\t\tif len(s) > 1 && s[0] == '-' {\n\t\t\ts = s[1:]\n\t\t}\n\t}\n\n\tif len(s) > 0 {\n\t\treturn RecoveryKey{}, errors.New(\"incorrectly formatted: too many characters\")\n\t}\n\n\treturn\n}", "func DstEdgeKeyParse(key []byte) (string, string, string, string, string, byte) {\n\ttmp := bytes.Split(key, []byte{0})\n\tgraph := tmp[1]\n\tdst := tmp[2]\n\tsrc := tmp[3]\n\teid := tmp[4]\n\tlabel := tmp[5]\n\tetype := tmp[6]\n\treturn string(graph), string(src), string(dst), string(eid), string(label), etype[0]\n}", "func resolveBitcodePath(bcPath string) string {\n\tif _, err := os.Stat(bcPath); os.IsNotExist(err) {\n\t\t// If the bitcode file does not exist, try to find it in the store\n\t\tif LLVMBitcodeStorePath != \"\" {\n\t\t\t// Compute absolute path hash\n\t\t\tabsBcPath, _ := 
filepath.Abs(bcPath)\n\t\t\tstoreBcPath := path.Join(LLVMBitcodeStorePath, getHashedPath(absBcPath))\n\t\t\tif _, err := os.Stat(storeBcPath); os.IsNotExist(err) {\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t\treturn storeBcPath\n\t\t}\n\t\tLogWarning(\"Failed to find the file %v\\n\", bcPath)\n\t\treturn \"\"\n\t}\n\treturn bcPath\n}", "func resolveRKERegistry(content, registry string) string {\n\texp := `image:.*`\n\treturn regexp.MustCompile(exp).ReplaceAllStringFunc(content, func(origin string) string {\n\t\ts := strings.SplitN(origin, \":\", 2)\n\t\tif len(s) != 2 {\n\t\t\treturn origin\n\t\t}\n\t\toldImg := strings.TrimLeft(s[1], \" \")\n\t\tif !strings.HasPrefix(oldImg, registry) {\n\t\t\tres := \"image: \" + path.Join(registry, oldImg)\n\t\t\tlogrus.Debugf(\"networkaddons: %s replaced by %s\", oldImg, res)\n\t\t\treturn res\n\t\t}\n\n\t\treturn origin\n\t})\n}", "func HexToKey(s string) Key { return BytesToKey(common.FromHex(s)) }", "func getParent(imageName, registryAddr string) (string, error) {\n\t// parse image base name, tag\n\tref, tag, _ := reference.Parse(imageName)\n\ttoken, _ := getToken(ref)\n\n\t// construct get url\n\turl := \"https://registry-1.docker.io/v2/\" + ref + \"/manifests/\" + tag\n\tlogrus.Debugln(\"get url:\", url)\n\n\tclient := http.Client{}\n\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Set(\"Authorization\", \"Bearer \" + token)\n\tresp, err := client.Do(request)\n\tif err != nil {\n\t\t// handle error\n\t\treturn \"\", err\n\t}\n\tlogrus.Debugln(\"get parent, resp\", resp)\n\trc := resp.Body\n\tdefer rc.Close()\n\n\t// parse response, store it into a map\n\tm := map[string]interface{}{}\n\tdec := json.NewDecoder(rc)\n\tfor {\n\t\tif err := dec.Decode(&m); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tif errMsg, ok := m[\"error\"]; ok {\n\t\treturn \"\", fmt.Errorf(\"%v\", errMsg)\n\t}\n\n\t// inspect history section to get parent id\n\t// fixme: this 
convert workflow is tooooo ugly\n\tif history, ok := m[\"history\"].([]interface{}); !ok {\n\t\treturn \"\", ErrParse\n\t} else if len(history) == 0 {\n\t\treturn \"\", fmt.Errorf(\"empty history\")\n\t} else {\n\t\tv1, _ := history[0].(map[string]interface{})\n\t\tvar content map[string]interface{}\n\t\tjString, _ := v1[\"v1Compatibility\"].(string)\n\t\terr = json.Unmarshal([]byte(jString), &content)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tconfig, ok := content[\"config\"]\n\t\tif !ok {\n\t\t\treturn \"\", ErrParse\n\t\t}\n\n\t\tconfigMap, _ := config.(map[string]interface{})\n\t\tparentBase, ok := configMap[\"Image\"]\n\t\tif !ok {\n\t\t\treturn \"\", ErrParse\n\t\t}\n\n\t\tparentID, _ := parentBase.(string)\n\t\tif parentID == \"\" {\n\t\t\treturn \"\", ErrParse\n\t\t}\n\t\treturn parentID, nil\n\t}\n}", "func keyFromGOB64(str string) (*key, error) {\n\tby, err := base64.StdEncoding.DecodeString(str)\n\tif err != nil {\n\t\treturn (*key)(nil), err\n\t}\n\n\tb := bytes.Buffer{}\n\tb.Write(by)\n\td := gob.NewDecoder(&b)\n\n\tvar k *key\n\n\tif err = d.Decode(&k); err != nil {\n\t\tfmt.Println(\"failed gob Decode\", err)\n\t}\n\n\treturn k, nil\n}", "func Decode(encoding Encoding, str string) ([]byte, error) {\n\tswitch {\n\tcase !encoding.valid():\n\t\treturn nil, errInvalidEncoding\n\tcase len(str) == 0:\n\t\treturn nil, nil\n\tcase encoding == CB58 && len(str) > maxCB58DecodeSize:\n\t\treturn nil, fmt.Errorf(\"string length (%d) > maximum for cb58 (%d)\", len(str), maxCB58DecodeSize)\n\t}\n\n\tvar (\n\t\tdecodedBytes []byte\n\t\terr error\n\t)\n\tswitch encoding {\n\tcase Hex:\n\t\tif !strings.HasPrefix(str, hexPrefix) {\n\t\t\treturn nil, errMissingHexPrefix\n\t\t}\n\t\tdecodedBytes, err = hex.DecodeString(str[2:])\n\tcase CB58:\n\t\tdecodedBytes, err = base58.Decode(str)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(decodedBytes) < checksumLen {\n\t\treturn nil, errMissingChecksum\n\t}\n\t// Verify the checksum\n\trawBytes := 
decodedBytes[:len(decodedBytes)-checksumLen]\n\tif len(rawBytes) > maxCB58EncodeSize {\n\t\treturn nil, fmt.Errorf(\"byte slice length (%d) > maximum for cb58 (%d)\", len(decodedBytes), maxCB58EncodeSize)\n\t}\n\n\tchecksum := decodedBytes[len(decodedBytes)-checksumLen:]\n\tif !bytes.Equal(checksum, hashing.Checksum(rawBytes, checksumLen)) {\n\t\treturn nil, errBadChecksum\n\t}\n\treturn rawBytes, nil\n}", "func keyToString(k []byte) string {\n\tif len(k) != lenHash {\n\t\tpanic(fmt.Sprintf(\"bad hash passed to hashToKey: %x\", k))\n\t}\n\treturn fmt.Sprintf(\"%s%x\", hashPrefix, k)[0:lenKey]\n}" ]
[ "0.571068", "0.5294933", "0.52545106", "0.51253337", "0.5112808", "0.5112781", "0.50850344", "0.5057352", "0.50094235", "0.4925115", "0.4873648", "0.48627982", "0.48295772", "0.4828304", "0.4825008", "0.4823676", "0.48165122", "0.4806838", "0.48006848", "0.4794631", "0.4741591", "0.47363746", "0.47293058", "0.4728139", "0.47236204", "0.47225848", "0.47092107", "0.4687748", "0.4686812", "0.46675277", "0.46618938", "0.46588424", "0.46479234", "0.46457753", "0.4638373", "0.46244055", "0.46162176", "0.45974168", "0.45964804", "0.45727345", "0.45690793", "0.45469233", "0.45465487", "0.4545569", "0.45411706", "0.4535716", "0.4535716", "0.4531773", "0.45299852", "0.45266423", "0.45171356", "0.45070583", "0.4500771", "0.44998935", "0.44923782", "0.44918782", "0.4484163", "0.4483429", "0.4474383", "0.44630384", "0.4460898", "0.44587058", "0.44514292", "0.44501474", "0.44468078", "0.4445847", "0.4443863", "0.4441226", "0.44404024", "0.44358802", "0.4429098", "0.44168255", "0.4413813", "0.4409115", "0.44036326", "0.44014114", "0.43973204", "0.43952617", "0.43923536", "0.4389744", "0.43896294", "0.43887088", "0.4382822", "0.43785056", "0.43746746", "0.43746746", "0.43746746", "0.4370797", "0.43688053", "0.43593678", "0.4352337", "0.4352046", "0.43516883", "0.43516824", "0.43510184", "0.43509287", "0.43438834", "0.43421406", "0.43338567", "0.4333605" ]
0.73456675
0
NewJSONFileWriter creates new json file logger.
func NewJSONFileWriter(file string) (*JSONFileWriter, error) { switch file { case "stdout": return &JSONFileWriter{os.Stdout}, nil case "stderr": return &JSONFileWriter{os.Stderr}, nil default: f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) if err != nil { return nil, err } return &JSONFileWriter{f}, nil } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewJSONLogger(logPath, filename, lv string) (*Logger, error) {\n\tlogger := logrus.New()\n\tlogger.AddHook(NewHook())\n\n\tlogger.Formatter = &logrus.JSONFormatter{}\n\n\tlevel, err := logrus.ParseLevel(lv)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger.SetLevel(level)\n\n\tfd, err := os.OpenFile(\n\t\tpath.Join(logPath, filename),\n\t\tos.O_CREATE|os.O_APPEND|os.O_WRONLY,\n\t\t0644,\n\t)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif os.MkdirAll(logPath, 0777) != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tgoto Finally\n\t\t}\n\t\treturn nil, err\n\t}\n\nFinally:\n\tlogger.Out = io.MultiWriter(os.Stdout, fd)\n\treturn &Logger{logger}, nil\n}", "func NewJSONLogFile(logPath string, perms os.FileMode, marshalFunc MarshalFunc) (*JSONLogFile, error) {\n\tf, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &JSONLogFile{\n\t\tf: f,\n\t\tperms: perms,\n\t\tclosed: false,\n\t\tmarshalFunc: marshalFunc,\n\t}, nil\n}", "func newJsonLogger(o *logOptions) Logger {\n\treturn &jsonLogger{\n\t\tjsonLogParser: newJsonLogParser(o),\n\t}\n}", "func NewJSONLogger(service string, name string) *JSONLogger {\n\treturn NewJSONLoggerWithWriter(service, name, os.Stdout)\n}", "func loggerJSON(l jsonLog) {\n\tl.Date = time.Now()\n\tif l.Level == 0 {\n\t\tl.Level = 6\n\t}\n\tif Config.MinLogLevel >= l.Level {\n\t\tif l.Version == \"\" {\n\t\t\tl.Version = \"1.1\"\n\t\t}\n\t\tif l.Host == \"\" {\n\t\t\tl.Host = \"Quotes\"\n\t\t}\n\t\tif l.ResponseCode == 0 {\n\t\t\tl.ResponseCode = 200\n\t\t}\n\t\t_ = os.MkdirAll(\"./logs/\", os.ModePerm)\n\t\tf, err := os.OpenFile(\"./logs/logs.json\", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error opening logs.json file: %v\", err)\n\t\t}\n\t\tdata, _ := json.Marshal(l)\n\t\tf.WriteString(string(data) + \"\\n\")\n\t\tf.Close()\n\t}\n}", "func NewJSONWriter(logger logrus.FieldLogger) *JSONWriter {\n\twriter := 
&JSONWriter{\n\t\tlogger: logger,\n\t}\n\n\twriter.Reporter = writer.reporter\n\treturn writer\n}", "func NewJSONLogger() *StandardLogger {\n\tvar baseLogger = logrus.New()\n\tvar standardLogger = &StandardLogger{baseLogger}\n\tstandardLogger.Formatter = &logrus.JSONFormatter{}\n\treturn standardLogger\n}", "func JsonFileLogger(out SuperMarketLog) error {\n\toutput, _ := json.Marshal(out) // Create he output to log\n\tstringOutput := string(output) + \"\\n\" // Append a newline to the output\n\t//If the file doesn't exist, create it or append to the file\n\tf, err := os.OpenFile(\"rest.log\", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif _, err := f.Write([]byte(stringOutput)); err != nil { //Write out to the log\n\t\tlog.Fatal(err)\n\t}\n\tif err := f.Close(); err != nil { //Close the writer\n\t\tlog.Fatal(err)\n\t}\n\treturn err\n}", "func New(ctx logger.Context) (logger.Logger, error) {\n\tlog, err := os.OpenFile(ctx.LogPath, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar capval int64 = -1\n\tif capacity, ok := ctx.Config[\"max-size\"]; ok {\n\t\tvar err error\n\t\tcapval, err = units.FromHumanSize(capacity)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar maxFiles = 1\n\tif maxFileString, ok := ctx.Config[\"max-file\"]; ok {\n\t\tmaxFiles, err = strconv.Atoi(maxFileString)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif maxFiles < 1 {\n\t\t\treturn nil, fmt.Errorf(\"max-file cannot be less than 1\")\n\t\t}\n\t}\n\n\tvar extra []byte\n\tif attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 {\n\t\tvar err error\n\t\textra, err = json.Marshal(attrs)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &JSONFileLogger{\n\t\tf: log,\n\t\tbuf: bytes.NewBuffer(nil),\n\t\tctx: ctx,\n\t\tcapacity: capval,\n\t\tn: maxFiles,\n\t\treaders: make(map[*logger.LogWatcher]struct{}),\n\t\tnotifyRotate: pubsub.NewPublisher(0, 1),\n\t\textra: 
extra,\n\t}, nil\n}", "func New(ctx logger.Context) (logger.Logger, error) {\n\tlog, err := os.OpenFile(ctx.LogPath, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar capval int64 = -1\n\tif capacity, ok := ctx.Config[\"max-size\"]; ok {\n\t\tvar err error\n\t\tcapval, err = units.FromHumanSize(capacity)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar maxFiles = 1\n\tif maxFileString, ok := ctx.Config[\"max-file\"]; ok {\n\t\tmaxFiles, err = strconv.Atoi(maxFileString)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif maxFiles < 1 {\n\t\t\treturn nil, fmt.Errorf(\"max-file cannot be less than 1\")\n\t\t}\n\t}\n\n\tvar extra []byte\n\tif attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 {\n\t\tvar err error\n\t\textra, err = json.Marshal(attrs)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &JSONFileLogger{\n\t\tf: log,\n\t\tbuf: bytes.NewBuffer(nil),\n\t\tctx: ctx,\n\t\tcapacity: capval,\n\t\tn: maxFiles,\n\t\treaders: make(map[*logger.LogWatcher]struct{}),\n\t\tnotifyRotate: pubsub.NewPublisher(0, 1),\n\t\textra: extra,\n\t}, nil\n}", "func writeJSONFile(jsonOutputPath string, writeChannel <-chan map[string]string, done chan<- bool, pretty bool) {\n\t// Init a JSON writer func\n\twriteString := createStringWriter(jsonOutputPath)\n\t// Init the JSON parse func and the breakline char\n\tjsonFunc, breakLine := getJSONFunc(pretty)\n\n\t//Info log...\n\tfmt.Println(\"Writing JSON file...\")\n\n\t// Write the first character of JSON file, starting with \"[\" since it will always generate\n\t// and array of records\n\twriteString(\"[\"+breakLine, false)\n\n\tfirst := true\n\n\tfor {\n\t\t// Waiting for records pushed into writerChannel\n\t\trecord, more := <-writeChannel\n\n\t\t// If the channel is \"open\" for more transmission\n\t\tif more {\n\t\t\t// If it is NOT the first record, break the line\n\t\t\tif !first {\n\t\t\t\twriteString(\",\"+breakLine, false)\n\t\t\t\t// otherwise 
don't break the line\n\t\t\t} else {\n\t\t\t\tfirst = false\n\t\t\t}\n\t\t\t// Parse the record into JSON\n\t\t\tjsonData := jsonFunc(record)\n\t\t\t// Writing the JSON string with the writer function\n\t\t\twriteString(jsonData, false)\n\t\t\t// If here, then no more records to parse and need to close the file\n\t\t} else {\n\t\t\t// Writing the last char to the file and close it\n\t\t\twriteString(breakLine+\"]\", true)\n\t\t\t// Print that we are done to terminal\n\t\t\tfmt.Printf(\"Done!\\nOutput JSON written to: %s\", jsonOutputPath)\n\t\t\t// Send \"done\" signal to main func to let it know it can start exiting\n\t\t\tdone <- true\n\t\t\t// Break out of the loop\n\t\t\tbreak\n\t\t}\n\t}\n}", "func NewJSONLogger(output zapcore.WriteSyncer, level int) Logger {\n\tl := NewZapLogger(output, getJSONEncoder(), level)\n\treturn &logger{l.Sugar()}\n}", "func createStringWriter(jsonOutputPath string) func(string, bool) {\n\t// Open the JSON file we will start writing to\n\tf, err := os.Create(jsonOutputPath)\n\t// Check for err, gracefully error\n\tcheck(err)\n\n\t// Return the function that will be used to write to the JSON file we decalred above\n\treturn func(data string, close bool) {\n\t\t// Write to the JSON file\n\t\t_, err := f.WriteString(data)\n\t\t// Check for error, gracefully handle\n\t\tcheck(err)\n\t\t// If close == true, then there's no more data left to write to close the file\n\t\tif close {\n\t\t\tf.Close()\n\t\t}\n\t}\n}", "func (logger *ProcessLogger) AddJSONWriter(writer io.Writer) {\n\n\tlog := logrus.New()\n\tlog.Formatter = new(logrus.JSONFormatter)\n\tlog.Out = writer\n\tlogger.entry = append(logger.entry, logrus.NewEntry(log))\n\n}", "func createJSONFile (outputDir string, fileName string) *os.File {\n\n file, err := os.Create(filepath.Join(outputDir, fileName+jsonExt)) \n check(err) \n\n return file\n}", "func (ij *isolatedFile) writeJSONFile(opener openFiler) error {\n\tf, err := opener.OpenFile(ij.path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 
0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = io.Copy(f, bytes.NewBuffer(ij.json))\n\terr2 := f.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn err2\n}", "func NewFormattedJSONLogger(w io.Writer) kitlog.Logger {\n\treturn &FormattedJSONLogger{w}\n}", "func NewFileWriter(tInfo *InfoDict, fileName string) FileWriter {\n\tvar f FileWriter\n\tf.Info = tInfo\n\n\tfile, err := os.Create(fileName)\n\tif err != nil {\n\t\tlog.Fatal(\"Error creating file to write pieces\\n\", err)\n\t}\n\tif err := file.Truncate(int64(tInfo.Length)); err != nil {\n\t\tlog.Fatal(\"Unable to create a file with enough space for torrent\\n\", err)\n\t}\n\n\tf.DataFile = file // f is now the file where data is to be written\n\tf.Status = CREATED\n\treturn f\n}", "func NewJSONWriter(w http.ResponseWriter) JSONWriterFunc {\n\treturn func(v interface{}, status int) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(status)\n\t\tjson.NewEncoder(w).Encode(v)\n\t}\n}", "func NewJSON() Formatter {\n\treturn &jsonFormat{\n\t\tOutput: os.Stdout,\n\t}\n}", "func New(info logger.Info) (logger.Logger, error) {\n\tlogDir := removeLogDirOption(info.Config)\n\tif logDir == \"\" {\n\t\tlogDir = defaultLogDir\n\t}\n\tinfo.LogPath = filepath.Join(logDir, info.ContainerID)\n\n\tif err := os.MkdirAll(filepath.Dir(info.LogPath), 0755); err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting up logger dir: %v\", err)\n\t}\n\n\treturn jsonfilelog.New(info)\n}", "func writeJSONFile(json []byte) {\n\terr := ioutil.WriteFile(\"output.json\", json, 0644)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func writeJSONFile(t tree, filename string) {\n\t// marshal data retrieved into JSON\n\tdata, err := json.Marshal(t)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error marshaling data for JSON => {%s}\", err)\n\t}\n\n\t// write data into file\n\terr = ioutil.WriteFile(filename, data, os.ModePerm)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to write json data to file, %s => {%s}\", 
destJSON, err)\n\t}\n}", "func NewLogger(opts Options) *zap.Logger {\n\treturn newRotatingJSONFileLogger(opts)\n}", "func Create(dir string, rotateMode int, fileType string, compress bool) (*Logger, error) {\r\n\tif compress {\r\n\t\tfileType += \".gz\"\r\n\t}\r\n\r\n\tlogger := &Logger{\r\n\t\tdir: dir,\r\n\t\tcloseChan: make(chan int),\r\n\t\trecChan: make(chan Mrecord, 1024),\r\n\t}\r\n\tif err := logger.rotateFile(rotateMode, fileType, compress); err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tlogger.closeWait.Add(1)\r\n\tgo func() {\r\n\t\tvar (\r\n\t\t\tfileTimer *time.Timer\r\n\t\t\tnow = time.Now()\r\n\t\t)\r\n\t\tswitch rotateMode {\r\n\t\tcase ROTATE_BY_DAY:\r\n\t\t\t// next day's time line\r\n\t\t\tfileTimer = time.NewTimer(time.Date(\r\n\t\t\t\tnow.Year(), now.Month(), now.Day(),\r\n\t\t\t\t0, 0, 0, 0, now.Location(),\r\n\t\t\t).Add(24 * time.Hour).Sub(now))\r\n\t\tcase ROTATE_BY_HOUR:\r\n\t\t\t// next hour's time line\r\n\t\t\tfileTimer = time.NewTimer(time.Date(\r\n\t\t\t\tnow.Year(), now.Month(), now.Day(),\r\n\t\t\t\tnow.Hour(), 0, 0, 0, now.Location(),\r\n\t\t\t).Add(time.Hour).Sub(now))\r\n\t\tcase ROTATE_BY_SIZE:\r\n\t\t\t// check log file size per 10 minutes\r\n\t\t\tfileTimer = time.NewTimer(time.Date(\r\n\t\t\t\tnow.Year(), now.Month(), now.Day(),\r\n\t\t\t\tnow.Hour(), now.Minute(), now.Second(), now.Nanosecond(), now.Location(),\r\n\t\t\t).Add(time.Second * 10).Sub(now))\r\n\t\t}\r\n\r\n\t\t// flush per 5 seconds\r\n\t\tflushTicker := time.NewTicker(5 * time.Second)\r\n\t\tdefer func() {\r\n\t\t\tflushTicker.Stop()\r\n\t\t\tlogger.jsonfile.Close()\r\n\t\t\tlogger.closeWait.Done()\r\n\t\t}()\r\n\r\n\t\tfor {\r\n\t\t\tselect {\r\n\t\t\tcase rec := <-logger.recChan:\r\n\t\t\t\tlogger.jsonfile.Put(rec)\r\n\t\t\tcase <-flushTicker.C:\r\n\t\t\t\tif err := logger.jsonfile.Flush(); err != nil {\r\n\t\t\t\t\tlog.Println(\"log flush failed:\", err.Error())\r\n\t\t\t\t}\r\n\t\t\tcase <-fileTimer.C:\r\n\r\n\t\t\t\tswitch rotateMode {\r\n\t\t\t\tcase 
ROTATE_BY_DAY:\r\n\t\t\t\t\tif err := logger.rotateFile(rotateMode, fileType, compress); err != nil {\r\n\t\t\t\t\t\tpanic(err)\r\n\t\t\t\t\t}\r\n\t\t\t\t\tfileTimer = time.NewTimer(24 * time.Hour)\r\n\t\t\t\tcase ROTATE_BY_HOUR:\r\n\t\t\t\t\tif err := logger.rotateFile(rotateMode, fileType, compress); err != nil {\r\n\t\t\t\t\t\tpanic(err)\r\n\t\t\t\t\t}\r\n\t\t\t\t\tfileTimer = time.NewTimer(time.Hour)\r\n\t\t\t\tcase ROTATE_BY_SIZE:\r\n\t\t\t\t\tif logger.jsonfile.curSize >= logger.jsonfile.maxSize {\r\n\t\t\t\t\t\tif err := logger.rotateFile(rotateMode, fileType, compress); err != nil {\r\n\t\t\t\t\t\t\tpanic(err)\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t\tfileTimer = time.NewTimer(time.Second * 10)\r\n\t\t\t\t}\r\n\t\t\tcase <-logger.closeChan:\r\n\t\t\t\tfor {\r\n\t\t\t\t\tselect {\r\n\t\t\t\t\tcase rec := <-logger.recChan:\r\n\t\t\t\t\t\tlogger.jsonfile.Put(rec)\r\n\t\t\t\t\tdefault:\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}()\r\n\r\n\treturn logger, nil\r\n}", "func (l *Logger) createNew() {\n\tpath := filepath.Join(l.dir, fmt.Sprintf(\"%s-current.log\", l.filePrefix))\n\tif err := os.MkdirAll(l.dir, 0744); err != nil {\n\t\tglog.WithError(err).Fatal(\"Unable to create directory.\")\n\t}\n\tf, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC,\n\t\tos.FileMode(0644))\n\tif err != nil {\n\t\tglog.WithError(err).Fatal(\"Unable to create a new file.\")\n\t}\n\tl.cf = new(CurFile)\n\tl.cf.f = f\n\tcache := new(Cache)\n\tcreateAndUpdateBloomFilter(cache)\n\tatomic.StorePointer(&l.cf.cch, unsafe.Pointer(cache))\n}", "func NewJSONLoggerWithWriter(service string, name string, out io.Writer) *JSONLogger {\n\treturn &JSONLogger{service, name, out}\n}", "func NewJSONStreamWriter(out io.Writer) *JSONStreamWriter {\n\treturn &JSONStreamWriter{\n\t\tout: out,\n\t}\n}", "func newFileWriter(path string) (*fileWriter, error) {\n\tif path == \"\" {\n\t\treturn nil, errEmptyPath\n\t}\n\t_, err := os.Stat(path)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\treturn &fileWriter{path}, nil\n}", "func createJSONFile() {\n\tusers := []User{\n\t\t{Username: \"Thao Ho\", Password: \"change me\", Email: \"[email protected]\"},\n\t\t{Username: \"Thao Nhi\", Password: \"change me\", Email: \"[email protected]\"},\n\t}\n\n\tdb := UserDb{Users: users, Type: \"Simple\"}\n\n\t//fmt.Println(users)\n\tvar buf = new(bytes.Buffer)\n\n\tenc := json.NewEncoder(buf)\n\tenc.Encode(db)\n\tf, err := os.Create(\"user.db.json\")\n\tif nil != err {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer f.Close()\n\tio.Copy(f, buf) // can use the command jq '.' user.db.json in terminal commands to format the json file\n}", "func New(level string, writer string, prettyprint string) Logger {\n\tvar lg Logger\n\tlg.level = stringToLevel()[level]\n\tlg.logger = json.NewEncoder(stringToWriter(writer))\n\tif prettyprint == \"true\" {\n\t\tlg.logger.SetIndent(\"\", \" \")\n\t}\n\n\tvar process = strings.Split(os.Args[0], \"/\")\n\tlg.json.Process = process[len(process)-1]\n\n\treturn lg\n}", "func NewTMJSONLogger(w io.Writer) Logger {\n\tlogger := kitlog.NewJSONLogger(w)\n\tlogger = kitlog.With(logger, \"ts\", kitlog.DefaultTimestampUTC)\n\treturn &tmLogger{logger}\n}", "func NewJSONGroupWriter(outputDir string, countPerFile uint64) JSONGroupWriter {\n\treturn JSONGroupWriter{\n\t\toutDir: outputDir,\n\t\tcount: countPerFile,\n\t\trecs: make([]interface{}, countPerFile),\n\t}\n}", "func newFileLogger() *fileLogger {\n\t// Open the log file for writing.\n\tf, err := os.OpenFile(*logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tglog.Fatalf(\"could not open log file: %v\", err)\n\t}\n\treturn &fileLogger{f: f}\n}", "func (m *Mouse) WriteJSON() {\n\tvar flags int\n\tif FileExists() {\n\t\tos.Truncate(\"mouse.json\", 0)\n\t\tflags = os.O_WRONLY\n\t} else {\n\t\tflags = os.O_WRONLY | os.O_CREATE\n\t}\n\tfile, _ := os.OpenFile(\"mouse.json\", flags, 0755)\n\tdefer file.Close()\n\tjson.NewEncoder(file).Encode(m)\n}", 
"func MakeJSONWriter(w io.Writer) JSONWriter {\n\treturn JSONWriter{w: w}\n}", "func WriteToJSONFile(intermediate []KeyValue, mapTaskNum, reduceTaskNUm int) string {\n\t// fmt.Println(\"Writing file for task \" + strconv.Itoa(mapTaskNum))\n\tfilename := \"temp-mr-\" + strconv.Itoa(mapTaskNum) + \"-\" + strconv.Itoa(reduceTaskNUm)\n\tjfile, _ := os.Create(filename)\n\n\tenc := json.NewEncoder(jfile)\n\tfor _, kv := range intermediate {\n\t\terr := enc.Encode(&kv)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error: \", err)\n\t\t}\n\t}\n\treturn filename\n}", "func writeFile(path string, data []byte) {\n\tfile, _ := os.Create(path)\n\tdefer file.Close()\n\tfile.Write(data)\n\tfmt.Println(\"JSON generate to \" + path + \" complete.\")\n\tfmt.Println()\n}", "func newJSONEncoder() *jsonEncoder {\n\tbuffer := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buffer)\n\n\treturn &jsonEncoder{\n\t\tbuffer: buffer,\n\t\tencoder: encoder,\n\t\tcontentType: jsonContentType,\n\t}\n}", "func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer {\n\t// we can write JSON objects directly to the writer, because they are self-framing\n\treturn w\n}", "func New(out io.Writer) Logger {\n\tl := log.NewJSONLogger(log.NewSyncWriter(out))\n\tl = log.With(l, \"ts\", log.DefaultTimestampUTC)\n\treturn &logger{l}\n}", "func (logger *FileLogger) openNew() error {\n\terr := os.MkdirAll(logger.dir(), 0755)\n\tfmt.Println(logger.dir())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't make directories for new logfile: %q\", err)\n\t}\n\n\t// We use truncate here because this should only get called when we've closed\n\t// the renamed (by logrotate) file ourselves. 
If someone else creates the file\n\t// in the meantime, just wipe out the contents.\n\tfmt.Println(logger.Filename)\n\tf, err := os.OpenFile(logger.Filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0644))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't open new logfile: %s\", err)\n\t}\n\tlogger.file = f\n\treturn nil\n}", "func newWriteLogger(log *log.Logger, w io.Writer) io.Writer {\n\treturn &writeLogger{log, w}\n}", "func WriteFileJSON(t *testing.T, path string, data interface{}) {\n\tjsn := helper.PrettyJSON(data)\n\n\tWriteFile(t, path, jsn)\n\n}", "func NewFileWriter(filename string) (w *Writer, err error) {\n\tw = &Writer{IndentSize: DefaultIndentSize}\n\tif filename == \"\" || filename == \"-\" {\n\t\tw.Output = &nonCloserWriter{writer: os.Stdout}\n\t} else {\n\t\tos.MkdirAll(filepath.Dir(filename), 0755)\n\t\tw.Output, err = os.OpenFile(filename,\n\t\t\tsyscall.O_WRONLY|syscall.O_CREAT|syscall.O_TRUNC, 0644)\n\t}\n\treturn\n}", "func NewFileWriter(conf *FileLogConfig) *FileLogWriter {\n\tw := &FileLogWriter{\n\t\tC: conf,\n\t}\n\trl, _ := rotatelogs.New(conf.GlobPattern,\n\t\trotatelogs.WithLinkName(conf.LinkName), // 生成软链,指向最新日志文件\n\t\trotatelogs.WithMaxAge(conf.GetMaxAge()), // 文件最大保存时间\n\t\trotatelogs.WithRotationTime(conf.GetRotationTime()), // 日志切割时间间隔\n\t\trotatelogs.WithClock(conf.GetClock()),\n\t\trotatelogs.WithRotationCount(uint(conf.RotationCount)),\n\t)\n\tw.Rl = rl\n\treturn w\n}", "func NewFileWriter(path string) *FileWriter {\n\treturn &FileWriter{\n\t\tpath: path,\n\t\tdecorators: []Decorator{\n\t\t\tFormatDecorator{},\n\t\t\tImportDecorator{},\n\t\t},\n\t}\n}", "func (buf *logBuffer) newLogFile() (err error) {\n\tif buf.file != nil {\n\t\tbuf.Flush()\n\t\tbuf.file.Close()\n\t}\n\tfilename := fmt.Sprintf(\"%s.log.%s.%d\",\n\t\tbuf.level, time.Now().Format(timeFormat), os.Getpid())\n\tif buf.file, err = os.Create(filepath.Join(buf.logdir, filename)); err == nil {\n\t\tbuf.nbytes = 0\n\t\tbuf.Writer = bufio.NewWriterSize(buf.file, 
int(buf.bufsize))\n\t}\n\treturn\n}", "func createAppJson(dm util.DepManager, appDir, appName, appJson string) error {\n\n\tupdatedJson, err := getAndUpdateAppJson(dm, appName, appJson)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(filepath.Join(appDir, fileFlogoJson), []byte(updatedJson), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func newFile(datadir, what, uuid string) (*File, error) {\n\ttimestamp := time.Now().UTC()\n\tdir := path.Join(datadir, what, timestamp.Format(\"2006/01/02\"))\n\terr := os.MkdirAll(dir, 0755)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tname := dir + \"/ndt7-\" + what + \"-\" + timestamp.Format(\"20060102T150405.000000000Z\") + \".\" + uuid + \".jsonl.gz\"\n\t// My assumption here is that we have nanosecond precision and hence it's\n\t// unlikely to have conflicts. If I'm wrong, O_EXCL will let us know.\n\tfp, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twriter, err := gzip.NewWriterLevel(fp, gzip.BestSpeed)\n\tif err != nil {\n\t\tfp.Close()\n\t\treturn nil, err\n\t}\n\treturn &File{\n\t\tWriter: writer,\n\t\tFp: fp,\n\t}, nil\n}", "func NewConsoleLogWriter(json bool) *ConsoleLogWriter {\n\tconsoleWriter := &ConsoleLogWriter{\n\t\tjson: json,\n\t\tformat: \"[%T %D] [%L] (%S) %M\",\n\t\tw: make(chan LogRecord, LogBufferLength),\n\t\t// 兼容以往配置,默认输出 file/func/lineno\n\t\tcaller: true,\n\t}\n\tconsoleWriter.LogCloserInit()\n\n\tgo consoleWriter.run(stdout)\n\treturn consoleWriter\n}", "func newWriter(filename string, verbose bool, out io.Writer) (io.Writer, error) {\n\twriters := make([]io.Writer, 0)\n\tif len(filename) > 0 || !verbose {\n\t\tfile, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0660)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\twriters = append(writers, file)\n\t}\n\n\tif verbose {\n\t\twriters = append(writers, out)\n\t}\n\n\treturn io.MultiWriter(writers...), nil\n}", 
"func FileWriter(cfg FileConfig) (Writer, error) {\n\treturn func(msg *message.Message) error {\n\t\t// path + year + month + day + topic\n\t\tdir := path.Join(cfg.Path, getDate(\"2006\"), getDate(\"01\"), getDate(\"02\"), msg.Topic())\n\n\t\terr := os.MkdirAll(dir, os.ModePerm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// hour + minutes + seconds + prefix + messageId + suffix\n\t\tname := fmt.Sprintf(\"%s-%s\", getDate(\"15.04.05\"), *msg.MessageId)\n\n\t\tf, err := os.Create(path.Join(dir, name))\n\t\tif err != nil {\n\t\t\tif os.IsExist(err) {\n\t\t\t\tclog.Warn(\"Message already registered\")\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\n\t\tfmt.Fprint(f, *msg.Body)\n\t\treturn nil\n\t}, nil\n}", "func WriteJSONFile(payload interface{}, path string, compress bool) (finalPath string, e error) {\n\top := func(c int) error {\n\t\tif c > 0 {\n\t\t\tlog.Printf(\"#%d retrying to write json file to %s...\", c, path)\n\t\t}\n\t\ttmp := fmt.Sprintf(\"%s.tmp\", path)\n\t\tif compress {\n\t\t\tfinalPath = fmt.Sprintf(\"%s.json.gz\", path)\n\t\t} else {\n\t\t\tfinalPath = fmt.Sprintf(\"%s.json\", path)\n\t\t}\n\t\tdir, name := filepath.Dir(tmp), filepath.Base(tmp)\n\t\tex, _, e := FileExists(dir, name, false, false)\n\t\tif e != nil {\n\t\t\treturn repeat.HintStop(errors.WithMessage(e, \"unable to check existence for \"+tmp))\n\t\t}\n\t\tif ex {\n\t\t\tos.Remove(tmp)\n\t\t}\n\t\tdir, name = filepath.Dir(finalPath), filepath.Base(finalPath)\n\t\tex, _, e = FileExists(dir, name, false, false)\n\t\tif e != nil {\n\t\t\treturn repeat.HintStop(errors.WithMessage(e, \"unable to check existence for \"+finalPath))\n\t\t}\n\t\tif ex {\n\t\t\treturn repeat.HintStop(fmt.Errorf(\"%s already exists\", finalPath))\n\t\t}\n\t\tjsonBytes, e := json.Marshal(payload)\n\t\tif e != nil {\n\t\t\tlog.Printf(\"#%d failed to marshal payload %+v: %+v\", c, payload, e)\n\t\t\treturn repeat.HintStop(e)\n\t\t}\n\t\t_, e = bufferedWrite(tmp, jsonBytes, compress)\n\t\tif e != nil 
{\n\t\t\tlog.Printf(\"#%d %+v\", c, e)\n\t\t\treturn repeat.HintTemporary(e)\n\t\t}\n\t\te = os.Rename(tmp, finalPath)\n\t\tif e != nil {\n\t\t\tlog.Printf(\"#%d failed to rename %s to %s: %+v\", c, tmp, finalPath, e)\n\t\t\treturn repeat.HintTemporary(e)\n\t\t}\n\t\treturn nil\n\t}\n\n\te = repeat.Repeat(\n\t\trepeat.FnWithCounter(op),\n\t\trepeat.StopOnSuccess(),\n\t\trepeat.LimitMaxTries(conf.Args.DefaultRetry),\n\t\trepeat.WithDelay(\n\t\t\trepeat.FullJitterBackoff(500*time.Millisecond).WithMaxDelay(15*time.Second).Set(),\n\t\t),\n\t)\n\n\treturn\n}", "func WriteJSON(file string, data interface{}) error {\n\tcontent, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfd, err := os.Create(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fd.Close()\n\tif _, err = fd.Write(content); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewJSONLogParser(timestampField string, timestampKind kindElement) *JSONLogParser {\n\treturn &JSONLogParser{\n\t\ttimestampField: timestampField,\n\t\ttimestampKind: timestampKind,\n\t}\n}", "func writeEvent(filename string, event *Event) (error) {\n\t_, err := os.Stat(filename)\n\tif os.IsNotExist(err) {\n\t\t_, err := os.Create(filename)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t}\n\tfile, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tdata, _ := json.MarshalIndent(event, \"\", \"\\t\")\n\t_, err = file.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (self *WAL) createNewLog(firstRequestNumber uint32) (*log, error) {\n\tself.nextLogFileSuffix++\n\tlogFileName := path.Join(self.config.WalDir, fmt.Sprintf(\"log.%d\", firstRequestNumber))\n\tlog, _, err := self.openLog(logFileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tself.state.CurrentFileSuffix = log.suffix()\n\tself.state.CurrentFileOffset = 0\n\treturn log, nil\n}", "func JSONLogger(r *http.Request, status 
int, len int64, d time.Duration) {\n\tos.Stderr.WriteString(JSONLogMessage(time.Now, r.Method, r.URL, status, len, d, nil))\n}", "func NewJSONFormatter(keys map[string]string, full bool) (Formatter, error) {\n\tif len(keys) > 0 {\n\t\tstructure := true\n\t\tmapping := map[string]string{\n\t\t\t\"name\": \"name\", \"time\": \"time\", \"level\": \"level\", \"message\": \"message\",\n\t\t\t\"fields\": \"fields\", \"caller\": \"caller\", \"stack\": \"stack\",\n\t\t}\n\t\tfor key, value := range keys {\n\t\t\tif mapping[key] == \"\" {\n\t\t\t\t// We require that the key-name map must be pure.\n\t\t\t\treturn nil, fmt.Errorf(\"invalid json formatter key %q\", key)\n\t\t\t}\n\t\t\t// We ignore the case where all fields are mapped as empty, which is more practical.\n\t\t\tif value != \"\" && mapping[key] != value {\n\t\t\t\tstructure = false\n\t\t\t\tmapping[key] = value\n\t\t\t}\n\t\t}\n\t\t// when the json field cannot be predicted in advance, we use map to package the log data.\n\t\t// is there a better solution to improve the efficiency of json serialization?\n\t\tif !structure {\n\t\t\treturn NewJSONFormatterFromPool(newJSONFormatterMapPool(full, mapping)), nil\n\t\t}\n\t}\n\t// In most cases, the performance of json serialization of structure is higher than\n\t// that of json serialization of map. 
When the json field name has not changed, we\n\t// try to use structure for json serialization.\n\treturn NewJSONFormatterFromPool(newJSONFormatterObjectPool(full)), nil\n}", "func WriteNewResponse(w *http.ResponseWriter, r *http.Request, response JSONResponse) {\n\t// Echo back message\n\tresponse.Write(w, r)\n}", "func AddJSON(filename, JSONpath string, JSONdata []byte, pretty bool) error {\n\tjf, err := NewFile(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjf.SetPretty(pretty)\n\treturn jf.AddJSON(JSONpath, JSONdata)\n}", "func (s *stepSaver) newLogFile(fileName string) error {\n\tlf, err := newLogFile(s.Uploader, s.logFolder, fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.mutex.Lock()\n\ts.logFiles = append(s.logFiles, lf)\n\ts.mutex.Unlock()\n\treturn nil\n}", "func New(fpath string, fns ...Option) (*FileWriter, error) {\n\topt := defaultOption\n\tfor _, fn := range fns {\n\t\tfn(&opt)\n\t}\n\n\tfname := filepath.Base(fpath)\n\tif fname == \"\" {\n\t\treturn nil, fmt.Errorf(\"filename can't empty\")\n\t}\n\tdir := filepath.Dir(fpath)\n\tfi, err := os.Stat(dir)\n\tif err == nil && !fi.IsDir() {\n\t\treturn nil, fmt.Errorf(\"%s already exists and not a directory\", dir)\n\t}\n\tif os.IsNotExist(err) {\n\t\tif err = os.MkdirAll(dir, 0755); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"create dir %s error: %s\", dir, err.Error())\n\t\t}\n\t}\n\n\tcurrent, err := newWrapFile(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstdlog := log.New(os.Stderr, \"flog \", log.LstdFlags)\n\tch := make(chan *bytes.Buffer, opt.ChanSize)\n\n\tfiles, err := parseRotateItem(dir, fname, opt.RotateFormat)\n\tif err != nil {\n\t\t// set files a empty list\n\t\tfiles = list.New()\n\t\tstdlog.Printf(\"parseRotateItem error: %s\", err)\n\t}\n\n\tlastRotateFormat := time.Now().Format(opt.RotateFormat)\n\tvar lastSplitNum int\n\tif files.Len() > 0 {\n\t\trt := files.Front().Value.(rotateItem)\n\t\t// check contains is mush esay than compared with timestamp\n\t\tif 
strings.Contains(rt.fname, lastRotateFormat) {\n\t\t\tlastSplitNum = rt.rotateNum\n\t\t}\n\t}\n\n\tfw := &FileWriter{\n\t\topt: opt,\n\t\tdir: dir,\n\t\tfname: fname,\n\t\tstdlog: stdlog,\n\t\tch: ch,\n\t\tpool: &sync.Pool{New: func() interface{} { return new(bytes.Buffer) }},\n\n\t\tlastSplitNum: lastSplitNum,\n\t\tlastRotateFormat: lastRotateFormat,\n\n\t\tfiles: files,\n\t\tcurrent: current,\n\t}\n\n\tfw.wg.Add(1)\n\tgo fw.daemon()\n\n\treturn fw, nil\n}", "func writeByJSON(path string, data interface{}) error {\n\tos.MkdirAll(dname, os.ModeDir|os.ModePerm)\n\tfile, err := os.OpenFile(dname+path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tif err := json.NewEncoder(file).Encode(data); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (this *FileLogger) InitLogger(configJson string) error {\n\tvar err error\n\tif len(configJson) > 0 {\n\t\terr = json.Unmarshal([]byte(configJson), this)\n\t\tif err != nil {\n\t\t\t// fmt.Println(err.Error())\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\treturn errors.New(\"configJson cannot be nil\")\n\t}\n\tif len(this.FileName) <= 0 {\n\t\treturn errors.New(\"configJson must hava filename\")\n\t}\n\n\tfi, err := os.Stat(this.FilePath)\n\n\tif err != nil || !fi.IsDir() {\n\t\terr = os.MkdirAll(this.FilePath, os.ModeDir|0777)\n\t\tif err != nil {\n\t\t\t// fmt.Println(err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\treturn this.createLogFile()\n}", "func SaveJSON(f string, iface interface{}) error {\n\tif !PathExists(filepath.Dir(f)) {\n\t\treturn fmt.Errorf(\"Missing dir; %s\", f)\n\t}\n\n\tcontent, err := json.MarshalIndent(iface, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(f, content, 0644)\n}", "func writeJSON(search string, fileName string, nodeArr []nodeJSONStruct, edgeArr []edgeStruct, searchArr []*admin.Group) {\n var (\n jsonFile *os.File\n encoder *json.Encoder\n )\n\n if nodeArr != nil && edges.arr != nil {\n jsonFile = 
createJSONFile(outputJSONParentsDir, fileName)\n defer jsonFile.Close() \n\n // https://medium.com/eaciit-engineering/better-way-to-read-and-write-json-file-in-golang-9d575b7254f2 \n // also https://www.golangprograms.com/golang-writing-struct-to-json-file.html\n encoder = json.NewEncoder(jsonFile)\n encoder.Encode(parentsJSONStruct{SearchIdentity: search, NodeArr: nodeArr, EdgeArr: edgeArr})\n\n } else if searchArr != nil {\n jsonFile = createJSONFile(outputJSONSearchDir, fileName)\n defer jsonFile.Close() \n\n // https://medium.com/eaciit-engineering/better-way-to-read-and-write-json-file-in-golang-9d575b7254f2 \n // also https://www.golangprograms.com/golang-writing-struct-to-json-file.html\n encoder = json.NewEncoder(jsonFile)\n encoder.Encode(searchJSONStruct{SearchPrefix: search, GroupArr: searchArr})\n }\n}", "func newJSONEncoder() *jsonEncoder {\n\tenc := jsonPool.Get().(*jsonEncoder)\n\tenc.truncate()\n\treturn enc\n}", "func JSONLog(w io.Writer) LogFunc {\n\treturn func(v interface{}) {\n\t\tdata, err := json.Marshal(v)\n\t\tif err != nil {\n\t\t\tdata, err = json.Marshal(struct {\n\t\t\t\tContext string `json:\"context\"`\n\t\t\t\tDebugData string `json:\"debugData\"`\n\t\t\t\tError string `json:\"error\"`\n\t\t\t}{\n\t\t\t\tContext: \"Error marshaling 'debugData' into JSON\",\n\t\t\t\tDebugData: spew.Sdump(v),\n\t\t\t\tError: err.Error(),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\t// We really REALLY should never get here\n\t\t\t\tlog.Println(\"ERROR MARSHALLING THE MARSHALLING ERROR!:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif _, err := fmt.Fprintf(w, \"%s\\n\", data); err != nil {\n\t\t\tlog.Println(\"ERROR WRITING TO LOGGER:\", err)\n\t\t}\n\t}\n}", "func New(fileName string) JLogger {\n\n\tvar l = &jLog{}\n\tl.Logger = newLoggerFile(fileName, true)\n\treturn l\n}", "func (l *JSONFileWriter) Write(e *client.EventsResponse) error {\n\t// do not log if there is no events\n\tif len(e.Events) == 0 {\n\t\treturn nil\n\t}\n\n\tb, err := 
json.Marshal(e)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(\"un\", string(b))\n\n\tif _, err = l.f.Write(b); err != nil {\n\t\treturn err\n\t}\n\t_, err = l.f.Write([]byte(\"\\n\"))\n\treturn err\n}", "func initLogger(configFilePath string) (*zap.Logger, error) {\n\t// Open our jsonFile\n\tjsonFile, err := os.Open(configFilePath)\n\tif err != nil {\n\t\tfmt.Println(\"Unable To Load Logging Configuration File\", err)\n\t\treturn nil, err\n\t}\n\t// defer the closing of our jsonFile so that we can parse it later on\n\tdefer jsonFile.Close()\n\n\t// Check the configuration by trying to create a new logger\n\tvar newCfg zap.Config\n\tif err := json.NewDecoder(jsonFile).Decode(&newCfg); err != nil {\n\t\tfmt.Println(\"Unable To Parse Logging Configuration File\", err)\n\t\treturn nil, err\n\t}\n\tnewLogger, err := newCfg.Build()\n\tif err != nil {\n\t\tfmt.Println(\"Unable To Build Logger From Configuration\", err)\n\t\treturn nil, err\n\t}\n\n\t// Configuration is good, actually create the new config/logger\n\tjsonFile.Seek(0, 0)\n\tif err := json.NewDecoder(jsonFile).Decode(&cfg); err != nil {\n\t\tfmt.Println(\"Unable To Parse Logging Configuration File\", err)\n\t\treturn nil, err\n\t}\n\tnewLogger, err = cfg.Build()\n\n\treturn newLogger, err\n}", "func (jf *JFile) AddJSON(JSONpath string, JSONdata []byte) error {\n\tif err := jf.rootnode.AddJSON(JSONpath, JSONdata); err != nil {\n\t\treturn err\n\t}\n\t// Use the correct JSON function, depending on the pretty parameter\n\tJSON := jf.rootnode.JSON\n\tif jf.pretty {\n\t\tJSON = jf.rootnode.PrettyJSON\n\t}\n\tdata, err := JSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn jf.Write(data)\n}", "func WriteNew(fname string) error {\n\tf, err := os.Create(fname)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = f.WriteString(Gen().Encode())\n\tf.Close()\n\tif err == nil {\n\t\tInvalidateCache()\n\t}\n\treturn err\n}", "func NewLogger(logLevel string) (*zap.SugaredLogger, error) {\n\treturn 
NewLoggerWithEncoding(logLevel, \"json\")\n}", "func NewFileWriter(targetFile string, perm os.FileMode) *FileWriter {\n\treturn &FileWriter{\n\t\ttargetFile: targetFile,\n\t\tfileMode: perm,\n\t}\n}", "func NewFileWriter(path string) *fileWriter {\n\treturn &fileWriter{path: path}\n}", "func (app *application) writeJSON(w http.ResponseWriter, status int, data interface{}) error {\n\tjs, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjs = append(js, '\\n')\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(js)\n\n\treturn nil\n}", "func NewFileWriter(workDir string) FileWriter {\n\treturn &writerImpl{\n\t\tworkDir: workDir,\n\t}\n}", "func (currentArticles *Articles) write2jsonFile() {\n\tjsonFile, _ := json.MarshalIndent(&currentArticles, \"\", \" \")\n\t// Permissions: 1 – execute, 2 – write, 4 – read\n\twriteErr := ioutil.WriteFile(savedJSONFile, jsonFile, 0644)\n\tif writeErr != nil {\n\t\tpc, _, _, _ := runtime.Caller(0)\n\t\terrorWebLogger.FatalPrintln(getCurrentRPCmethod(pc), \"Write to json file error.\", writeErr)\n\t}\n}", "func SaveJSONFile(v interface{}, testRelativePath string) {\n\tjsonText, _ := json.MarshalIndent(v, \"\", \"\\t\")\n\n\tfilePath := getFullPath(testRelativePath)\n\terr := ioutil.WriteFile(filePath, jsonText, 0644)\n\n\tthrow.OnError(err)\n}", "func WriteJSON(w io.Writer, data interface{}, args ...interface{}) (err error) {\n\tpw := Writer{\n\t\tOptions: ojg.DefaultOptions,\n\t\tWidth: 80,\n\t\tMaxDepth: 3,\n\t\tSEN: false,\n\t}\n\tpw.w = w\n\tpw.config(args)\n\t_, err = pw.encode(data)\n\n\treturn\n}", "func CreateLogger(c echo.Context, uuid uuid.IUUID, level log.Level) {\n\tonce.Do(func() {\n\n\t\tformatter := new(log.JSONFormatter)\n\t\tformatter.TimestampFormat = \"2018-12-30 23:05:05\"\n\t\tformatter.DisableTimestamp = false\n\t\tlog.SetFormatter(formatter)\n\n\t\tfile, _ := os.OpenFile(\"mylog.txt\", os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)\n\t\tmw := io.MultiWriter(os.Stdout, 
file)\n\t\tlog.SetOutput(mw)\n\t\tlog.SetLevel(level)\n\n\t\tinstance = &Logger{\n\t\t\tType: \"REQUEST\",\n\t\t\tProcessID: uuid.GetUUID(),\n\t\t\tSourceIP: c.Request().RemoteAddr,\n\t\t\tHTTPMethod: c.Request().Method,\n\t\t\tEndPoint: c.Request().URL.Path,\n\t\t\tTrackingID: \"\", // User ID\n\t\t\tAppID: \"\", // App ID\n\t\t}\n\t})\n}", "func NewJSONLoggerWithHeaders(h ...string) Logger {\n\treturn func(r *http.Request, status int, length int64, d time.Duration) {\n\t\tm := make(map[string]string, len(h))\n\t\tfor _, name := range h {\n\t\t\tm[name] = r.Header.Get(name)\n\t\t}\n\t\tos.Stderr.WriteString(JSONLogMessage(time.Now, r.Method, r.URL, status, length, d, m))\n\t}\n}", "func writeToJSON(fid *os.File, jsonKey *JSON) (bool, error) {\n\tencodedJSON, err := json.Marshal(*jsonKey)\n\tif err == nil {\n\t\twrittenBytes, err := fid.Write(encodedJSON)\n\t\tif err == nil {\n\t\t\treturn writtenBytes > 0, err\n\t\t}\n\t\treturn false, err\n\t}\n\treturn false, err\n}", "func NewJsonFormatter() Formatter {\n\treturn JsonFormatter{}\n}", "func newWriter(config *config.Stream, fs afs.Service, rotationURL string, index int, created time.Time, emitter *emitter.Service) (*writer, error) {\n\twriterCloser, err := fs.NewWriter(context.Background(), config.URL, file.DefaultFileOsMode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := &writer{\n\t\tfs: fs,\n\t\tindex: index,\n\t\tdestURL: config.URL,\n\t\trotationURL: rotationURL,\n\t\tcloser: writerCloser,\n\t\tcreated: created,\n\t}\n\tresult.config = config\n\n\tif rotation := config.Rotation; rotation != nil {\n\t\tinitRotation(result, rotation, created, emitter)\n\t}\n\tif config.IsGzip() {\n\t\tgzWriter := gzip.NewWriter(writerCloser)\n\t\tresult.writer = gzWriter\n\t\tresult.flusher = gzWriter\n\n\t} else {\n\t\twriter := bufio.NewWriter(writerCloser)\n\t\tresult.writer = writer\n\t\tresult.flusher = writer\n\t}\n\treturn result, nil\n}", "func (l *JSONFileWriter) Close() error {\n\treturn l.f.Close()\n}", 
"func NewFileWriter(w io.Writer, limit Size) File {\n\treturn &FileWriter{Writer: w, Limit: limit}\n}", "func NewTMJSONLoggerNoTS(w io.Writer) Logger {\n\tlogger := kitlog.NewJSONLogger(w)\n\treturn &tmLogger{logger}\n}", "func SetJSONLog() *pkg.Logger {\r\n\treturn defaultLogger.SetJSONLog()\r\n}", "func CreatJsonFiles(path string) {\r\n\tif !pathExists(path) {\r\n\t\terr := os.Mkdir(path, os.ModePerm)\r\n\t\tif err != nil {\r\n\t\t\tfmt.Println(\"[ERROR]json path is error.\", err)\r\n\t\t\treturn\r\n\t\t}\r\n\t}\r\n\r\n\tif configSetting.AllIn {\r\n\t\twriteJsonFile(path+\"/\"+configSetting.ConfigName+\".json\", configMap)\r\n\t} else {\r\n\t\tfor k, v := range configMap {\r\n\t\t\twriteJsonFile(path+\"/\"+k+\".json\", v)\r\n\t\t}\r\n\t}\r\n}", "func SaveJSON(filename string, thing interface{}, mode os.FileMode) error {\n\tdata, err := json.MarshalIndent(thing, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn SaveBinary(filename, data, mode)\n}", "func NewLogger(FileLocation string) error {\r\n\tloggerconfig.Init()\r\n\tlogLevel := viper.GetString(\"ConsoleLevel\")\r\n\tif logLevel == \"\" {\r\n\t\tlogLevel = viper.GetString(\"FileLevel\")\r\n\t}\r\n\r\n\tlevel, err := logrus.ParseLevel(logLevel)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tstdOutHandler := os.Stdout\r\n\tfileHandler := &lumberjack.Logger{\r\n\t\tFilename: FileLocation,\r\n\t\tMaxSize: 2,\r\n\t\tMaxBackups: 6,\r\n\t\tCompress: true,\r\n\t\tMaxAge: 28,\r\n\t}\r\n\tlLogger := &logrus.Logger{\r\n\t\tOut: stdOutHandler,\r\n\t\tFormatter: getFormatter(viper.GetBool(\"ConsoleJSONFormat\")),\r\n\t\tHooks: make(logrus.LevelHooks),\r\n\t\tLevel: level,\r\n\t}\r\n\r\n\tif viper.GetBool(\"EnableConsole\") && viper.GetBool(\"EnableFile\") {\r\n\t\tlLogger.SetOutput(io.MultiWriter(stdOutHandler, fileHandler))\r\n\t} else {\r\n\t\tif viper.GetBool(\"EnableFile\") 
{\r\n\t\t\tlLogger.SetOutput(fileHandler)\r\n\t\t\tlLogger.SetFormatter(getFormatter(viper.GetBool(\"FileJSONFormat\")))\r\n\t\t}\r\n\t}\r\n\r\n\tlog.logger = lLogger\r\n\r\n\treturn nil\r\n}", "func NewToFile(filename string) *Logger {\n\tconf := zap.NewDevelopmentConfig()\n\tconf.OutputPaths = []string{\n\t\tfilename,\n\t}\n\tl, err := conf.Build(\n\t\tzap.AddStacktrace(zap.FatalLevel),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tz := l.Sugar()\n\treturn &Logger{z}\n}", "func writeJSON(w http.ResponseWriter, data JSON) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(data)\n}", "func writeJSON(w http.ResponseWriter, status int, data mapStringInterface) error {\n\tjs, err := json.Marshal(data)\n\t//js, err := json.MarshalIndent(data, \"\", \"\\t\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tjs = append(js, '\\n')\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tw.Write(js)\n\treturn nil\n}", "func write_file(m map[string]string, filename string) {\n jsonString, err := json.Marshal(m)\n if err != nil {\n fmt.Println(err)\n }\n\n err = ioutil.WriteFile(filename, jsonString, 0644)\n}", "func (this *File) Writer(fh io.Writer) error {\n\tenc := json.NewEncoder(fh)\n\tif this.indent {\n\t\tenc.SetIndent(\"\", \" \")\n\t}\n\tif err := enc.Encode(this.data); err != nil {\n\t\treturn err\n\t}\n\t// Success\n\treturn nil\n}" ]
[ "0.75449544", "0.67362654", "0.67352885", "0.654658", "0.6535897", "0.6480687", "0.6433155", "0.640034", "0.6375203", "0.6375203", "0.62555563", "0.61984885", "0.61931527", "0.61560726", "0.6143846", "0.6137884", "0.6104487", "0.6073839", "0.6059404", "0.5970588", "0.59556663", "0.5938677", "0.5867031", "0.5859275", "0.58392024", "0.58009845", "0.57721233", "0.5751002", "0.57481015", "0.57388", "0.57362205", "0.5698468", "0.5679509", "0.5674727", "0.5648959", "0.56390667", "0.5633448", "0.5599108", "0.55835265", "0.5577914", "0.555674", "0.55432117", "0.5533155", "0.5517745", "0.5514891", "0.5496331", "0.5482612", "0.5456424", "0.5424884", "0.54202515", "0.54103106", "0.5408127", "0.54045814", "0.5402689", "0.5342533", "0.53220767", "0.5318103", "0.53128177", "0.53098524", "0.52914447", "0.52849567", "0.52794796", "0.52623683", "0.5258495", "0.525797", "0.52546376", "0.52501124", "0.5245543", "0.5242876", "0.5219794", "0.521939", "0.52176994", "0.52137953", "0.52034616", "0.5195444", "0.5194989", "0.519361", "0.51832813", "0.51720905", "0.5162045", "0.5161607", "0.5159678", "0.51550305", "0.514483", "0.51429313", "0.5138011", "0.51191694", "0.5113953", "0.51031464", "0.5102274", "0.50985307", "0.5097451", "0.5085743", "0.5083052", "0.5071909", "0.5048862", "0.50449", "0.5039107", "0.50348175", "0.50336194" ]
0.743226
1
Write writes event response to file in json format
func (l *JSONFileWriter) Write(e *client.EventsResponse) error { // do not log if there is no events if len(e.Events) == 0 { return nil } b, err := json.Marshal(e) if err != nil { return err } fmt.Println("un", string(b)) if _, err = l.f.Write(b); err != nil { return err } _, err = l.f.Write([]byte("\n")) return err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func writeEvent(filename string, event *Event) (error) {\n\t_, err := os.Stat(filename)\n\tif os.IsNotExist(err) {\n\t\t_, err := os.Create(filename)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t}\n\tfile, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tdata, _ := json.MarshalIndent(event, \"\", \"\\t\")\n\t_, err = file.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func writeTimeEventsToFile(tmEvntRqsts Request, strOutputFileName string) string {\n\tpunchJSON, _ := json.MarshalIndent(tmEvntRqsts, \"\", \" \")\n\t// create the output file\n\toutFile, outFileError := os.Create(strOutputFileName)\n\tif outFileError != nil {\n\t\tlog.Fatal(outFileError)\n\t}\n\tdefer outFile.Close()\n\toutFile.Write(punchJSON)\n\treturn strOutputFileName\n}", "func Write(w http.ResponseWriter, e *Event) error {\n\t_, err := e.WriteTo(w)\n\tw.(http.Flusher).Flush()\n\treturn err\n}", "func write(resp *Response, w http.ResponseWriter) {\n\tjs, _ := json.Marshal(resp)\n\tfmt.Fprint(w, string(js))\n}", "func (ar *AppendEntriesResponse) Write(w http.ResponseWriter) error {\n b, err := json.Marshal(ar)\n if err != nil {\n return err\n }\n w.Write(b)\n return nil\n}", "func (o *PostEventCreated) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(201)\n}", "func writeJSON(w http.ResponseWriter, statusCode int, response interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\n\tencoder := json.NewEncoder(w)\n\tencoder.Encode(response)\n}", "func writeResponse(w http.ResponseWriter, response Response) {\n\tjson, err := json.Marshal(&response)\n\n\tif err != nil {\n\t\tfmt.Fprint(w, \"There was an error processing the request.\")\n\t}\n\n\tcommon.Log(fmt.Sprintf(\"Returning response %s\", json))\n\tfmt.Fprintf(w, \"%s\", json)\n}", "func (o *Operation) writeResponse(rw io.Writer, v 
interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\t// as of now, just log errors for writing response\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func writeJSONResponse(w http.ResponseWriter, r *http.Request){\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tcustomResponse := &CustomResponse{\n\t\tPlayer: \"Stephen Curry\",\n\t\tRole: []string{\"Guard\", \"Captain\", \"Face of the team\"},\n\t}\n\tjson, _ := json2.Marshal(customResponse)\n\tw.Write(json)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (s *Server) writeJSON(w http.ResponseWriter, data []byte) {\n\tw.Header().Set(contentType, jsonContentType)\n\t_, err := w.Write(data)\n\tif err != nil {\n\t\ts.logger.Errorf(\"error writing to response: %v\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func writeJSON(w http.ResponseWriter, v interface{}) error {\n\t// Indent the JSON so it's easier to read for hackers.\n\tdata, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"content-type\", \"application/json; charset=utf-8\")\n\t_, err = w.Write(data)\n\treturn err\n}", "func writeJSONResponse(w http.ResponseWriter, body interface{}) {\n\tbs, err := json.Marshal(body)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tif _, err := w.Write(bs); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func writeJSONResponse(w http.ResponseWriter, body interface{}) {\n\tbs, err := json.Marshal(body)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tif _, err := 
w.Write(bs); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func (app *application) writeJSON(w http.ResponseWriter, status int, data envelope, headers http.Header) error {\n\t// Encode the data to JSON, return error if any.\n\tjs, err := json.MarshalIndent(data, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Append a newline to make it easier to view in terminal applications.\n\tjs = append(js, '\\n')\n\n\t// Loop through the header map and add each header to the http.ResponseWriter header map.\n\tfor key, value := range headers {\n\t\tw.Header()[key] = value\n\t}\n\n\t// Add the \"Content-Type: application/json\" header.\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t// Write status code.\n\tw.WriteHeader(status)\n\tw.Write(js)\n\n\treturn nil\n}", "func (app *application) writeJSON(w http.ResponseWriter, status int, data interface{}) error {\n\tjs, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjs = append(js, '\\n')\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(js)\n\n\treturn nil\n}", "func (ce CustomEvent) WriteJSON() logger.JSONObj {\n\treturn logger.JSONObj{\n\t\t\"userID\": ce.UserID,\n\t\t\"sessionID\": ce.SessionID,\n\t\t\"context\": ce.Context,\n\t}\n}", "func writeJSON(w http.ResponseWriter, data JSON) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(data)\n}", "func writeJSON(w http.ResponseWriter, code int, v interface{}) error {\n\t// Set content type as json\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t// write the HTTP status code\n\tw.WriteHeader(code)\n\n\t// Write the Json output\n\treturn json.NewEncoder(w).Encode(v)\n}", "func writeFile(path string, data []byte) {\n\tfile, _ := os.Create(path)\n\tdefer file.Close()\n\tfile.Write(data)\n\tfmt.Println(\"JSON generate to \" + path + \" complete.\")\n\tfmt.Println()\n}", "func writeJSON(w http.ResponseWriter, data 
interface{}) {\n\tb, err := json.Marshal(data)\n\tif err != nil {\n\t\twriteErr(w, 500, err)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(b)\n}", "func WriteFileJSON(t *testing.T, path string, data interface{}) {\n\tjsn := helper.PrettyJSON(data)\n\n\tWriteFile(t, path, jsn)\n\n}", "func writeResponse(w *http.ResponseWriter, res responseData, status int) {\n\tresJSON, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(*w, \"Failed to parse struct `responseData` into JSON object\", http.StatusInternalServerError)\n\t}\n\n\t(*w).Header().Set(\"Content-Type\", \"application/json\")\n\t(*w).WriteHeader(status)\n\t(*w).Write(resJSON)\n}", "func writeJsonResponse(w http.ResponseWriter, content *[]byte, code int) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(*content)\n}", "func writeJSON(w http.ResponseWriter, status int, data mapStringInterface) error {\n\tjs, err := json.Marshal(data)\n\t//js, err := json.MarshalIndent(data, \"\", \"\\t\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tjs = append(js, '\\n')\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tw.Write(js)\n\treturn nil\n}", "func writeJSONFile(json []byte) {\n\terr := ioutil.WriteFile(\"output.json\", json, 0644)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func writeJSON(w http.ResponseWriter, o interface{}) {\n\tw.WriteHeader(http.StatusOK)\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \"\\t\")\n\te.Encode(o)\n}", "func writeJSONFile(jsonOutputPath string, writeChannel <-chan map[string]string, done chan<- bool, pretty bool) {\n\t// Init a JSON writer func\n\twriteString := createStringWriter(jsonOutputPath)\n\t// Init the JSON parse func and the breakline char\n\tjsonFunc, breakLine := getJSONFunc(pretty)\n\n\t//Info log...\n\tfmt.Println(\"Writing JSON file...\")\n\n\t// Write the first character of JSON file, starting with \"[\" since it will always generate\n\t// 
and array of records\n\twriteString(\"[\"+breakLine, false)\n\n\tfirst := true\n\n\tfor {\n\t\t// Waiting for records pushed into writerChannel\n\t\trecord, more := <-writeChannel\n\n\t\t// If the channel is \"open\" for more transmission\n\t\tif more {\n\t\t\t// If it is NOT the first record, break the line\n\t\t\tif !first {\n\t\t\t\twriteString(\",\"+breakLine, false)\n\t\t\t\t// otherwise don't break the line\n\t\t\t} else {\n\t\t\t\tfirst = false\n\t\t\t}\n\t\t\t// Parse the record into JSON\n\t\t\tjsonData := jsonFunc(record)\n\t\t\t// Writing the JSON string with the writer function\n\t\t\twriteString(jsonData, false)\n\t\t\t// If here, then no more records to parse and need to close the file\n\t\t} else {\n\t\t\t// Writing the last char to the file and close it\n\t\t\twriteString(breakLine+\"]\", true)\n\t\t\t// Print that we are done to terminal\n\t\t\tfmt.Printf(\"Done!\\nOutput JSON written to: %s\", jsonOutputPath)\n\t\t\t// Send \"done\" signal to main func to let it know it can start exiting\n\t\t\tdone <- true\n\t\t\t// Break out of the loop\n\t\t\tbreak\n\t\t}\n\t}\n}", "func writeJSONResponse(payload interface{}, statusCode int, w http.ResponseWriter) {\n\t// Headers must be set before call WriteHeader or Write. see https://golang.org/pkg/net/http/#ResponseWriter\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\tif payload != nil {\n\t\ttypes.PanicIfError(json.NewEncoder(w).Encode(payload))\n\t}\n\n\tlogrus.Infof(\"%d Response sent. 
Payload: %#v\", statusCode, payload)\n}", "func WriteResponse(w http.ResponseWriter, d string) {\n\tw.WriteHeader(200)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Write([]byte(d))\n\treturn\n}", "func (o *GetEventsEventIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (a *API) writeJSON(w http.ResponseWriter, r *http.Request, code int, object interface{}) {\n\tjsonObject, err := json.Marshal(object)\n\tif err != nil {\n\t\ta.log(r).Warnf(\"Failed to encode json response: %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(jsonObject)\n\tif err != nil {\n\t\ta.log(r).Warnf(\"Failed to write response: %s\", err)\n\t}\n}", "func respondJSON (output interface{}, w http.ResponseWriter, r *http.Request) {\n // Convert Message to JSON\n jsonResponse, _ := json.MarshalIndent(output, \"\", \"\\t\")\n \n // Indicate to browser file is to be downloaded and serve\n w.Header().Add(\"Content-Disposition\", \"Attachment;filename=response\")\n w.WriteHeader(http.StatusCreated)\n w.Write(jsonResponse) \n}", "func writeResponse(w http.ResponseWriter, body interface{}, e error) error {\n\tvar (\n\t\tpayload []byte\n\t\tresponse Response\n\t\terr error\n\t)\n\tresponse = Response{\n\t\tResult: body,\n\t\tError: ResponseError{},\n\t}\n\tif e != nil {\n\t\tresponse.Error.Error = fmt.Sprintf(\"%v\", e)\n\t}\n\tpayload, err = json.MarshalIndent(response, \"\", \"\\t\")\n\n\tif !util.ErrorCheck(err) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(payload)\n\t}\n\treturn err\n}", "func (h *Handler) WriteResult(w http.ResponseWriter, result interface{}) error {\n\toutput, err := json.Marshal(result)\n\tif err != nil 
{\n\t\treturn fmt.Errorf(\"could not serialize event result (%w)\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tif _, err := w.Write(output); err != nil {\n\t\treturn fmt.Errorf(\"could not write event result (%w)\", err)\n\t}\n\treturn nil\n}", "func SaveToFile() error {\n\tdata, err := json.Marshal(Events)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(eventsFilename, data, 0644)\n}", "func writeJSONResponse(w http.ResponseWriter, status int, data []byte) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(\"Content-Length\", strconv.Itoa(len(data)))\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\tw.WriteHeader(status)\n\tw.Write(data)\n}", "func WriteJSON(w http.ResponseWriter, data model.APIData) {\n\tdata.GenerateMetaData()\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\terr := json.NewEncoder(w).Encode(model.JSONResponse{Data: data})\n\tif err != nil && err != http.ErrHandlerTimeout {\n\t\tpanic(err)\n\t}\n}", "func writeResponse(w http.ResponseWriter, code int, object interface{}) {\n\tfmt.Println(\"writing response:\", code, object)\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func WriteJson(w http.ResponseWriter, payload JsonSerializable, statusCode int, logger FileLogger) {\n\tjsonPayload, err := payload.json()\n\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t\tw.Write([]byte(err.Error()))\n\t}\n\n\tw.WriteHeader(statusCode)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(jsonPayload)\n}", "func write_file(m map[string]string, filename string) {\n jsonString, err := json.Marshal(m)\n if err != nil {\n fmt.Println(err)\n }\n\n err = ioutil.WriteFile(filename, jsonString, 0644)\n}", "func writeHTTPResponseInWriter(httpRes 
http.ResponseWriter, httpReq *http.Request, nobelPrizeWinnersResponse []byte, err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(httpRes, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Request %s Succesfully Completed\", httpReq.RequestURI)\n\thttpRes.Header().Set(\"Content-Type\", \"application/json\")\n\thttpRes.Write(nobelPrizeWinnersResponse)\n}", "func writeResponse(data interface{}, w http.ResponseWriter) error {\n\tvar (\n\t\tenc []byte\n\t\terr error\n\t)\n\tenc, err = json.Marshal(data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to marshal, err = %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tn, err := w.Write(enc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to write, err = %s\", err)\n\t}\n\tif n != len(enc) {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Short write sent = %d, wrote = %d\", len(enc), n)\n\t}\n\treturn nil\n}", "func writeJSON(w http.ResponseWriter, thing interface{}, indent string) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tencoder := json.NewEncoder(w)\n\tencoder.SetIndent(\"\", indent)\n\tif err := encoder.Encode(thing); err != nil {\n\t\tapiLog.Warnf(\"JSON encode error: %v\", err)\n\t}\n}", "func WriteResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (r *Response) Write(w io.Writer) error", "func writeJSONFile(t tree, filename string) {\n\t// marshal data retrieved into JSON\n\tdata, err := json.Marshal(t)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error marshaling data for JSON => {%s}\", err)\n\t}\n\n\t// write data into file\n\terr = ioutil.WriteFile(filename, data, os.ModePerm)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to write json data 
to file, %s => {%s}\", destJSON, err)\n\t}\n}", "func writeJSON(w http.ResponseWriter, v interface{}, status int) error {\n\t// set application/json header\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.WriteHeader(status)\n\n\t// json encoder\n\tvar encoder = json.NewEncoder(w)\n\n\t// encodes interface to json\n\treturn encoder.Encode(v)\n}", "func write(w http.ResponseWriter, status int, payload interface{}) {\n\tw.WriteHeader(status)\n\traw, err := json.Marshal(payload)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, _ = w.Write(raw)\n}", "func (server *testHTTPServerImpl) WriteJSONResponse(w http.ResponseWriter, jsonData []byte) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(contentTypeHeaderName, \"application/json; charset=utf-8\")\n\t_, err := w.Write(jsonData)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to json repsonse; %s\", err)\n\t}\n}", "func (c *connection) writeJson(message msg) error {\n\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\tfmt.Println(\"=>\", message)\n\treturn c.ws.WriteJSON(message)\n}", "func WriteResponse(w http.ResponseWriter, object interface{}, rerr *irma.RemoteError) {\n\tstatus, bts := JsonResponse(object, rerr)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\t_, err := w.Write(bts)\n\tif err != nil {\n\t\tLogWarning(errors.WrapPrefix(err, \"failed to write response\", 0))\n\t}\n}", "func WriteJSON(w http.ResponseWriter, v interface{}) error {\n\tdata, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\treturn &errcode.HTTPErr{Status: http.StatusInternalServerError, Err: err}\n\t}\n\n\tw.Header().Set(\"content-type\", \"application/json; charset=utf-8\")\n\n\t_, err = w.Write(data)\n\treturn err\n}", "func writeJSON(w http.ResponseWriter, data interface{}) error {\n\tif err, ok := data.(error); ok {\n\t\tdata = struct{ Error string }{err.Error()}\n\t\tw.WriteHeader(400)\n\t}\n\to, err := json.MarshalIndent(data, \"\", \" \")\n\tif err != 
nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(o)\n\treturn err\n}", "func Write(w http.ResponseWriter, result interface{}, status int) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tjson.NewEncoder(w).Encode(result)\n}", "func writeJSON(w http.ResponseWriter, code int, value interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tjson.NewEncoder(w).Encode(value)\n}", "func (e *eventRecorder) Write(b []byte) (int, error) {\n\tn, err := e.ResponseWriter.Write(b)\n\te.event.Size += int64(n)\n\treturn n, err\n}", "func WriteNewResponse(w *http.ResponseWriter, r *http.Request, response JSONResponse) {\n\t// Echo back message\n\tresponse.Write(w, r)\n}", "func WriteResponse(w http.ResponseWriter, v interface{}, statusCode int) {\n\tresBody, err := json.Marshal(v)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\t_, _ = w.Write(resBody)\n}", "func writeFile(v string) {\n\t// 打开文件\n\tfilePtr, err := os.OpenFile(\"mqtt.json\", os.O_CREATE|os.O_WRONLY, os.ModePerm)\n\tif err != nil {\n\t\tmqtt.ERROR.Println(err)\n\t}\n\n\tdefer filePtr.Close()\n\n\ttype Data struct {\n\t\tDeviceID string `JSON:\"deviceID\"` //设备id\n\t\tTimestamp string `JSON:\"timestamp\"` //时间戳\n\t\tFields map[string]string `JSON:\"fields\"` //标签\n\t}\n\tvar data Data\n\tif err := json.Unmarshal([]byte(v), &data); err == nil {\n\n\t\t// 创建Json编码器\n\t\tencoder := json.NewEncoder(filePtr)\n\t\terr = encoder.Encode(data)\n\t\tif err != nil {\n\t\t\tmqtt.ERROR.Println(\"writeFile failed\", err.Error())\n\t\t} else {\n\t\t\tmqtt.ERROR.Println(\"writeFile success\")\n\t\t}\n\t} else {\n\t\tmqtt.ERROR.Println(err)\n\t}\n\n}", "func (s *SubmissionHandler) writeJSON(i interface{}) error {\n\tif e := util.WriteJSON(s.conn, i); e != nil {\n\t\treturn e\n\t}\n\t_, e := 
s.conn.Write([]byte(util.EOT))\n\treturn e\n}", "func WriteJSON(file string, data interface{}) error {\n\tcontent, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfd, err := os.Create(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fd.Close()\n\tif _, err = fd.Write(content); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *Mouse) WriteJSON() {\n\tvar flags int\n\tif FileExists() {\n\t\tos.Truncate(\"mouse.json\", 0)\n\t\tflags = os.O_WRONLY\n\t} else {\n\t\tflags = os.O_WRONLY | os.O_CREATE\n\t}\n\tfile, _ := os.OpenFile(\"mouse.json\", flags, 0755)\n\tdefer file.Close()\n\tjson.NewEncoder(file).Encode(m)\n}", "func (handler Handler) WriteJSON(w http.ResponseWriter, v interface{}, statusCode int) error {\n\tif !handler.wroteHeader {\n\t\thandler.WriteJSONHeader(w)\n\t\tw.WriteHeader(statusCode)\n\t}\n\n\tb, err := handler.EncodeJSON(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (fs *OneFile) Write(name, event string) (int, error) {\n\tvar err error\n\n\tif !strings.HasSuffix(event, \"\\n\") {\n\t\tevent = event + \"\\n\"\n\t}\n\n\tn, err := fs.r.Write([]byte(event))\n\tif err != nil {\n\t\treturn n, errors.Wrap(err, \"fs.file.write\")\n\t}\n\treturn n, nil\n}", "func (o *CreateTaskCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Location\n\n\tlocation := o.Location.String()\n\tif location != \"\" {\n\t\trw.Header().Set(\"Location\", location)\n\t}\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func WriteJSON(w http.ResponseWriter, v interface{}, status int) {\n\tp, err := json.Marshal(&v)\n\tif err != nil {\n\t\tJSONError(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tw.Write(p)\n}", "func (c *connection) 
writeJSON(payload Payload) error {\n\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\treturn c.ws.WriteJSON(payload)\n}", "func WriteJSONResponse(w http.ResponseWriter, val interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(w).Encode(val)\n}", "func JsonFileLogger(out SuperMarketLog) error {\n\toutput, _ := json.Marshal(out) // Create he output to log\n\tstringOutput := string(output) + \"\\n\" // Append a newline to the output\n\t//If the file doesn't exist, create it or append to the file\n\tf, err := os.OpenFile(\"rest.log\", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif _, err := f.Write([]byte(stringOutput)); err != nil { //Write out to the log\n\t\tlog.Fatal(err)\n\t}\n\tif err := f.Close(); err != nil { //Close the writer\n\t\tlog.Fatal(err)\n\t}\n\treturn err\n}", "func writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {\n\n\t// Deal with CORS\n\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"DELETE, GET, HEAD, OPTIONS, POST, PUT\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\t// Allow any headers\n\t\tif wantedHeaders := r.Header.Get(\"Access-Control-Request-Headers\"); wantedHeaders != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", wantedHeaders)\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, `{\"error\":\"failed to marshal json\"}`)\n\t\treturn\n\t}\n\n\tw.WriteHeader(code)\n\tfmt.Fprintln(w, string(b))\n}", "func WriteJSON(w http.ResponseWriter, data interface{}, status int) error {\n\tb, e := json.Marshal(data)\n\tif e != nil {\n\t\treturn sdk.WrapError(e, \"WriteJSON> 
unable to marshal : %v\", e)\n\t}\n\treturn Write(w, b, status, \"application/json\")\n}", "func (file *File) WriteJSON(object interface{}) error {\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn file.write(data)\n}", "func (app *application) writeJSON(w http.ResponseWriter, status int, data interface{}, headers http.Header) error {\n\t// Encode the data to JSON, returning the error if there was one.\n\tjs, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Append a newline to make it easier to view in terminal applications.\n\tjs = append(js, '\\n')\n\n\t// At this point, we know that we won't encounter any more errors before writing the\n\t// response, so it's safe to add any headers that we want to include. We loop\n\t// through the header map and add each header to the http.ResponseWriter header map.\n\t// Note that it's OK if the provided header map is nil. Go doesn't throw an error\n\t// if you try to range over (or generally, read from) a nil map.\n\tfor key, value := range headers {\n\t\tw.Header()[key] = value\n\t}\n\n\t// Add the \"Content-Type: application/json\" header, then write the status code and\n\t// JSON response.\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tw.Write(js)\n\n\treturn nil\n}", "func WriteJSONResponse(w http.ResponseWriter, status int, value interface{}) {\n\tdata, _ := json.Marshal(value)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(\"Content-Length\", strconv.Itoa(len(data)))\n\tw.WriteHeader(status)\n\tw.Write(data)\n}", "func (o *PostEventDefault) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(500)\n}", "func WriteResponse(w http.ResponseWriter, mensaje string, code int) {\n\tmessage := myTypes.Respuesta{\n\t\tMessage: mensaje,\n\t}\n\tresponse, _ := json.Marshal(message)\n\n\tw.Header().Set(\"Content-Type\", 
\"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(response)\n}", "func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}", "func WriteJSON(w http.ResponseWriter, data interface{}) error {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tb, err := json.MarshalIndent(data, \"\", \" \")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn err\n\t}\n\tw.Write(b)\n\treturn nil\n}", "func WriteJSON(w http.ResponseWriter, v interface{}) error {\n\tjsonReply, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t_, err = w.Write(jsonReply)\n\treturn err\n}", "func WriteResponse(w http.ResponseWriter, code int, resp interface{}) error {\n\tj, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(j)\n\treturn err\n}", "func WriteResponse(w http.ResponseWriter, code int, err error, data interface{}, t0 time.Time) {\n\tw.WriteHeader(code)\n\tresp := &Response{Data: data, Dur: fmt.Sprint(time.Since(t0)), OK: false}\n\tif code < 300 {\n\t\tresp.OK = true\n\t}\n\tif err != nil {\n\t\tresp.Err = err.Error()\n\t}\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Infof(\"failed to json encode response: %v\", err)\n\t\tif _, err = w.Write([]byte(spew.Sdump(resp))); err != nil {\n\t\t\tlog.Infof(\"failed to write dump of response: %v\", err)\n\t\t}\n\t}\n}", "func WriteJSON(outputPath string, data interface{}) error {\n\tvar b bytes.Buffer\n\tencoder := json.NewEncoder(&b)\n\tif err := encoder.Encode(data); err != 
nil {\n\t\tlog.Error(\"Error while Encoding object\")\n\t\treturn err\n\t}\n\terr := ioutil.WriteFile(outputPath, b.Bytes(), DefaultFilePermission)\n\tif err != nil {\n\t\tlog.Errorf(\"Error writing json to file: %s\", err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *Output) writeResponse(response string) error {\r\n\t// write the response\r\n\tif _, err := o.writer.WriteString(response + \"\\n\"); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}", "func Write(w http.ResponseWriter, result interface{}, status int) error {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\treturn json.NewEncoder(w).Encode(result)\n}", "func WriteJSON(file string, content interface{}, args ...interface{}) error {\n\tbody, err := json.Marshal(content)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn Write(file, string(body), args...)\n}", "func WriteJson(w http.ResponseWriter, object interface{}) {\n\tWriteResponse(w, object, nil)\n}", "func (e *EJ) Write(data interface{}) error {\n\n\tvalue, err := json.Marshal(data)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"json marshal: %s\", err.Error())\n\t}\n\n\tif err := e.jsonHandler.Write(value); err != nil {\n\t\treturn fmt.Errorf(\"write: %s\", err.Error())\n\t}\n\n\treturn nil\n}", "func writeSuccessResponseJSON(w http.ResponseWriter, response []byte) {\n\twriteResponse(w, http.StatusOK, response, mimeJSON)\n}", "func (h *JSONWriter) Write(w http.ResponseWriter, r *http.Request, e interface{}) {\n\th.WriteCode(w, r, http.StatusOK, e)\n}", "func writeJSON(w http.ResponseWriter, obj interface{}) {\n\tif json.NewEncoder(w).Encode(obj) != nil {\n\t\thttp.Error(w, \"Failed to encode response\", http.StatusInternalServerError)\n\t}\n}", "func WriteJSON(res *restful.Response, code int, payload interface{}) {\n\n\t// response, err := json.Marshal(payload)\n\t// if err != nil {\n\t// \tInternalServerErrorResponse(w, err)\n\t// } else {\n\n\t_ = res.WriteHeaderAndJson(code, payload, 
\"application/json\")\n\t// w.Header().Set(\"Content-Type\", )\n\t// w.WriteHeader(code)\n\t// w.Write(response)\n\t// }\n}", "func WriteResponse(w http.ResponseWriter, code int, object interface{}) {\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func writeResponse(body []byte, w *http.ResponseWriter) {\n\t(*w).Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t_, err := (*w).Write(body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\t(*w).WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func writeResponse(logger *log.Logger, w http.ResponseWriter, status int, content string) {\n\tw.WriteHeader(status)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tresp := Response{\n\t\tStatus: http.StatusText(status),\n\t\tContent: content,\n\t}\n\n\terr := json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlogger.Printf(\"could not encode response: %v\", err)\n\t}\n}", "func (s Status) WriteJSON(w http.ResponseWriter) error {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(s.Code)\n\t_, err := fmt.Fprintf(w, `{\"error\":%q}`, s.String())\n\treturn err\n}", "func Write(w http.ResponseWriter, data interface{}, statusCode int) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.WriteHeader(statusCode)\n\n\t// For now, we're assuming json.Marshal succeeds...\n\tmarshalledData, _ := json.Marshal(data)\n\tw.Write(marshalledData)\n}", "func (response *JSONResponse) Write(w *http.ResponseWriter, r *http.Request) {\n\t// Deref writer\n\twriter := *w\n\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\n\t// Add string Status if it doesn't exist, add appropriate headers\n\tif response.OK {\n\t\tif response.Status == \"\" 
{\n\t\t\tresponse.Status = \"success\"\n\t\t}\n\t\twriter.WriteHeader(http.StatusOK)\n\t} else {\n\t\tif response.Status == \"\" {\n\t\t\tresponse.Status = \"fail\"\n\t\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\t} else if response.Status == \"error\" {\n\t\t\twriter.WriteHeader(http.StatusNoContent)\n\t\t} else {\n\t\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\t}\n\t}\n\n\t// Log this to debug\n\tlog.Debug().\n\t\tStr(\"Path\", r.URL.Path).\n\t\tStr(\"Method\", r.Method).\n\t\tStr(\"Output\", fmt.Sprintf(\"%v\", response.Output)).\n\t\tStr(\"Status\", response.Status).\n\t\tBool(\"OK\", response.OK).\n\t\tMsg(\"Full Response:\")\n\n\t// Write out this response\n\tjson.NewEncoder(writer).Encode(response.Output)\n}", "func (o *CreateFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}" ]
[ "0.74765354", "0.67054415", "0.65498316", "0.64659226", "0.6440549", "0.6394546", "0.63838637", "0.6372237", "0.6349943", "0.6326542", "0.63255614", "0.6323442", "0.63022256", "0.6286735", "0.6286735", "0.6261201", "0.6259819", "0.62488407", "0.62475145", "0.61788714", "0.61557037", "0.6149556", "0.61494225", "0.6145607", "0.6130164", "0.6121488", "0.6111957", "0.6098527", "0.60747945", "0.60744977", "0.60732406", "0.6065464", "0.60620767", "0.6058165", "0.6044843", "0.6043863", "0.60233253", "0.60221857", "0.6016953", "0.599149", "0.59894454", "0.59771204", "0.5965978", "0.59640723", "0.5946334", "0.59453416", "0.5939233", "0.5937188", "0.59308213", "0.5929745", "0.59292644", "0.59241146", "0.59104496", "0.59014004", "0.5899309", "0.5894777", "0.587213", "0.58696485", "0.5868923", "0.5867067", "0.5864197", "0.5851313", "0.58379877", "0.5828216", "0.5820949", "0.581411", "0.5811673", "0.58099973", "0.5790868", "0.5788432", "0.577401", "0.576566", "0.57632184", "0.57585365", "0.5750492", "0.57480645", "0.5734961", "0.5726134", "0.5725721", "0.57230663", "0.5722215", "0.5721713", "0.5718981", "0.5717542", "0.5715985", "0.5701718", "0.5678387", "0.567679", "0.5666095", "0.5663242", "0.5659763", "0.5658716", "0.5653246", "0.56501836", "0.5649279", "0.5648322", "0.56263644", "0.5622201", "0.5615416", "0.5602921" ]
0.8088646
0
Close closes the File. It returns an error, if any.
func (l *JSONFileWriter) Close() error { return l.f.Close() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func FileClose(f *os.File,) error", "func (ff *File) Close() error {\n\treturn nil\n}", "func (f *realFile) Close() error { return f.file.Close() }", "func (ff failingFile) Close() error {\n\treturn nil\n}", "func (t *File) Close() error {\n\tt.closed = true\n\treturn t.file.Close()\n}", "func (f *file) Close() error {\n\treturn nil\n}", "func (f *File) Close() error {\n\treturn nil\n}", "func (f *File) Close() error {\n\treturn nil\n}", "func Close(f *os.File) error {\n\treturn f.Close()\n}", "func (lf *File) Close() error {\n\tlf.mutex.Lock()\n\tdefer lf.mutex.Unlock()\n\n\tif lf.file != nil {\n\t\terr := lf.file.Close()\n\t\tlf.file = nil\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (f *File) Close() error {\n\tvar err error\n\tif f.closer != nil {\n\t\terr = f.closer.Close()\n\t\tf.closer = nil\n\t}\n\treturn err\n}", "func (f *File) Close() error {\n\treturn errUnsupported\n}", "func (cf *closeableFile) Close() error {\n\tcf.mu.Lock()\n\tdefer cf.mu.Unlock()\n\t// Sanity check - close should not have been called yet.\n\tif cf.closed {\n\t\tcf.staticOptions.Critical(\"cannot close the file; already closed\")\n\t}\n\n\t// Ensure that all data has actually hit the disk.\n\tif err := cf.File.Sync(); err != nil {\n\t\treturn err\n\t}\n\tcf.closed = true\n\treturn cf.File.Close()\n}", "func (f *File) Close() error {\n\tif f.IsClosed {\n\t\treturn os.ErrClosed\n\t}\n\n\tf.IsClosed = true\n\n\treturn nil\n}", "func (s *Stream) Close() error {\n\treturn s.file.Close()\n}", "func (f *File) Close() {\n\tf.src.Close()\n}", "func CloseFile(f *os.File) {\n\terr := f.Close()\n\tMustCheck(err)\n}", "func (r *RLockedFile) Close() (err error) {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\tif r.refs == 0 {\n\t\treturn os.ErrInvalid\n\t}\n\n\tr.refs--\n\tif r.refs == 0 {\n\t\terr = r.File.Close()\n\t}\n\n\treturn err\n}", "func (self *SAMFile) Close() error {\n\tif self == nil {\n\t\treturn nil\n\t}\n\treturn self.samClose()\n}", "func (h *Handler) Close() error {\n\t// 
close os.File\n\treturn h.file.Close()\n}", "func (r *RTC) Close() error {\n\treturn r.file.Close()\n}", "func (f *File) Close(_ context.Context) error {\n\tif f.f == nil {\n\t\tif f.closed {\n\t\t\treturn filestore.ErrClosed\n\t\t}\n\t\treturn filestore.ErrNotOpened\n\t}\n\tfile := f.f\n\tf.f = nil\n\tf.closed = true\n\terr := file.Close()\n\tif err != nil {\n\t\tif errors.Is(err, os.ErrClosed) {\n\t\t\treturn filestore.ErrClosed\n\t\t}\n\t\treturn filestore.ErrInternal\n\t}\n\treturn nil\n}", "func (s *Fs) Close() error {\n\treturn nil\n}", "func (f *FATFile) Close() error {\n\treturn nil\n}", "func CloseFile(file closer) {\n\terr := file.Close()\n\tCheck(err)\n}", "func (f *File) Close() error {\n\tif f.tempFile != nil {\n\t\tdefer f.tempFile.Close()\n\n\t\terr := os.Remove(f.tempFile.Name())\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\n\t\tf.tempFile = nil\n\t}\n\n\tif f.writeBuffer != nil {\n\n\t\thandle, err := f.getObjectHandle()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tctx, cancel := context.WithCancel(f.fileSystem.ctx)\n\t\tdefer func() { cancel() }()\n\t\tw := handle.NewWriter(ctx)\n\t\tdefer w.Close()\n\t\tif _, err := io.Copy(w, f.writeBuffer); err != nil {\n\t\t\t//cancel context (replaces CloseWithError)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tf.writeBuffer = nil\n\treturn nil\n}", "func (p *LocalFile) Close() error {\n\tif p.f != nil {\n\t\treturn p.f.Close()\n\t}\n\tp.f = nil\n\treturn nil\n}", "func (p *LocalFile) Close() error {\n\tif p.f != nil {\n\t\treturn p.f.Close()\n\t}\n\tp.f = nil\n\treturn nil\n}", "func (m *FileSource) Close() error { return nil }", "func (f *File) Close() error {\n\treturn syscall.Close(f.lockfd)\n}", "func closeFile(f *os.File) error {\n\tif f != nil {\n\t\tif err := f.Close(); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (t *T) Close() {\n\tt.file.Close()\n}", "func (f *File) Close() error {\n\tf.mu.Lock()\n\tif f.rc != 
nil {\n\t\tclose(f.rc)\n\t\tf.rc = nil\n\t}\n\tf.mu.Unlock()\n\tif v, ok := f.r.(io.ReadCloser); ok {\n\t\treturn v.Close()\n\t}\n\treturn ErrNotSupported\n}", "func (i *Item) Close() error {\n\treturn i.File.Close()\n}", "func (*mockFile) Close() error {\n\treturn nil\n}", "func (fod fileOrDir) Close() (err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = translateErr(err)\n\t\t}\n\t}()\n\tif fod.file != nil {\n\t\terr = fod.file.Close()\n\t}\n\tif fod.dir != nil {\n\t\tfod.dir.node = nil\n\t}\n\tfod.file = nil\n\tfod.dir = nil\n\treturn err\n}", "func (c *fileClient) Close() error {\n\treturn errNotImplemented.New(Kind)\n}", "func (s *FileSource) Close() error {\n\treturn nil\n}", "func (f *File) Close() error {\n\n\tf.closeFileHandles()\n\tf.modTime = defaultTime\n\n\treturn os.RemoveAll(f.dir)\n}", "func (f *File) Close() error {\n\tif f.Body != nil {\n\t\tif asCloser, ok := f.Body.(io.ReadCloser); ok {\n\t\t\treturn asCloser.Close()\n\t\t}\n\t}\n\treturn nil\n}", "func (r *RotatingFile) Close() error {\n\tdefer r.lock.Unlock()\n\tr.lock.Lock()\n\tsignal.Stop(r.signalChannel)\n\treturn r.file.Close()\n}", "func (fc *fileCloser) Close() error {\n\terr := fc.Reader.Close() // usually a nopcloser, but sometimes state cleanup.\n\terr2 := fc.f.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn err2\n}", "func (f *assetFile) Close() error {\n\treturn nil\n}", "func (fp *File) Close() error {\n\terr := fp.Writer.Close()\n\tif err != nil {\n\t\tfp.Fp.Close()\n\t\treturn err\n\t}\n\treturn fp.Fp.Close()\n}", "func (spi *SPI) Close() error {\n\treturn spi.file.Close()\n}", "func (file *File) Close() (err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tos.Remove(file.tmpfile.Name())\n\t\t\tfile.aborted = true\n\t\t}\n\t}()\n\n\tif file.closed {\n\t\terr = errors.New(\"close on closed file\")\n\t\treturn\n\t}\n\tif file.aborted {\n\t\terr = errors.New(\"close on aborted file\")\n\t\treturn\n\t}\n\n\tif err = file.tmpfile.Close(); err != nil 
{\n\t\treturn\n\t}\n\n\terr = os.Rename(file.tmpfile.Name(), file.name)\n\n\tfile.closed = true\n\treturn\n}", "func closeFile(outfile *os.File) {\n\terr := outfile.Close()\n\tif err != nil {\n\t\tlog.Printf(\"error closing %s: %v\\n\", outfile.Name(), err)\n\t\tos.Exit(1)\n\t}\n}", "func (this *File) Close() error {\n\tthis.log.Debug(\"<persistence.File>Close{ path=%v }\", strconv.Quote(this.path))\n\n\t// Stop all tasks\n\tif err := this.Tasks.Close(); err != nil {\n\t\treturn err\n\t}\n\n\t// Success\n\treturn nil\n}", "func closeFile(f *os.File) {\n\t_ = f.Close()\n}", "func (t *TarFile) Close() error {\n\terr := t.Writer.Close()\n\treturn err\n}", "func (f *SeriesFile) Close() error {\n\tdefer os.RemoveAll(f.Path())\n\treturn f.SeriesFile.Close()\n}", "func (l *Logger) Close() error {\n\treturn l.file.Close()\n}", "func (l *FileLog) Close() error {\n\tl.rw.Lock()\n\tdefer l.rw.Unlock()\n\n\tvar err error\n\tif l.file != nil {\n\t\terr = l.file.Close()\n\t\tl.file = nil\n\t}\n\treturn err\n}", "func (tf *Temp) Close() error {\n\ttf.Lock()\n\tdefer tf.Unlock()\n\n\tif err := tf.file.Close(); err != nil {\n\t\treturn ex.New(err)\n\t}\n\tif err := os.Remove(tf.file.Name()); err != nil {\n\t\treturn ex.New(err)\n\t}\n\treturn nil\n}", "func CloseLogFile() error {\n\treturn file.Close()\n}", "func (directory) Close() error { return nil }", "func (fs *FileStream) Close() error {\n\tfs.f = nil\n\tfs.size = 0\n\treturn nil\n}", "func (fi *File) Close() error {\n\tfi.Lock()\n\tif fi.hasChanges {\n\t\terr := fi.mod.Sync()\n\t\tif err != nil {\n\t\t\tfi.Unlock()\n\t\t\treturn err\n\t\t}\n\n\t\tfi.hasChanges = false\n\n\t\t// explicitly stay locked for flushUp call,\n\t\t// it will manage the lock for us\n\t\treturn fi.flushUp()\n\t}\n\tfi.Unlock()\n\n\treturn nil\n}", "func (fm *FileIORWManager) Close() (err error) {\n\treturn fm.fd.Close()\n}", "func (l *FileLog) Close() (err error) {\n\tl.Lock()\n\tdefer l.Unlock()\n\n\tif err = l.file.Close(); err != nil {\n\t\treturn 
err\n\t}\n\tl.file = nil\n\treturn nil\n}", "func (f *SeriesFile) Close() {\n\tdefer os.RemoveAll(f.Path())\n\tif err := f.SeriesFile.Close(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (writer *FileWriter) Close() error {\n\treturn writer.file.Close()\n}", "func (f *File) Close() {\n\tf.handleConcatenatedFiles()\n\tif f.AddInitPy {\n\t\tif err := f.AddInitPyFiles(); err != nil {\n\t\t\tlog.Fatalf(\"%s\", err)\n\t\t}\n\t}\n\tif err := f.w.Close(); err != nil {\n\t\tlog.Fatalf(\"Failed to finalise zip file: %s\", err)\n\t}\n\tif err := f.f.Close(); err != nil {\n\t\tlog.Fatalf(\"Failed to close file: %s\", err)\n\t}\n}", "func (f MemFile) Close() error {\n\treturn nil\n}", "func (f *File) Close() error {\n\tf.l.Lock()\n\tdefer f.l.Unlock()\n\tif f.closed {\n\t\treturn nil\n\t}\n\tf.closed = true\n\tif f.f != nil {\n\t\tf.closeFile()\n\t}\n\tif f.sig != nil {\n\t\tsignal.Stop(f.sig)\n\t\tclose(f.sig)\n\t\tf.sig = nil\n\t}\n\tif f.pidfile != \"\" {\n\t\tos.Remove(f.pidfile)\n\t}\n\treturn nil\n}", "func (s *FileStore) Close() error {\n\treturn nil\n}", "func (r *Reader) Close() error {\n\tatomic.StoreInt32(&r.closed, 1)\n\treturn r.file.Close()\n}", "func (x *Reader) Close() error {\n\tx.Reader = nil\n\tif x.File != nil {\n\t\tif err := x.File.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tx.File = nil\n\t}\n\treturn nil\n}", "func (o *handler) Close() {\r\n\tif o.f != nil {\r\n\t\tif err := o.f.Close(); err != nil {\r\n\t\t\tfmt.Printf(\"ERROR closing file: %s\", err)\r\n\t\t}\r\n\t\to.f = nil\r\n\t}\r\n}", "func CloseFile() error {\n\tlogrus.SetOutput(os.Stdout)\n\n\tif logFile != nil {\n\t\treturn logFile.Close()\n\t}\n\treturn nil\n}", "func (mf TMMFile) Close() error {\n\te := int(C.munmap_binfile(castGoTMMFileToC(mf)))\n\tif e != 0 {\n\t\treturn fmt.Errorf(\"got %d error while unmapping the file\", e)\n\t}\n\treturn nil\n}", "func (f *FileHandler) Close() {}", "func (e *ExcelFile) Close() error {\n\treturn e.zipFile.Close()\n}", "func (l *Logger) 
closeFile() (err error) {\n\tif l.f == nil {\n\t\t// File does not exist - no need to close, return\n\t\treturn\n\t}\n\n\t// Get current file's name, we need this for post-close actions\n\tname := l.f.Name()\n\n\t// Flush contents\n\tif err = l.flush(); err != nil {\n\t\treturn\n\t}\n\n\t// Close file\n\tif err = l.f.Close(); err != nil {\n\t\treturn\n\t}\n\n\t// Set file to nil\n\tl.f = nil\n\t// Set buffer to nil\n\tl.w = nil\n\n\tif l.count == 0 {\n\t\t// File has no contents, remove file within a gorotuine\n\t\tgo os.Remove(name)\n\t} else if l.onRotate != nil {\n\t\t// File has been rotated & onRotate func is set, call on on rotate func within a gorotuine\n\t\tgo l.onRotate(name)\n\t}\n\n\tl.count = 0\n\treturn\n}", "func closeFile(file io.Closer) {\n\tif err := file.Close(); err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"Error: %s\\n\", err)\n\t}\n}", "func (r *Reader) Close() error {\n\treturn r.imageFile.Close()\n}", "func (dv DomVisit) Close() {\n\tif dv.badFile != nil {\n\t\terr := dv.badFile.Close()\n\t\tcheck(err)\n\t}\n}", "func (z *zipFile) Close() error {\n\terr := z.Writer.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn z.f.Close()\n}", "func (w *BufferedFileWriter) Close() error {\n\tclose(w.stopChan)\n\tw.lock.Lock()\n\terr := w.buffer.Flush()\n\tw.buffer = nil\n\tif err == nil {\n\t\terr = w.file.Close()\n\t} else {\n\t\te := w.file.Close()\n\t\tif e != nil {\n\t\t\tlogError(e)\n\t\t}\n\t}\n\tw.file = nil\n\tw.lock.Unlock()\n\treturn err\n}", "func (file *Remote) Close() error {\n\t_, err := file.client.Send(&Tclunk{\n\t\tFID: file.fid,\n\t})\n\treturn err\n}", "func (r *Scanner) Close() error {\n\terr1 := r.journalDir.Close()\n\terr2 := r.file.Close()\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\treturn err2\n}", "func (t *Tail) Close() error {\n\terr := t.posFd.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = t.fileFd.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (r *Region) Close() error 
{\n\treturn r.f.Close()\n}", "func (p *pathVec) Close() {\n\tp.file.Close()\n}", "func (f *FileRotator) Close() error {\n\tf.fileLock.Lock()\n\tdefer f.fileLock.Unlock()\n\n\t// Stop the ticker and flush for one last time\n\tf.flushTicker.Stop()\n\tf.flushBuffer()\n\n\t// Stop the go routines\n\tif !f.closed {\n\t\tclose(f.doneCh)\n\t\tclose(f.purgeCh)\n\t\tf.closed = true\n\t\tf.currentFile.Close()\n\t}\n\n\treturn nil\n}", "func (wt *WireTap) Close() {\n\twt.file.Close()\n}", "func (f *AnonymousFile) Close(ctx context.Context) error {\n\tf.position = 0\n\treturn nil\n}", "func (w *FileWriter) Close() (err error) {\n\tw.Lock()\n\tdefer w.Unlock()\n\n\t// Rotate before closing\n\tif err := w.rotateIfNeeded(); err != nil {\n\t\treturn err\n\t}\n\n\t// Close the file if we did not rotate\n\tif err := w.current.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tw.current = nil\n\treturn nil\n}", "func (e *Extent) Close() (err error) {\n\tif e.HasClosed() {\n\t\treturn\n\t}\n\tif err = e.file.Close(); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func Close() {\n\tif initialized {\n\t\treturn\n\t}\n\tspiFile.Close()\n}", "func (r *Reader) Close() error {\n\treturn r.logFile.Close()\n}", "func (g *Gonf) Close() {\n\tif err := g.file.Close(); err != nil {\n\t\tlog.Printf(\"%v\\n\")\n\t}\n\tif err := g.watcher.Close(); err != nil {\n\t\tlog.Printf(\"%v\\n\")\n\t}\n\tg.watchFile = false\n}", "func (l *Logger) Close() {\n\tl.file.Close()\n}", "func (l *Logger) Close() {\n\tl.file.Close()\n}", "func (cfp *FsPool) Close() error {\n\tif cfp.reader != nil {\n\t\terr := cfp.reader.Close()\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\tcfp.reader = nil\n\t\tcfp.fileIndex = -1\n\t}\n\n\treturn nil\n}", "func (h *Handler) Close() error {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\treturn h.closeORCFile()\n}", "func (f stdioFileHandle) Close() error {\n\treturn ErrUnsupported\n}", "func (D Disk) Close() {\n\tD.filePnt.Close()\n}", "func (f *CachedFile) Close() 
error {\n\tf.wg.Done()\n\treturn nil\n}", "func (f *RecordStorage) Close() {\n\tf.file.Close()\n}", "func (l *SimpleLogger) Close() error {\n\tif l.file != nil {\n\t\treturn l.file.Close()\n\t}\n\treturn nil\n}" ]
[ "0.80392176", "0.75970346", "0.755414", "0.74301916", "0.74277353", "0.7345811", "0.732451", "0.732451", "0.720555", "0.71502286", "0.7140525", "0.71077", "0.70642084", "0.700904", "0.6963524", "0.69555306", "0.6917481", "0.69032925", "0.6894671", "0.68582016", "0.6857412", "0.68524355", "0.68273723", "0.6807885", "0.67451113", "0.67148775", "0.6710524", "0.6710524", "0.66929805", "0.6690267", "0.6676614", "0.6659871", "0.66588694", "0.6640903", "0.6636935", "0.6610872", "0.6600828", "0.65865874", "0.6575193", "0.6572432", "0.6568676", "0.65583086", "0.65560126", "0.6555541", "0.6550477", "0.6542039", "0.65337396", "0.64866805", "0.6484322", "0.6479825", "0.64661485", "0.64605725", "0.6410419", "0.6384994", "0.63770837", "0.6330745", "0.6307846", "0.6302377", "0.6286049", "0.6279169", "0.6272206", "0.6269076", "0.62583685", "0.62528616", "0.6237575", "0.62145865", "0.6212486", "0.62041336", "0.62010044", "0.6200498", "0.61976266", "0.6180711", "0.617361", "0.6169612", "0.6155331", "0.61496633", "0.6106085", "0.6096002", "0.6094416", "0.60901994", "0.60726154", "0.6045643", "0.6041657", "0.60403633", "0.60334194", "0.6019028", "0.6013506", "0.6000457", "0.59914625", "0.59836376", "0.59767115", "0.59729975", "0.5972801", "0.5972801", "0.59663963", "0.59586096", "0.5949737", "0.5922093", "0.5918171", "0.59035784", "0.59024525" ]
0.0
-1
FROM HERE: Advanced Unicode normalization and filtering, see and for more details.
func NormalizeText(text string) string { isOk := func(r rune) bool { return r < 32 || r >= 127 } // The isOk filter is such that there is no need to chain to norm.NFC t := transform.Chain(norm.NFKD, transform.RemoveFunc(isOk)) // This Transformer could also trivially be applied as an io.Reader // or io.Writer filter to automatically do such filtering when reading // or writing data anywhere. text, _, _ = transform.String(t, text) text = strings.Trim(text, " ") return strings.ToLower(text) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert(source string) string {\n\tdefaultDiacriticsRemovalMap := map[string][]string{\n\t\t\"A\": {\"\\u0041\", \"\\u24B6\", \"\\uFF21\", \"\\u00C0\", \"\\u00C1\", \"\\u00C2\", \"\\u1EA6\", \"\\u1EA4\", \"\\u1EAA\", \"\\u1EA8\", \"\\u00C3\", \"\\u0100\", \"\\u0102\", \"\\u1EB0\", \"\\u1EAE\", \"\\u1EB4\", \"\\u1EB2\", \"\\u0226\", \"\\u01E0\", \"\\u00C4\", \"\\u01DE\", \"\\u1EA2\", \"\\u00C5\", \"\\u01FA\", \"\\u01CD\", \"\\u0200\", \"\\u0202\", \"\\u1EA0\", \"\\u1EAC\", \"\\u1EB6\", \"\\u1E00\", \"\\u0104\", \"\\u023A\", \"\\u2C6F\"},\n\t\t\"AA\": {\"\\uA732\"},\n\t\t\"AE\": {\"\\u00C6\", \"\\u01FC\", \"\\u01E2\"},\n\t\t\"AO\": {\"\\uA734\"},\n\t\t\"AU\": {\"\\uA736\"},\n\t\t\"AV\": {\"\\uA738\", \"\\uA73A\"},\n\t\t\"AY\": {\"\\uA73C\"},\n\t\t\"B\": {\"\\u0042\", \"\\u24B7\", \"\\uFF22\", \"\\u1E02\", \"\\u1E04\", \"\\u1E06\", \"\\u0243\", \"\\u0182\", \"\\u0181\"},\n\t\t\"C\": {\"\\u0043\", \"\\u24B8\", \"\\uFF23\", \"\\u0106\", \"\\u0108\", \"\\u010A\", \"\\u010C\", \"\\u00C7\", \"\\u1E08\", \"\\u0187\", \"\\u023B\", \"\\uA73E\"},\n\t\t\"D\": {\"\\u0044\", \"\\u24B9\", \"\\uFF24\", \"\\u1E0A\", \"\\u010E\", \"\\u1E0C\", \"\\u1E10\", \"\\u1E12\", \"\\u1E0E\", \"\\u0110\", \"\\u018B\", \"\\u018A\", \"\\u0189\", \"\\uA779\"},\n\t\t\"DZ\": {\"\\u01F1\", \"\\u01C4\"},\n\t\t\"Dz\": {\"\\u01F2\", \"\\u01C5\"},\n\t\t\"E\": {\"\\u0045\", \"\\u24BA\", \"\\uFF25\", \"\\u00C8\", \"\\u00C9\", \"\\u00CA\", \"\\u1EC0\", \"\\u1EBE\", \"\\u1EC4\", \"\\u1EC2\", \"\\u1EBC\", \"\\u0112\", \"\\u1E14\", \"\\u1E16\", \"\\u0114\", \"\\u0116\", \"\\u00CB\", \"\\u1EBA\", \"\\u011A\", \"\\u0204\", \"\\u0206\", \"\\u1EB8\", \"\\u1EC6\", \"\\u0228\", \"\\u1E1C\", \"\\u0118\", \"\\u1E18\", \"\\u1E1A\", \"\\u0190\", \"\\u018E\"},\n\t\t\"F\": {\"\\u0046\", \"\\u24BB\", \"\\uFF26\", \"\\u1E1E\", \"\\u0191\", \"\\uA77B\"},\n\t\t\"G\": {\"\\u0047\", \"\\u24BC\", \"\\uFF27\", \"\\u01F4\", \"\\u011C\", \"\\u1E20\", \"\\u011E\", \"\\u0120\", \"\\u01E6\", \"\\u0122\", \"\\u01E4\", 
\"\\u0193\", \"\\uA7A0\", \"\\uA77D\", \"\\uA77E\"},\n\t\t\"H\": {\"\\u0048\", \"\\u24BD\", \"\\uFF28\", \"\\u0124\", \"\\u1E22\", \"\\u1E26\", \"\\u021E\", \"\\u1E24\", \"\\u1E28\", \"\\u1E2A\", \"\\u0126\", \"\\u2C67\", \"\\u2C75\", \"\\uA78D\"},\n\t\t\"I\": {\"\\u0049\", \"\\u24BE\", \"\\uFF29\", \"\\u00CC\", \"\\u00CD\", \"\\u00CE\", \"\\u0128\", \"\\u012A\", \"\\u012C\", \"\\u0130\", \"\\u00CF\", \"\\u1E2E\", \"\\u1EC8\", \"\\u01CF\", \"\\u0208\", \"\\u020A\", \"\\u1ECA\", \"\\u012E\", \"\\u1E2C\", \"\\u0197\"},\n\t\t\"J\": {\"\\u004A\", \"\\u24BF\", \"\\uFF2A\", \"\\u0134\", \"\\u0248\"},\n\t\t\"K\": {\"\\u004B\", \"\\u24C0\", \"\\uFF2B\", \"\\u1E30\", \"\\u01E8\", \"\\u1E32\", \"\\u0136\", \"\\u1E34\", \"\\u0198\", \"\\u2C69\", \"\\uA740\", \"\\uA742\", \"\\uA744\", \"\\uA7A2\"},\n\t\t\"L\": {\"\\u004C\", \"\\u24C1\", \"\\uFF2C\", \"\\u013F\", \"\\u0139\", \"\\u013D\", \"\\u1E36\", \"\\u1E38\", \"\\u013B\", \"\\u1E3C\", \"\\u1E3A\", \"\\u0141\", \"\\u023D\", \"\\u2C62\", \"\\u2C60\", \"\\uA748\", \"\\uA746\", \"\\uA780\"},\n\t\t\"LJ\": {\"\\u01C7\"},\n\t\t\"Lj\": {\"\\u01C8\"},\n\t\t\"M\": {\"\\u004D\", \"\\u24C2\", \"\\uFF2D\", \"\\u1E3E\", \"\\u1E40\", \"\\u1E42\", \"\\u2C6E\", \"\\u019C\"},\n\t\t\"N\": {\"\\u004E\", \"\\u24C3\", \"\\uFF2E\", \"\\u01F8\", \"\\u0143\", \"\\u00D1\", \"\\u1E44\", \"\\u0147\", \"\\u1E46\", \"\\u0145\", \"\\u1E4A\", \"\\u1E48\", \"\\u0220\", \"\\u019D\", \"\\uA790\", \"\\uA7A4\"},\n\t\t\"NJ\": {\"\\u01CA\"},\n\t\t\"Nj\": {\"\\u01CB\"},\n\t\t\"O\": {\"\\u004F\", \"\\u24C4\", \"\\uFF2F\", \"\\u00D2\", \"\\u00D3\", \"\\u00D4\", \"\\u1ED2\", \"\\u1ED0\", \"\\u1ED6\", \"\\u1ED4\", \"\\u00D5\", \"\\u1E4C\", \"\\u022C\", \"\\u1E4E\", \"\\u014C\", \"\\u1E50\", \"\\u1E52\", \"\\u014E\", \"\\u022E\", \"\\u0230\", \"\\u00D6\", \"\\u022A\", \"\\u1ECE\", \"\\u0150\", \"\\u01D1\", \"\\u020C\", \"\\u020E\", \"\\u01A0\", \"\\u1EDC\", \"\\u1EDA\", \"\\u1EE0\", \"\\u1EDE\", \"\\u1EE2\", \"\\u1ECC\", \"\\u1ED8\", \"\\u01EA\", \"\\u01EC\", 
\"\\u00D8\", \"\\u01FE\", \"\\u0186\", \"\\u019F\", \"\\uA74A\", \"\\uA74C\"},\n\t\t\"OI\": {\"\\u01A2\"},\n\t\t\"OO\": {\"\\uA74E\"},\n\t\t\"OU\": {\"\\u0222\"},\n\t\t\"OE\": {\"\\u008C\", \"\\u0152\"},\n\t\t\"oe\": {\"\\u009C\", \"\\u0153\"},\n\t\t\"P\": {\"\\u0050\", \"\\u24C5\", \"\\uFF30\", \"\\u1E54\", \"\\u1E56\", \"\\u01A4\", \"\\u2C63\", \"\\uA750\", \"\\uA752\", \"\\uA754\"},\n\t\t\"Q\": {\"\\u0051\", \"\\u24C6\", \"\\uFF31\", \"\\uA756\", \"\\uA758\", \"\\u024A\"},\n\t\t\"R\": {\"\\u0052\", \"\\u24C7\", \"\\uFF32\", \"\\u0154\", \"\\u1E58\", \"\\u0158\", \"\\u0210\", \"\\u0212\", \"\\u1E5A\", \"\\u1E5C\", \"\\u0156\", \"\\u1E5E\", \"\\u024C\", \"\\u2C64\", \"\\uA75A\", \"\\uA7A6\", \"\\uA782\"},\n\t\t\"S\": {\"\\u0053\", \"\\u24C8\", \"\\uFF33\", \"\\u1E9E\", \"\\u015A\", \"\\u1E64\", \"\\u015C\", \"\\u1E60\", \"\\u0160\", \"\\u1E66\", \"\\u1E62\", \"\\u1E68\", \"\\u0218\", \"\\u015E\", \"\\u2C7E\", \"\\uA7A8\", \"\\uA784\"},\n\t\t\"T\": {\"\\u0054\", \"\\u24C9\", \"\\uFF34\", \"\\u1E6A\", \"\\u0164\", \"\\u1E6C\", \"\\u021A\", \"\\u0162\", \"\\u1E70\", \"\\u1E6E\", \"\\u0166\", \"\\u01AC\", \"\\u01AE\", \"\\u023E\", \"\\uA786\"},\n\t\t\"TZ\": {\"\\uA728\"},\n\t\t\"U\": {\"\\u0055\", \"\\u24CA\", \"\\uFF35\", \"\\u00D9\", \"\\u00DA\", \"\\u00DB\", \"\\u0168\", \"\\u1E78\", \"\\u016A\", \"\\u1E7A\", \"\\u016C\", \"\\u00DC\", \"\\u01DB\", \"\\u01D7\", \"\\u01D5\", \"\\u01D9\", \"\\u1EE6\", \"\\u016E\", \"\\u0170\", \"\\u01D3\", \"\\u0214\", \"\\u0216\", \"\\u01AF\", \"\\u1EEA\", \"\\u1EE8\", \"\\u1EEE\", \"\\u1EEC\", \"\\u1EF0\", \"\\u1EE4\", \"\\u1E72\", \"\\u0172\", \"\\u1E76\", \"\\u1E74\", \"\\u0244\"},\n\t\t\"V\": {\"\\u0056\", \"\\u24CB\", \"\\uFF36\", \"\\u1E7C\", \"\\u1E7E\", \"\\u01B2\", \"\\uA75E\", \"\\u0245\"},\n\t\t\"VY\": {\"\\uA760\"},\n\t\t\"W\": {\"\\u0057\", \"\\u24CC\", \"\\uFF37\", \"\\u1E80\", \"\\u1E82\", \"\\u0174\", \"\\u1E86\", \"\\u1E84\", \"\\u1E88\", \"\\u2C72\"},\n\t\t\"X\": {\"\\u0058\", \"\\u24CD\", \"\\uFF38\", \"\\u1E8A\", 
\"\\u1E8C\"},\n\t\t\"Y\": {\"\\u0059\", \"\\u24CE\", \"\\uFF39\", \"\\u1EF2\", \"\\u00DD\", \"\\u0176\", \"\\u1EF8\", \"\\u0232\", \"\\u1E8E\", \"\\u0178\", \"\\u1EF6\", \"\\u1EF4\", \"\\u01B3\", \"\\u024E\", \"\\u1EFE\"},\n\t\t\"Z\": {\"\\u005A\", \"\\u24CF\", \"\\uFF3A\", \"\\u0179\", \"\\u1E90\", \"\\u017B\", \"\\u017D\", \"\\u1E92\", \"\\u1E94\", \"\\u01B5\", \"\\u0224\", \"\\u2C7F\", \"\\u2C6B\", \"\\uA762\"},\n\t\t\"a\": {\"\\u0061\", \"\\u24D0\", \"\\uFF41\", \"\\u1E9A\", \"\\u00E0\", \"\\u00E1\", \"\\u00E2\", \"\\u1EA7\", \"\\u1EA5\", \"\\u1EAB\", \"\\u1EA9\", \"\\u00E3\", \"\\u0101\", \"\\u0103\", \"\\u1EB1\", \"\\u1EAF\", \"\\u1EB5\", \"\\u1EB3\", \"\\u0227\", \"\\u01E1\", \"\\u00E4\", \"\\u01DF\", \"\\u1EA3\", \"\\u00E5\", \"\\u01FB\", \"\\u01CE\", \"\\u0201\", \"\\u0203\", \"\\u1EA1\", \"\\u1EAD\", \"\\u1EB7\", \"\\u1E01\", \"\\u0105\", \"\\u2C65\", \"\\u0250\", \"\\u00A0\", \"@\"},\n\t\t\"aa\": {\"\\uA733\"},\n\t\t\"ae\": {\"\\u00E6\", \"\\u01FD\", \"\\u01E3\"},\n\t\t\"ao\": {\"\\uA735\"},\n\t\t\"au\": {\"\\uA737\"},\n\t\t\"av\": {\"\\uA739\", \"\\uA73B\"},\n\t\t\"ay\": {\"\\uA73D\"},\n\t\t\"b\": {\"\\u0062\", \"\\u24D1\", \"\\uFF42\", \"\\u1E03\", \"\\u1E05\", \"\\u1E07\", \"\\u0180\", \"\\u0183\", \"\\u0253\"},\n\t\t\"c\": {\"\\u0063\", \"\\u24D2\", \"\\uFF43\", \"\\u0107\", \"\\u0109\", \"\\u010B\", \"\\u010D\", \"\\u00E7\", \"\\u1E09\", \"\\u0188\", \"\\u023C\", \"\\uA73F\", \"\\u2184\", \"\\u00A9\"},\n\t\t\"d\": {\"\\u0064\", \"\\u24D3\", \"\\uFF44\", \"\\u1E0B\", \"\\u010F\", \"\\u1E0D\", \"\\u1E11\", \"\\u1E13\", \"\\u1E0F\", \"\\u0111\", \"\\u018C\", \"\\u0256\", \"\\u0257\", \"\\uA77A\"},\n\t\t\"dz\": {\"\\u01F3\", \"\\u01C6\"},\n\t\t\"e\": {\"\\u0065\", \"\\u24D4\", \"\\uFF45\", \"\\u00E8\", \"\\u00E9\", \"\\u00EA\", \"\\u1EC1\", \"\\u1EBF\", \"\\u1EC5\", \"\\u1EC3\", \"\\u1EBD\", \"\\u0113\", \"\\u1E15\", \"\\u1E17\", \"\\u0115\", \"\\u0117\", \"\\u00EB\", \"\\u1EBB\", \"\\u011B\", \"\\u0205\", \"\\u0207\", \"\\u1EB9\", \"\\u1EC7\", 
\"\\u0229\", \"\\u1E1D\", \"\\u0119\", \"\\u1E19\", \"\\u1E1B\", \"\\u0247\", \"\\u025B\", \"\\u01DD\"},\n\t\t\"f\": {\"\\u0066\", \"\\u24D5\", \"\\uFF46\", \"\\u1E1F\", \"\\u0192\", \"\\uA77C\"},\n\t\t\"g\": {\"\\u0067\", \"\\u24D6\", \"\\uFF47\", \"\\u01F5\", \"\\u011D\", \"\\u1E21\", \"\\u011F\", \"\\u0121\", \"\\u01E7\", \"\\u0123\", \"\\u01E5\", \"\\u0260\", \"\\uA7A1\", \"\\u1D79\", \"\\uA77F\"},\n\t\t\"h\": {\"\\u0068\", \"\\u24D7\", \"\\uFF48\", \"\\u0125\", \"\\u1E23\", \"\\u1E27\", \"\\u021F\", \"\\u1E25\", \"\\u1E29\", \"\\u1E2B\", \"\\u1E96\", \"\\u0127\", \"\\u2C68\", \"\\u2C76\", \"\\u0265\"},\n\t\t\"hv\": {\"\\u0195\"},\n\t\t\"i\": {\"\\u0069\", \"\\u24D8\", \"\\uFF49\", \"\\u00EC\", \"\\u00ED\", \"\\u00EE\", \"\\u0129\", \"\\u012B\", \"\\u012D\", \"\\u00EF\", \"\\u1E2F\", \"\\u1EC9\", \"\\u01D0\", \"\\u0209\", \"\\u020B\", \"\\u1ECB\", \"\\u012F\", \"\\u1E2D\", \"\\u0268\", \"\\u0131\"},\n\t\t\"j\": {\"\\u006A\", \"\\u24D9\", \"\\uFF4A\", \"\\u0135\", \"\\u01F0\", \"\\u0249\"},\n\t\t\"k\": {\"\\u006B\", \"\\u24DA\", \"\\uFF4B\", \"\\u1E31\", \"\\u01E9\", \"\\u1E33\", \"\\u0137\", \"\\u1E35\", \"\\u0199\", \"\\u2C6A\", \"\\uA741\", \"\\uA743\", \"\\uA745\", \"\\uA7A3\"},\n\t\t\"l\": {\"\\u006C\", \"\\u24DB\", \"\\uFF4C\", \"\\u0140\", \"\\u013A\", \"\\u013E\", \"\\u1E37\", \"\\u1E39\", \"\\u013C\", \"\\u1E3D\", \"\\u1E3B\", \"\\u017F\", \"\\u0142\", \"\\u019A\", \"\\u026B\", \"\\u2C61\", \"\\uA749\", \"\\uA781\", \"\\uA747\"},\n\t\t\"lj\": {\"\\u01C9\"},\n\t\t\"m\": {\"\\u006D\", \"\\u24DC\", \"\\uFF4D\", \"\\u1E3F\", \"\\u1E41\", \"\\u1E43\", \"\\u0271\", \"\\u026F\"},\n\t\t\"n\": {\"\\u006E\", \"\\u24DD\", \"\\uFF4E\", \"\\u01F9\", \"\\u0144\", \"\\u00F1\", \"\\u1E45\", \"\\u0148\", \"\\u1E47\", \"\\u0146\", \"\\u1E4B\", \"\\u1E49\", \"\\u019E\", \"\\u0272\", \"\\u0149\", \"\\uA791\", \"\\uA7A5\"},\n\t\t\"nj\": {\"\\u01CC\"},\n\t\t\"o\": {\"\\u006F\", \"\\u24DE\", \"\\uFF4F\", \"\\u00F2\", \"\\u00F3\", \"\\u00F4\", \"\\u1ED3\", \"\\u1ED1\", 
\"\\u1ED7\", \"\\u1ED5\", \"\\u00F5\", \"\\u1E4D\", \"\\u022D\", \"\\u1E4F\", \"\\u014D\", \"\\u1E51\", \"\\u1E53\", \"\\u014F\", \"\\u022F\", \"\\u0231\", \"\\u00F6\", \"\\u022B\", \"\\u1ECF\", \"\\u0151\", \"\\u01D2\", \"\\u020D\", \"\\u020F\", \"\\u01A1\", \"\\u1EDD\", \"\\u1EDB\", \"\\u1EE1\", \"\\u1EDF\", \"\\u1EE3\", \"\\u1ECD\", \"\\u1ED9\", \"\\u01EB\", \"\\u01ED\", \"\\u00F8\", \"\\u01FF\", \"\\u0254\", \"\\uA74B\", \"\\uA74D\", \"\\u0275\"},\n\t\t\"oi\": {\"\\u01A3\"},\n\t\t\"ou\": {\"\\u0223\"},\n\t\t\"oo\": {\"\\uA74F\"},\n\t\t\"p\": {\"\\u0070\", \"\\u24DF\", \"\\uFF50\", \"\\u1E55\", \"\\u1E57\", \"\\u01A5\", \"\\u1D7D\", \"\\uA751\", \"\\uA753\", \"\\uA755\", \"\\u2117\"},\n\t\t\"q\": {\"\\u0071\", \"\\u24E0\", \"\\uFF51\", \"\\u024B\", \"\\uA757\", \"\\uA759\"},\n\t\t\"r\": {\"\\u0072\", \"\\u24E1\", \"\\uFF52\", \"\\u0155\", \"\\u1E59\", \"\\u0159\", \"\\u0211\", \"\\u0213\", \"\\u1E5B\", \"\\u1E5D\", \"\\u0157\", \"\\u1E5F\", \"\\u024D\", \"\\u027D\", \"\\uA75B\", \"\\uA7A7\", \"\\uA783\", \"\\u00AE\"},\n\t\t\"s\": {\"\\u0073\", \"\\u24E2\", \"\\uFF53\", \"\\u00DF\", \"\\u015B\", \"\\u1E65\", \"\\u015D\", \"\\u1E61\", \"\\u0161\", \"\\u1E67\", \"\\u1E63\", \"\\u1E69\", \"\\u0219\", \"\\u015F\", \"\\u023F\", \"\\uA7A9\", \"\\uA785\", \"\\u1E9B\"},\n\t\t\"t\": {\"\\u0074\", \"\\u24E3\", \"\\uFF54\", \"\\u1E6B\", \"\\u1E97\", \"\\u0165\", \"\\u1E6D\", \"\\u021B\", \"\\u0163\", \"\\u1E71\", \"\\u1E6F\", \"\\u0167\", \"\\u01AD\", \"\\u0288\", \"\\u2C66\", \"\\uA787\"},\n\t\t\"tz\": {\"\\uA729\"},\n\t\t\"u\": {\"\\u0075\", \"\\u24E4\", \"\\uFF55\", \"\\u00F9\", \"\\u00FA\", \"\\u00FB\", \"\\u0169\", \"\\u1E79\", \"\\u016B\", \"\\u1E7B\", \"\\u016D\", \"\\u00FC\", \"\\u01DC\", \"\\u01D8\", \"\\u01D6\", \"\\u01DA\", \"\\u1EE7\", \"\\u016F\", \"\\u0171\", \"\\u01D4\", \"\\u0215\", \"\\u0217\", \"\\u01B0\", \"\\u1EEB\", \"\\u1EE9\", \"\\u1EEF\", \"\\u1EED\", \"\\u1EF1\", \"\\u1EE5\", \"\\u1E73\", \"\\u0173\", \"\\u1E77\", \"\\u1E75\", 
\"\\u0289\"},\n\t\t\"v\": {\"\\u0076\", \"\\u24E5\", \"\\uFF56\", \"\\u1E7D\", \"\\u1E7F\", \"\\u028B\", \"\\uA75F\", \"\\u028C\"},\n\t\t\"vy\": {\"\\uA761\"},\n\t\t\"w\": {\"\\u0077\", \"\\u24E6\", \"\\uFF57\", \"\\u1E81\", \"\\u1E83\", \"\\u0175\", \"\\u1E87\", \"\\u1E85\", \"\\u1E98\", \"\\u1E89\", \"\\u2C73\"},\n\t\t\"x\": {\"\\u0078\", \"\\u24E7\", \"\\uFF58\", \"\\u1E8B\", \"\\u1E8D\"},\n\t\t\"y\": {\"\\u0079\", \"\\u24E8\", \"\\uFF59\", \"\\u1EF3\", \"\\u00FD\", \"\\u0177\", \"\\u1EF9\", \"\\u0233\", \"\\u1E8F\", \"\\u00FF\", \"\\u1EF7\", \"\\u1E99\", \"\\u1EF5\", \"\\u01B4\", \"\\u024F\", \"\\u1EFF\"},\n\t\t\"z\": {\"\\u007A\", \"\\u24E9\", \"\\uFF5A\", \"\\u017A\", \"\\u1E91\", \"\\u017C\", \"\\u017E\", \"\\u1E93\", \"\\u1E95\", \"\\u01B6\", \"\\u0225\", \"\\u0240\", \"\\u2C6C\", \"\\uA763\"},\n\t}\n\n\tdiacriticsMap := map[rune]string{}\n\n\tfor base, letters := range defaultDiacriticsRemovalMap {\n\t\tfor i := range letters {\n\t\t\truneValue, _ := utf8.DecodeRuneInString(letters[i])\n\t\t\tdiacriticsMap[runeValue] = base\n\t\t}\n\t}\n\n\tcleanSource := \"\"\n\n\t// transforms accents and special characters from trim source\n\tfor _, runeValue := range strings.TrimSpace(source) {\n\t\tif v, ok := diacriticsMap[runeValue]; ok {\n\t\t\tcleanSource += v\n\t\t} else {\n\t\t\tcleanSource += string(runeValue)\n\t\t}\n\t}\n\n\t// replace special caracters with \"-\"\n\tre := regexp.MustCompile(\"[^a-zA-Z0-9]\")\n\tcleanSource = re.ReplaceAllLiteralString(cleanSource, \"-\")\n\n\t// replace \"-\" suite\n\tre = regexp.MustCompile(\"[-]+\")\n\tcleanSource = re.ReplaceAllLiteralString(cleanSource, \"-\")\n\n\t// remove \"-\" at string start\n\tcleanSource = strings.TrimPrefix(cleanSource, \"-\")\n\n\t// remove \"-\" at string end\n\tcleanSource = strings.TrimSuffix(cleanSource, \"-\")\n\n\t// to lower case\n\treturn strings.ToLower(cleanSource)\n}", "func filterCharsAndNormalize(strData string) string {\n\tpattern := regexp.MustCompile(`[\\W_]+`)\n\treturn 
strings.ToLower(pattern.ReplaceAllString(strData, ` `))\n}", "func UnicodeSanitize(s string) string {\n\tsource := []rune(s)\n\ttarget := make([]rune, 0, len(source))\n\n\tfor i, r := range source {\n\t\tif r == '%' && i+2 < len(source) && ishex(source[i+1]) && ishex(source[i+2]) {\n\t\t\ttarget = append(target, r)\n\t\t} else if unicode.IsLetter(r) || unicode.IsDigit(r) || unicode.IsMark(r) || r == '.' || r == '/' || r == '\\\\' || r == '_' || r == '-' || r == '#' || r == '+' || r == '~' {\n\t\t\ttarget = append(target, r)\n\t\t}\n\t}\n\n\tvar result string\n\n\tif true {\n\t\t// remove accents - see https://blog.golang.org/normalization\n\t\tt := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)\n\t\tresult, _, _ = transform.String(t, string(target))\n\t} else {\n\t\tresult = string(target)\n\t}\n\n\treturn result\n}", "func stripCtlAndExtFromUnicode(str string) string {\n\tisOk := func(r rune) bool {\n\t\treturn r < 32 || r >= 127\n\t}\n\t// The isOk filter is such that there is no need to chain to norm.NFC\n\tt := transform.Chain(norm.NFKD, transform.RemoveFunc(isOk))\n\t// This Transformer could also trivially be applied as an io.Reader\n\t// or io.Writer filter to automatically do such filtering when reading\n\t// or writing data anywhere.\n\tstr, _, _ = transform.String(t, str)\n\treturn str\n}", "func UnicodeSanitize(s string) string {\n\tsource := []rune(s)\n\ttarget := make([]rune, 0, len(source))\n\n\tfor _, r := range source {\n\t\tif unicode.IsLetter(r) ||\n\t\t\tunicode.IsDigit(r) ||\n\t\t\tunicode.IsMark(r) ||\n\t\t\tr == '.' 
||\n\t\t\tr == '/' ||\n\t\t\tr == '\\\\' ||\n\t\t\tr == '_' ||\n\t\t\tr == '-' ||\n\t\t\tr == '%' ||\n\t\t\tr == ' ' ||\n\t\t\tr == '#' {\n\t\t\ttarget = append(target, r)\n\t\t}\n\t}\n\n\treturn string(target)\n}", "func FromUnicode(s string) string {\n\t// emoji表情的数据表达式\n\tre := regexp.MustCompile(\"\\\\[[\\\\\\\\u0-9a-zA-Z]+\\\\]\")\n\t// 提取emoji数据表达式\n\treg := regexp.MustCompile(\"\\\\[\\\\\\\\u|]\")\n\tsrc := re.FindAllString(s, -1)\n\tfor i := 0; i < len(src); i++ {\n\t\te := reg.ReplaceAllString(src[i], \"\")\n\t\tp, err := strconv.ParseInt(e, 16, 32)\n\t\tif err == nil {\n\t\t\ts = strings.Replace(s, src[i], string(rune(p)), -1)\n\t\t}\n\t}\n\n\treturn s\n}", "func (ed *Data) Normalize(raw string, opts StripOpts) string {\n\tpending := []rune{0}\n\n\t// #0: Special-case single rune tone modifiers, which appear in test data.\n\tvar singleTone bool\n\tfor i, r := range raw {\n\t\tif i == 0 && IsSkinTone(r) {\n\t\t\tsingleTone = true\n\t\t} else {\n\t\t\tsingleTone = false\n\t\t\tbreak\n\t\t}\n\t}\n\tif singleTone {\n\t\treturn raw\n\t}\n\n\t// #1: Remove VS16 and other modifiers.\n\tfor _, r := range raw {\n\t\tif r == runeVS16 {\n\t\t\t// remove VS16\n\t\t\tcontinue\n\t\t} else if IsSkinTone(r) {\n\t\t\tif opts.Tone {\n\t\t\t\t// strip without checking\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tl := len(pending)\n\t\t\tif d, ok := ed.emoji[pending[l-1]]; ok && d.modifierBase {\n\t\t\t\t// great, skin tone is valid here\n\t\t\t\tpending = append(pending, r)\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if IsGender(r) && opts.Gender {\n\t\t\t// remove gender modifiers\n\t\t\tl := len(pending)\n\t\t\tif pending[l-1] == runeZWJ {\n\t\t\t\t// ... 
and drop a previous ZWJ if we find one\n\t\t\t\tpending = pending[:l-1]\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tpending = append(pending, r)\n\t}\n\tpending = append(pending, 0)\n\n\t// #2: Iterate chars, removing non-emoji.\n\tlp := len(pending) - 1\n\tout := make([]rune, 0, lp)\n\tvar pendingZWJ int\n\tvar allowZWJ int\n\tfor i := 1; i < lp; i++ {\n\t\tr := pending[i]\n\t\tif r == runeZWJ {\n\t\t\tif allowZWJ == i {\n\t\t\t\tpendingZWJ = i + 1 // add it before valid rune at next index\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tprev := pending[i-1]\n\n\t\tif r == runeCap {\n\t\t\t// allow if previous was number\n\t\t\tif IsBeforeCap(prev) {\n\t\t\t\tout = append(out, r)\n\t\t\t\tallowZWJ = i + 1\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif IsTag(r) {\n\t\t\t// allow if following a base or previous tag\n\t\t\tif IsTagBase(prev) || IsTag(prev) {\n\t\t\t\tout = append(out, r)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif IsTagCancel(r) {\n\t\t\t// allow if following a tag\n\t\t\tif IsTag(prev) {\n\t\t\t\tout = append(out, r)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif IsTag(prev) {\n\t\t\t// cancel the tag sequence if we got this far\n\t\t\tout = append(out, runeTagCancel)\n\t\t}\n\n\t\tif IsSkinTone(r) {\n\t\t\t// skin tone counts as a VS16, so look for a previous tone\n\t\t\tallowZWJ = i + 1\n\t\t\tl := len(out)\n\t\t\tif out[l-1] == runeVS16 {\n\t\t\t\tout[l-1] = r\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tout = append(out, r)\n\t\t\tcontinue\n\t\t}\n\n\t\tif IsFlagPart(r) {\n\t\t\t// just allow\n\t\t\t// TODO(samthor): Are these part of the data? 
Do we need this branch?\n\t\t\tout = append(out, r)\n\t\t\tcontinue\n\t\t}\n\n\t\tif d, ok := ed.emoji[r]; ok {\n\t\t\tif pendingZWJ == i {\n\t\t\t\tout = append(out, runeZWJ)\n\t\t\t}\n\n\t\t\tout = append(out, r)\n\t\t\tif d.unqualified {\n\t\t\t\tif IsSkinTone(pending[i+1]) {\n\t\t\t\t\t// do nothing as this acts as a VS16\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// stick a VS16 on the end\n\t\t\t\tout = append(out, runeVS16)\n\t\t\t}\n\t\t\tallowZWJ = i + 1\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// #3: Profit!\n\treturn string(out)\n}", "func toUtf(input string) string {\n\tsr := strings.NewReader(input)\n\ttr := transform.NewReader(sr, charmap.Windows1251.NewDecoder())\n\tbuf, err := ioutil.ReadAll(tr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ts := string(buf)\n\n\treturn s\n}", "func (arg1 *UConverter) ResetToUnicode()", "func Normalize(s string) string {\n\n // A transformation to remove non-spacing marks.\n markT := func(r rune) bool { return unicode.Is(unicode.Mn, r) }\n\n // A transformation to remove clean non-letter runes.\n mappingT := func(r rune) rune {\n if !validRune[r] {\n return ' '\n }\n return r\n }\n\n // A chain of transformation for a string.\n t := transform.Chain(\n norm.NFKD,\n transform.RemoveFunc(markT),\n runes.Map(mappingT),\n )\n\n r := transform.NewReader(strings.NewReader(s), t)\n buf := new(bytes.Buffer)\n buf.ReadFrom(r)\n\n trimmed := strings.Trim(space.ReplaceAllString(buf.String(), \" \"), \" \")\n\n return strings.ToLower(trimmed)\n}", "func Normalize(s string) string {\n\ts = strings.ToLower(s)\n\ts = nonalpha.ReplaceAllString(s, \" \")\n\ts = encodings.ReplaceAllString(s, \"\")\n\ts = spaces.ReplaceAllString(s, \" \")\n\ts = strings.TrimSpace(s)\n\treturn s\n}", "func (arg1 *UConverter) ResetFromUnicode()", "func normalize(s string) string {\n\tvar sb strings.Builder\n\tfor _, c := range s {\n\t\tif !unicode.IsLetter(c) && !unicode.IsNumber(c) {\n\t\t\tcontinue\n\t\t}\n\t\tsb.WriteRune(unicode.ToLower(c))\n\t}\n\treturn 
sb.String()\n}", "func transformizer(replacer *strings.Replacer, str string) (result string, err error) {\n\tt := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)\n\tpreResult, _, err := transform.String(t, str)\n\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\tresult = strings.ToLower(replacer.Replace(preResult))\n\treturn\n}", "func ToKlingon(value string) (string, error) {\n\n\t// Return error for empty string\n\tif value == \"\" {\n\t\treturn \"\", errors.New(\"Not translatable\")\n\t}\n\n\ttranslated := []string{}\n\n\t// Total length of the string\n\tlength := len(value)\n\n\tcursor := 0\n\n\t// Loop until all values are validated\n\tfor cursor < length {\n\n\t\t// Search for 2 characters\n\t\t// expected : ch - gh - ng\n\t\tif cursor+1 < length {\n\n\t\t\ttranslatedChar, ok := dictionary[strings.ToLower(value[cursor:cursor+2])]\n\n\t\t\t// Matched!\n\t\t\tif ok {\n\t\t\t\t// Save translated characters to array\n\t\t\t\ttranslated = append(translated, translatedChar)\n\n\t\t\t\t// Add 2 to cursor\n\t\t\t\tcursor += 2\n\n\t\t\t\t// Skip the remaining process\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Search for 3 characters\n\t\t// expected : tlh\n\t\tif cursor+2 < length {\n\t\t\ttranslatedChar, ok := dictionary[strings.ToLower(value[cursor:cursor+3])]\n\n\t\t\t// Matched!\n\t\t\tif ok {\n\t\t\t\t// Save translated characters to array\n\t\t\t\ttranslated = append(translated, translatedChar)\n\n\t\t\t\t// Add 3 to cursor\n\t\t\t\tcursor += 3\n\n\t\t\t\t// Skip the remaining process\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Convert []byte to string\n\t\tcurrentChar := string(value[cursor])\n\n\t\t// Search the dictionary\n\t\ttranslatedChar, ok := dictionary[currentChar]\n\n\t\t// If failed, search for the lowercase\n\t\tif !ok {\n\n\t\t\ttranslatedChar, ok = dictionary[strings.ToLower(currentChar)]\n\n\t\t\t// The character isn't translatable -- return error\n\t\t\tif !ok {\n\t\t\t\treturn \"\", errors.New(\"Not 
translatable\")\n\t\t\t}\n\t\t}\n\n\t\t// Save translated characters to array\n\t\ttranslated = append(translated, translatedChar)\n\n\t\t// Add 1 to cursor\n\t\tcursor++\n\n\t}\n\n\t// Separate each character with space\n\treturn strings.Join(translated, \" \"), nil\n\n}", "func precompute(s string) string {\n\ttrimmed := strings.TrimSpace(strings.ToLower(punctuationReplacer.Replace(s)))\n\n\t// UTF-8 normalization\n\tt := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC) // Mn: nonspacing marks\n\tresult, _, _ := transform.String(t, trimmed)\n\treturn result\n}", "func Normalize(s string) string {\n\tif r, _, err := transform.String(\n\t\ttransform.Chain(\n\t\t\tnorm.NFD,\n\t\t\trunes.Remove(runes.In(unicode.Mn)),\n\t\t\tnorm.NFC,\n\t\t),\n\t\tstrings.ToLower(s),\n\t); err == nil {\n\t\treturn r\n\t}\n\n\treturn s\n}", "func GetUnicode() string {\n\treturn \"汉语考试服务网: 首页-\"\n}", "func normalize(pt string) []rune {\n\toutput := make([]rune, 0, len(pt))\n\tfor _, r := range pt {\n\t\tif unicode.IsLetter(r) || unicode.IsNumber(r) {\n\t\t\toutput = append(output, unicode.ToLower(r))\n\t\t}\n\t}\n\n\treturn output\n}", "func TestNormalizeString(t *testing.T) {\n\tvar ReplaceTests = []struct {\n\t\tin string\n\t\tout\t string\n\t}{\n\t\t{\"Lazy -_Dog\", \"LazyDog\"},\n\t}\n\tfor _,r := range(ReplaceTests) {\n\t\tassert.Equal(t,normalizeString(r.in),r.out, \"strings don't match expected output\")\n\t}\n}", "func preprocess(input string) string {\n input = strings.TrimRight(input, \"\\n.!\")\n input = strings.ToLower(input)\n\n formattedInput := strings.Split(input, \" \")\n\tfor i, word := range formattedInput {\n\t\tformattedInput[i] = strings.ToLower(strings.Trim(word, \".! 
\\n\"))\n }\n \n formattedInput = PostProcess(formattedInput)\n\n input = strings.Join(formattedInput,\" \")\n\n return input\n}", "func preProcess(s string) []string {\n\t// find all chars that are not alphabet\n\treg := regexp.MustCompile(\"[^a-zA-Z']+\")\n\t// replace those chars with spaces\n\ts = reg.ReplaceAllString(s, \" \")\n\ts = strings.ToUpper(s)\n\treturn strings.Fields(s)\n}", "func normalizeHeader(header string) string {\n\tre := regexp.MustCompile(\"[[:^ascii:]]\")\n\treturn strings.ToLower(strings.TrimSpace(re.ReplaceAllLiteralString(header, \"\")))\n}", "func Translate(input string) string {\n\n\tt, err := tokenizer.New(ipa.Dict(), tokenizer.OmitBosEos())\n\tif err != nil {\n\t\treturn fmt.Sprintln(\"error in initializing tokenizer\", err)\n\t}\n\n\t// split into word list\n\ttokens := t.Analyze(input, tokenizer.Search)\n\n\t// replace 'translatable' words\n\tdatabaseURL := os.Getenv(\"DATABASE_URL\")\n\tdb, err := gorm.Open(\"postgres\", databaseURL)\n\tif err != nil {\n\t\treturn fmt.Sprintln(\"error in openning database,\", err)\n\t}\n\tdefer db.Close()\n\n\tret := \"\"\n\tprecedingPos := \"\"\n\tfor i, token := range tokens {\n\t\tif token.Class == tokenizer.DUMMY || token.Surface == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t// prefix addition\n\t\t// ! these process should be refactored\n\t\t// to have more generality\n\t\t// 連続する名詞の頭に「お」\n\t\tpos := token.POS()\n\t\tif pos[0] == \"名詞\" &&\n\t\t\t(pos[1] == \"一般\" || pos[1] == \"サ変接続\" || pos[1] == \"数\" || pos[1] == \"形容動詞語幹\") {\n\t\t\t// 先頭にあるか,一つ前が名詞,接頭詞でない\n\t\t\tif i == 0 || (precedingPos != \"名詞\" && precedingPos != \"接頭詞\") {\n\t\t\t\tret += \"お\"\n\t\t\t}\n\t\t}\n\t\tprecedingPos = pos[0]\n\n\t\t// look up database\n\t\tcand := []RegisteredWord{}\n\t\tposStr := strings.Join(token.POS(), \",\")\n\t\tresult := db.Where(\"(source_surface=? OR source_surface IS NULL) AND (? 
LIKE source_pos || '%' OR source_pos IS NULL)\", token.Surface, posStr).Find(&cand)\n\t\tif result.Error != nil {\n\t\t\treturn fmt.Sprintln(\"error in db query,\", result.Error)\n\t\t}\n\n\t\t// translate\n\t\tif len(cand) > 0 {\n\t\t\t// [TODO] consider better replacement logic\n\t\t\t// such as maximizing `digree of fun'\n\n\t\t\t// if the word has multiple candidates, choose one of them at random\n\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\tp := rand.Intn(len(cand))\n\t\t\tret += cand[p].TargetSurface\n\t\t} else {\n\t\t\t// not registered word\n\t\t\tret += token.Surface\n\t\t}\n\n\t\t// suffix addition\n\t\t// 丁寧語変換\n\t\tif token.POS()[0] == \"動詞\" {\n\t\t\t// collect required info about the next token\n\t\t\tvar nextPos string\n\t\t\tvar nextBase string\n\t\t\tvar nextSurface string\n\t\t\tif i+1 < len(tokens) {\n\t\t\t\tnextPos = tokens[i+1].POS()[0]\n\t\t\t\tnextBase, _ = tokens[i+1].BaseForm()\n\t\t\t\tnextSurface = tokens[i+1].Surface\n\t\t\t}\n\n\t\t\t// 動詞で終わる or 動詞のすぐ後に「ます」「て」以外の助詞助動詞あるいは句点が続く\n\t\t\t// => 丁寧語でないとみなす\n\t\t\tif i == len(tokens)-1 || nextPos == \"句点\" ||\n\t\t\t\t(nextPos == \"助詞\" && nextBase != \"て\") ||\n\t\t\t\t(nextPos == \"助動詞\" && nextBase != \"ます\") {\n\t\t\t\t// 動詞を連用形に活用する\n\t\t\t\tconj := ConjugateVerb(token, renyo)\n\t\t\t\t// remove overlapping\n\t\t\t\truneret := []rune(ret)\n\t\t\t\tsurflen := len([]rune(token.Surface))\n\t\t\t\tret = string(runeret[:len(runeret)-surflen])\n\t\t\t\t// concat conjugated verb\n\t\t\t\tret += conj\n\t\t\t\t// 「ます」を適切な活用の上追加する\n\t\t\t\tret += Conjugate(\"ます\", nextBase, nextPos)\n\t\t\t\t// [TBC] しない -> しません\n\t\t\t\tif nextPos == \"助動詞\" && nextSurface == \"ない\" {\n\t\t\t\t\ttokens[i+1].Surface = \"ん\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// explicit EOS\n\t\t// e.g., ました。 -> ましたわ。\n\t\tif token.POS()[0] == \"句点\" && i > 0 &&\n\t\t\ttokens[i-1].POS()[0] != \"助詞\" &&\n\t\t\ttokens[i-1].POS()[0] != \"記号\" &&\n\t\t\ttokens[i-1].POS()[0] != \"感動詞\" {\n\t\t\t// at random (at 50% 
probability)\n\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\tp := rand.Float32()\n\t\t\tif p < 0.5 {\n\t\t\t\tret += \"わ\"\n\t\t\t} else {\n\t\t\t\tret += \"の\"\n\t\t\t}\n\t\t}\n\t\t// implicit EOS\n\t\t// e.g., した -> したの。\n\t\tif i == len(tokens)-1 &&\n\t\t\t(token.POS()[0] != \"助詞\" &&\n\t\t\t\ttoken.POS()[0] != \"記号\" &&\n\t\t\t\ttoken.POS()[0] != \"名詞\" &&\n\t\t\t\ttoken.POS()[0] != \"感動詞\") {\n\n\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\tp := rand.Float32()\n\t\t\tif p < 0.5 {\n\t\t\t\tret += \"わ\"\n\t\t\t} else {\n\t\t\t\tret += \"の\"\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ret\n}", "func (arg1 *UConverter) GetSubstChars(arg2 *UErrorCode) []byte", "func NormalizeRunes(str string) string {\n\tstr = strings.ToLower(str)\n\tnorm := func(r rune) rune {\n\t\tswitch r {\n\t\t// TODO find a standard golang normalization ?\n\t\tcase ' ', '\\t', '(', ')', '\"', '\\'', ',', ';', ':', '?', '.', '/', '+':\n\t\t\treturn ' '\n\t\tcase '%', '^', '=', '`', '*', '&', '!', '°', '_':\n\t\t\treturn ' '\n\t\tcase 'à', 'ä':\n\t\t\treturn 'a'\n\t\tcase 'ç':\n\t\t\treturn 'c'\n\t\tcase 'é', 'è', 'ê', 'ë':\n\t\t\treturn 'e'\n\t\tcase 'ï', 'î':\n\t\t\treturn 'i'\n\t\tcase 'ô', 'ö':\n\t\t\treturn 'o'\n\t\tcase 'û', 'ü':\n\t\t\treturn 'u'\n\t\t}\n\t\tswitch {\n\t\tcase r >= 'a' && r <= 'z':\n\t\t\treturn r\n\t\tcase r >= 'A' && r <= 'Z':\n\t\t\treturn r\n\t\tcase r >= '0' && r <= '9':\n\t\t\treturn r\n\t\tcase r == '-':\n\t\t\treturn r\n\t\t}\n\t\t// Unknown characters should not be allowed in\n\t\treturn -1\n\t}\n\treturn strings.Map(norm, str)\n}", "func (arg1 *UConverter) FromUnicode(in *[]byte, out *[]uint16, offsets *[]int32, arg5 bool, arg6 *UErrorCode)", "func NormalizeText(text string) string {\n\ttransformer := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)\n\ttext, _, _ = transform.String(transformer, text)\n\n\treturn text\n}", "func replaceUnicode(s string) string {\n\tif s[1] == '#' {\n\t\tr, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)\n\t\treturn 
string(rune(r))\n\t}\n\tr, _, _, _ := strconv.UnquoteChar(s, 0)\n\treturn string(r)\n}", "func normalize(key string) string {\n\t// drop runes not in '[a-zA-Z0-9_]', with lowering\n\t// ':' is also dropped\n\treturn strings.Map(func(r rune) rune {\n\t\tif (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' {\n\t\t\treturn r\n\t\t}\n\t\tif r >= 'A' && r <= 'Z' {\n\t\t\treturn 'a' + (r - 'A')\n\t\t}\n\t\treturn -1\n\t}, key)\n}", "func (arg1 *UConverter) ToUnicode(in *[]uint16, out *[]byte, offsets *[]int32, arg5 bool, arg6 *UErrorCode)", "func correctUtf8Punctuation(s string) string {\n\treturn strings.Replace(s, \"’\", \"'\", -1)\n\t// TODO(amit): Improve this function with more characters.\n}", "func addDiacritic(w word) string {\n if w.tone < 1 || w.tone > 4 {\n return w.syllable\n }\n\n var result string\n\n // check for 'a' and 'e' by trying to replace them\n for _, v := range \"aeAE\" {\n result = strings.Replace(w.syllable, string(v), string(diacriticRuneForRuneAndTone(v, w.tone)), -1)\n if result != w.syllable {\n return result\n }\n }\n\n // check for ou by trying to replace it\n oRune, _ := utf8.DecodeRuneInString(\"o\")\n ORune, _ := utf8.DecodeRuneInString(\"O\")\n\n result = strings.Replace(w.syllable, \"ou\", string(diacriticRuneForRuneAndTone(oRune, w.tone))+\"u\", -1)\n if result != w.syllable {\n return result\n }\n result = strings.Replace(w.syllable, \"Ou\", string(diacriticRuneForRuneAndTone(ORune, w.tone))+\"u\", -1)\n if result != w.syllable {\n return result\n }\n\n // put diacritic on last vowel in string\n lastVowelIndex := indexOfLastVowel(w.syllable)\n //FIXME deal with error\n lastVowelRune, _ := utf8.DecodeRuneInString(w.syllable[lastVowelIndex:]) \n lastVowelDiacriticRune := diacriticRuneForRuneAndTone(lastVowelRune, w.tone)\n\n result = w.syllable[0:lastVowelIndex]\n result += string(lastVowelDiacriticRune)\n result += w.syllable[lastVowelIndex+1 : utf8.RuneCountInString(w.syllable)]\n\n return result\n}", "func 
ConvertTextEffects(result *string) {\n\n\t// *strong in jira* -> `strong in jira`\n\tstrongJira := regexp.MustCompile(`(?U)\\*([^}]+)\\*`)\n\n\t// _emphasis in jira_ -> *emphasis in jira*\n\temphasisJira := regexp.MustCompile(`(?U)_([^}]+)_`)\n\n\t// ??citation in jira?? -> <cite>citation in jira</cite>\n\tcitationJira := regexp.MustCompile(`(?U)\\?\\?([^}]+)\\?\\?`)\n\n\t// -deleted in jira- -> ~~deleted in jira~~\n\t// deletedJira := regexp.MustCompile(`(?U)\\-([^}]+)\\-`)\n\n\t// +inserted in jira+ -> <ins>inserted in jira</ins>\n\tinsertedJira := regexp.MustCompile(`(?U)\\+([^}]+)\\+`)\n\n\t// +superscript in jira+ -> <sup>superscript in jira</sup>\n\tsuperscriptJira := regexp.MustCompile(`(?U)\\^([^}]+)\\^`)\n\n\t// ~subscript in jira~ -> <sub>subscript in jira</sub>\n\tsubscriptJira := regexp.MustCompile(`(?U)\\~([^}]+)\\~`)\n\n\t// {{monospaced in jira}} -> `codes in jira`\n\tmonospacedJira := regexp.MustCompile(`(?U)\\{\\{([^}]+)\\}\\}`)\n\n\t*result = strongJira.ReplaceAllString(*result, \"**\"+`$1`+\"**\")\n\t*result = emphasisJira.ReplaceAllString(*result, \"*\"+`$1`+\"*\")\n\t*result = citationJira.ReplaceAllString(*result, \"<cite>\"+`$1`+\"</cite>\")\n\t*result = subscriptJira.ReplaceAllString(*result, \"<sub>\"+`$1`+\"</sub>\")\n\t// *result = deletedJira.ReplaceAllString(*result, \"~~\"+`$1`+\"~~\")\n\t*result = insertedJira.ReplaceAllString(*result, \"<ins>\"+`$1`+\"</ins>\")\n\t*result = superscriptJira.ReplaceAllString(*result, \"<sup>\"+`$1`+\"</sup>\")\n\t*result = monospacedJira.ReplaceAllString(*result, \"`\"+`$1`+\"`\")\n\n}", "func Normalize(input string) string {\n\tre := regexp.MustCompile(extendedKoreanRegex)\n\tendingNormalized := re.ReplaceAllStringFunc(\n\t\tinput,\n\t\tfunc(m string) string {\n\t\t\treturn normalizeEnding(m)\n\t\t},\n\t)\n\n\texclamationNormalized := removeRepeatingChar(endingNormalized)\n\trepeatingNormalized := normalizeRepeating(exclamationNormalized)\n\tcodaNNormalized := 
normalizeCodaN(repeatingNormalized)\n\ttypoCorrected := correctTypo(codaNNormalized)\n\n\treturn typoCorrected\n}", "func NormalizeString(s string) string {\n l := strings.ToLower(s)\n t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)\n n, _, _ := transform.String(t, l)\n return n\n}", "func normalize(phone string) string {\n\t// bytes buffer is more efficient than string concatenation with +\n\tvar buf bytes.Buffer\n\tfor _, ch := range phone {\n\t\tif ch >= '0' && ch <= '9' {\n\t\t\t// WriteRune: appends UTF-8 of input to buffer\n\t\t\tbuf.WriteRune(ch)\n\t\t}\n\t}\n\n\treturn buf.String()\n}", "func Ucwords(str string) string {\n\treturn strings.Title(str)\n}", "func (arg1 *UConverter) SetSubstChars(arg2 []byte, arg3 *UErrorCode)", "func NormalizeString(str string) string {\n\tvar result bytes.Buffer\n\tfor _, r := range str {\n\t\tif strings.ContainsRune(semanticAlphabet, r) {\n\t\t\tresult.WriteRune(r)\n\t\t}\n\t}\n\treturn result.String()\n}", "func (complexShaperThai) preprocessText(plan *otShapePlan, buffer *Buffer, font *Font) {\n\t/* The following is NOT specified in the MS OT Thai spec, however, it seems\n\t* to be what Uniscribe and other engines implement. According to Eric Muller:\n\t*\n\t* When you have a SARA AM, decompose it in NIKHAHIT + SARA AA, *and* move the\n\t* NIKHAHIT backwards over any tone mark (0E48-0E4B).\n\t*\n\t* <0E14, 0E4B, 0E33> . <0E14, 0E4D, 0E4B, 0E32>\n\t*\n\t* This reordering is legit only when the NIKHAHIT comes from a SARA AM, not\n\t* when it's there to start with. The string <0E14, 0E4B, 0E4D> is probably\n\t* not what a user wanted, but the rendering is nevertheless nikhahit above\n\t* chattawa.\n\t*\n\t* Same for Lao.\n\t*\n\t* Note:\n\t*\n\t* Uniscribe also does some below-marks reordering. Namely, it positions U+0E3A\n\t* after U+0E38 and U+0E39. We do that by modifying the ccc for U+0E3A.\n\t* See unicode.modified_combining_class (). 
Lao does NOT have a U+0E3A\n\t* equivalent.\n\t */\n\n\t/*\n\t* Here are the characters of significance:\n\t*\n\t*\t\t\tThai\tLao\n\t* SARA AM:\t\tU+0E33\tU+0EB3\n\t* SARA AA:\t\tU+0E32\tU+0EB2\n\t* Nikhahit:\t\tU+0E4D\tU+0ECD\n\t*\n\t* Testing shows that Uniscribe reorder the following marks:\n\t* Thai:\t<0E31,0E34..0E37,0E47..0E4E>\n\t* Lao:\t<0EB1,0EB4..0EB7,0EC7..0ECE>\n\t*\n\t* Note how the Lao versions are the same as Thai + 0x80.\n\t */\n\n\tbuffer.clearOutput()\n\tcount := len(buffer.Info)\n\tfor buffer.idx = 0; buffer.idx < count; {\n\t\tu := buffer.cur(0).codepoint\n\t\tif !isSaraAm(u) {\n\t\t\tbuffer.nextGlyph()\n\t\t\tcontinue\n\t\t}\n\n\t\t/* Is SARA AM. Decompose and reorder. */\n\t\tbuffer.outputRune(nikhahitFromSaraAm(u))\n\t\tbuffer.prev().setContinuation()\n\t\tbuffer.replaceGlyph(saraAaFromSaraAm(u))\n\n\t\t/* Make Nikhahit be recognized as a ccc=0 mark when zeroing widths. */\n\t\tend := len(buffer.outInfo)\n\t\tbuffer.outInfo[end-2].setGeneralCategory(nonSpacingMark)\n\n\t\t/* Ok, let's see... */\n\t\tstart := end - 2\n\t\tfor start > 0 && isToneMark(buffer.outInfo[start-1].codepoint) {\n\t\t\tstart--\n\t\t}\n\n\t\tif start+2 < end {\n\t\t\t/* Move Nikhahit (end-2) to the beginning */\n\t\t\tbuffer.mergeOutClusters(start, end)\n\t\t\tt := buffer.outInfo[end-2]\n\t\t\tcopy(buffer.outInfo[start+1:], buffer.outInfo[start:end-2])\n\t\t\tbuffer.outInfo[start] = t\n\t\t} else {\n\t\t\t/* Since we decomposed, and NIKHAHIT is combining, merge clusters with the\n\t\t\t* previous cluster. */\n\t\t\tif start != 0 && buffer.ClusterLevel == MonotoneGraphemes {\n\t\t\t\tbuffer.mergeOutClusters(start-1, end)\n\t\t\t}\n\t\t}\n\t}\n\tbuffer.swapBuffers()\n\n\t/* If font has Thai GSUB, we are done. 
*/\n\tif plan.props.Script == language.Thai && !plan.map_.foundScript[0] {\n\t\tdoThaiPuaShaping(buffer, font)\n\t}\n}", "func Scrub(original string) string {\n\tscrubbed := strings.Map(func(r rune) rune {\n\t\tswitch {\n\t\tcase unicode.IsPunct(r):\n\t\t\treturn -1\n\t\tcase unicode.IsUpper(r):\n\t\t\treturn unicode.ToLower(r)\n\t\tdefault:\n\t\t\treturn r\n\t\t}\n\t}, original)\n\tif scrubbed == \"\" {\n\t\treturn original\n\t}\n\treturn scrubbed\n}", "func (d *indexedDocument) normalized() string {\n\tvar w strings.Builder\n\tfor i, t := range d.Tokens {\n\t\tw.WriteString(d.dict.getWord(t.ID))\n\t\tif (i + 1) != d.size() {\n\t\t\tw.WriteString(\" \")\n\t\t}\n\t}\n\treturn w.String()\n}", "func normalize(d []byte) []byte {\n\t// Source: https://www.programming-books.io/essential/go/normalize-newlines-1d3abcf6f17c4186bb9617fa14074e48\n\t// replace CR LF \\r\\n (windows) with LF \\n (unix)\n\td = bytes.Replace(d, []byte{13, 10}, []byte{10}, -1)\n\t// replace CF \\r (mac) with LF \\n (unix)\n\td = bytes.Replace(d, []byte{13}, []byte{10}, -1)\n\treturn d\n}", "func normalizeDiallableCharsOnly(number string) string {\n\treturn normalizeHelper(\n\t\tnumber, DIALLABLE_CHAR_MAPPINGS, true /* remove non matches */)\n}", "func removeAccents(val string) (string, error) {\n\tt := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)\n\toutput, _, err := transform.String(t, val)\n\tif err != nil {\n\t\treturn \"##ERRO##\", err\n\t}\n\treturn output, nil\n}", "func RomanizeHepburn(str string) string {\n\tstr = strings.Replace(str, \"ō\", \"ou\", -1)\n\tstr = strings.Replace(str, \"ū\", \"uu\", -1)\n\treturn str\n}", "func Normalize(name string) string {\n\tfargs := func(c rune) bool {\n\t\treturn !unicode.IsLetter(c) && !unicode.IsNumber(c)\n\t}\n\t// get function\n\treturn strings.Join(strings.FieldsFunc(name, fargs), \"-\")\n}", "func (font *PdfFont) CharcodesToUnicodeWithStats(charcodes []textencoding.CharCode) (runelist []rune, numHits, numMisses int) 
{\n\trunes := make([]rune, 0, len(charcodes))\n\tnumMisses = 0\n\tfor _, code := range charcodes {\n\t\tif font.baseFields().toUnicodeCmap != nil {\n\t\t\tr, ok := font.baseFields().toUnicodeCmap.CharcodeToUnicode(cmap.CharCode(code))\n\t\t\tif ok {\n\t\t\t\trunes = append(runes, r)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// Fall back to encoding.\n\t\tencoder := font.Encoder()\n\t\tif encoder != nil {\n\t\t\tr, ok := encoder.CharcodeToRune(code)\n\t\t\tif ok {\n\t\t\t\trunes = append(runes, r)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tcommon.Log.Debug(\"ERROR: No rune. code=0x%04x charcodes=[% 04x] CID=%t\\n\"+\n\t\t\t\"\\tfont=%s\\n\\tencoding=%s\",\n\t\t\tcode, charcodes, font.baseFields().isCIDFont(), font, encoder)\n\t\tnumMisses++\n\t\trunes = append(runes, cmap.MissingCodeRune)\n\t}\n\n\tif numMisses != 0 {\n\t\tcommon.Log.Debug(\"ERROR: Couldn't convert to unicode. Using input.\\n\"+\n\t\t\t\"\\tnumChars=%d numMisses=%d\\n\"+\n\t\t\t\"\\tfont=%s\",\n\t\t\tlen(charcodes), numMisses, font)\n\t}\n\n\treturn runes, len(runes), numMisses\n}", "func ToTitleSpecial(c unicode.SpecialCase) MapFunc {\n\treturn func(s string) string { return strings.ToTitleSpecial(c, s) }\n}", "func cleanString(s string, r *regexp.Regexp) string {\n\n\t// Remove any trailing space to avoid ending on -\n\ts = strings.Trim(s, \" \")\n\n\t// Flatten accents first so that if we remove non-ascii we still get a legible name\n\ts = Accents(s)\n\n\t// Replace certain joining characters with an underscore\n\ts = separators.ReplaceAllString(s, \"_\")\n\n\t// Remove all other unrecognised characters - NB we do allow any printable characters\n\ts = r.ReplaceAllString(s, \"\")\n\n\t// Remove any multiple dashes caused by replacements above\n\ts = dashes.ReplaceAllString(s, \"-\")\n\n\treturn s\n}", "func SanitizeLine(s string) string {\n\ts = strings.ToLower(s)\n\ts = strings.Replace(s, \"⁄\", \"/\", -1)\n\ts = strings.Replace(s, \" / \", \"/\", -1)\n\n\t// special cases\n\ts = strings.Replace(s, 
\"butter milk\", \"buttermilk\", -1)\n\ts = strings.Replace(s, \"bicarbonate of soda\", \"baking soda\", -1)\n\ts = strings.Replace(s, \"soda bicarbonate\", \"baking soda\", -1)\n\n\t// remove parentheses\n\tre := regexp.MustCompile(`(?s)\\((.*)\\)`)\n\tfor _, m := range re.FindAllStringSubmatch(s, -1) {\n\t\ts = strings.Replace(s, m[0], \" \", 1)\n\t}\n\n\ts = \" \" + strings.TrimSpace(s) + \" \"\n\n\t// replace unicode fractions with fractions\n\tfor v := range corpusFractionNumberMap {\n\t\ts = strings.Replace(s, v, \" \"+corpusFractionNumberMap[v].fractionString+\" \", -1)\n\t}\n\n\t// remove non-alphanumeric\n\treg, _ := regexp.Compile(\"[^a-zA-Z0-9/.]+\")\n\ts = reg.ReplaceAllString(s, \" \")\n\n\t// replace fractions with unicode fractions\n\tfor v := range corpusFractionNumberMap {\n\t\ts = strings.Replace(s, corpusFractionNumberMap[v].fractionString, \" \"+v+\" \", -1)\n\t}\n\n\ts = strings.Replace(s, \" one \", \" 1 \", -1)\n\n\treturn s\n}", "func Unmetafy(str string) string {\n\tvar j int\n\tbs := []byte(str)\n\tfor i := 0; i < len(bs); i++ {\n\t\tif bs[i] == meta {\n\t\t\ti++\n\t\t\tbs[j] = bs[i] ^ 32\n\t\t} else {\n\t\t\tbs[j] = bs[i]\n\t\t}\n\t\tj++\n\t}\n\treturn string(bs[:j])\n}", "func (font *PdfFont) CharcodesToUnicode(charcodes []textencoding.CharCode) []rune {\n\tstrlist, _, _ := font.CharcodesToUnicodeWithStats(charcodes)\n\treturn strlist\n}", "func Unicode2String(form string) (to string, err error) {\n\tbs, err := hex.DecodeString(strings.Replace(form, `\\u`, ``, -1))\n\tif err != nil {\n\t\treturn\n\t}\n\tfor i, bl, br, r := 0, len(bs), bytes.NewReader(bs), uint16(0); i < bl; i += 2 {\n\t\tbinary.Read(br, binary.BigEndian, &r)\n\t\tto += string(r)\n\t}\n\treturn\n}", "func EnglishTextAndNumbersOnly(s string) string {\n\n\t// sBefore := s\n\n\ts = strings.TrimSpace(s)\n\ts = separators.Replace(s)\n\n\ts = germanUmlaute.Replace(s)\n\ts = englishTextAndNumbersOnly.ReplaceAllString(s, \" \")\n\ts = severalSpaces.ReplaceAllString(s, \" 
\")\n\n\ts = strings.TrimSpace(s)\n\n\t// if sBefore != s {\n\t// \tlog.Printf(\"\\n%v\\n%v\", sBefore, s)\n\t// }\n\n\treturn s\n}", "func (g *Generator) convertCyrillicToLatin() {\n\tfor _, runeValue := range g.lowerCased {\n\t\tg.converted = g.converted + librarian.Dictionary[string(runeValue)]\n\t}\n}", "func sanitizeText(str string) string {\n\t// count bytes in output & check whether modification is required\n\tnlen := 0\n\tmustmod := false\n\tfor _, r := range []rune(str) {\n\t\toutrune := cleanRune(r)\n\t\tif outrune != '\\000' {\n\t\t\tnlen++\n\t\t}\n\t\tif outrune != r {\n\t\t\tmustmod = true\n\t\t}\n\t}\n\n\t// if no modification is required, use the original string\n\tif !mustmod {\n\t\treturn str\n\t}\n\n\t// build new string\n\tnstr := make([]byte, nlen)\n\ti := 0\n\tfor _, r := range []rune(str) {\n\t\toutrune := cleanRune(r)\n\t\tif outrune != '\\000' {\n\t\t\tnstr[i] = byte(outrune)\n\t\t\ti++\n\t\t}\n\t}\n\n\t// unsafe convert byte slice to string\n\treturn *(*string)(unsafe.Pointer(&reflect.StringHeader{\n\t\tData: uintptr(unsafe.Pointer(&nstr[0])),\n\t\tLen: len(nstr),\n\t}))\n}", "func text2words(raw string) []string {\n\tparts := strings.Fields(raw)\n\tout := make([]string, 0, len(parts))\n\tfor _, word := range parts {\n\t\tword = strings.Trim(word, `'\".,:;-()`)\n\t\tword = strings.ToLower(word)\n\t\tif isAZ(word) {\n\t\t\tout = append(out, word)\n\t\t}\n\t}\n\treturn out\n}", "func filterToLower(b byte) byte {\n\n\tif b >= 'A' && b <= 'Z' {\n\t\treturn 'a' + (b - 'A')\n\t} else if b >= 'a' && b <= 'z' {\n\t\treturn b\n\t} else {\n\t\treturn ' ' // binary etc converted to space\n\t}\n}", "func NormalizeString(word string) string {\n\tletters := []string{}\n\tfor _, letter := range word {\n\t\tletters = append(letters, strings.ToLower(string(letter)))\n\t}\n\tsort.Strings(letters)\n\treturn strings.Join(letters, \"\")\n}", "func fromUnicode(d []byte) (string, error) {\n\tif len(d)%2 > 0 {\n\t\treturn \"\", errors.New(\"Unicode (UTF 16 LE) 
specified, but uneven data length\")\n\t}\n\ts := make([]uint16, len(d)/2)\n\terr := binary.Read(bytes.NewReader(d), binary.LittleEndian, &s)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(utf16.Decode(s)), nil\n}", "func UnicodeDecode(scope *Scope, input tf.Output, input_encoding string, optional ...UnicodeDecodeAttr) (row_splits tf.Output, char_values tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tattrs := map[string]interface{}{\"input_encoding\": input_encoding}\n\tfor _, a := range optional {\n\t\ta(attrs)\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"UnicodeDecode\",\n\t\tInput: []tf.Input{\n\t\t\tinput,\n\t\t},\n\t\tAttrs: attrs,\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0), op.Output(1)\n}", "func cleanANSIEscapeCodes(s string) string {\n\t// spaceControlCharacters includes tab, new line, vertical tab, new page, and\n\t// carriage return. These are in the unicode.Cc category, but that category also\n\t// contains ESC (U+001B) which we don't want.\n\tspaceControlCharacters := unicode.RangeTable{\n\t\tR16: []unicode.Range16{\n\t\t\t{Lo: 0x0009, Hi: 0x000D, Stride: 1},\n\t\t},\n\t}\n\n\t// Why not make this deny-only (instead of allow-only)? 
Because unicode.C\n\t// contains newline and tab characters that we want.\n\tallowedRanges := []*unicode.RangeTable{\n\t\tunicode.L,\n\t\tunicode.M,\n\t\tunicode.N,\n\t\tunicode.P,\n\t\tunicode.S,\n\t\tunicode.Z,\n\t\t&spaceControlCharacters,\n\t}\n\tbuilder := strings.Builder{}\n\tfor _, roon := range s {\n\t\tif unicode.IsOneOf(allowedRanges, roon) {\n\t\t\tbuilder.WriteRune(roon) // returns nil error, per go doc\n\t\t} else {\n\t\t\tfmt.Fprintf(&builder, \"%U\", roon)\n\t\t}\n\t}\n\treturn builder.String()\n}", "func SystemCodePageToUtf8(text string) (s string, e error) {\r\n\te = ErrInvalidEncoding\r\n\tstr := C.CString(text)\r\n\tdefer C.free(unsafe.Pointer(str)) // #nosec\r\n\r\n\tif wcACPStr, err := mbToWide(C.CP_ACP, str); err == nil {\r\n\t\tif utf8Str, err := wideToMB(C.CP_UTF8, wcACPStr); err == nil {\r\n\t\t\ts, e = utf8Str, nil\r\n\t\t}\r\n\t}\r\n\r\n\treturn\r\n}", "func Sanitize(text string) string {\n sanitized := rePhoto.ReplaceAllString(text, \"/photo\")\n sanitized = reRetweet.ReplaceAllString(sanitized, \"$1 \")\n sanitized = reMention.ReplaceAllString(sanitized, \"$1 $2\")\n sanitized = reLink.ReplaceAllString(sanitized, \"$1 $2$3\")\n\n sanitized = reEllipsis.ReplaceAllString(sanitized, \"$1 \")\n sanitized = reHyphen.ReplaceAllString(sanitized, \"$1 \")\n sanitized = reComma.ReplaceAllString(sanitized, \"$1$2 $3\")\n\n sanitized = strings.Replace(sanitized, \"&amp;\", \"&\", -1)\n sanitized = strings.Replace(sanitized, \"&gt;\", \">\", -1)\n sanitized = strings.Replace(sanitized, \"&lt;\", \"<\", -1)\n\n sanitized = strings.Replace(sanitized, \"#\", \"#\", -1)\n sanitized = strings.Replace(sanitized, \"#\", \" #\", -1)\n\n return sanitized\n}", "func beautify(text string) string {\n\tvar b strings.Builder\n\tfor _, runeValue := range text {\n\t\ts := charMap[string(runeValue)]\n\t\tif s == \"\" {\n\t\t\tif runeValue < 32 || runeValue > 126 {\n\t\t\t\tlogger.Trace(\"Illegal rune:\", runeValue, string(runeValue))\n\t\t\t} else 
{\n\t\t\t\tb.WriteRune(runeValue)\n\t\t\t}\n\t\t} else {\n\t\t\tb.WriteString(s)\n\t\t}\n\t}\n\ttext = b.String()\n\tif *camelCasePtr {\n\t\tif !isOnlyLowerCase(text) {\n\t\t\tcct := strings.Title(strings.ToLower(text))\n\t\t\tidx := strings.Index(cct, \"'\")\n\t\t\tif idx > 0 && idx < len(cct)-1 {\n\t\t\t\tcct = cct[:idx+1] + strings.ToLower(string(cct[idx+1])) + cct[idx+2:]\n\t\t\t}\n\t\t\treturn cct\n\t\t}\n\t}\n\treturn text\n}", "func init() {\n\treplaceSet = []string{\n\t\t\" \", \"-\",\n\t\t\"'\", \"\",\n\t\t\"ı\", \"i\",\n\t\t\",\", \"\",\n\t\t\".\", \"\",\n\t\t\"#\", \"\",\n\t\t\"!\", \"\",\n\t}\n}", "func scrubIllegalChars(str string) string {\n\tstr = strings.Replace(str, \"&\", \"_\", -1)\n\tstr = strings.Replace(str, \"*\", \"_\", -1)\n\tstr = strings.Replace(str, \"/\", \"_\", -1)\n\tstr = strings.Replace(str, \":\", \"_\", -1)\n\tstr = strings.Replace(str, \"`\", \"_\", -1)\n\tstr = strings.Replace(str, \"<\", \"_\", -1)\n\tstr = strings.Replace(str, \">\", \"_\", -1)\n\tstr = strings.Replace(str, \"?\", \"_\", -1)\n\tstr = strings.Replace(str, \"|\", \"_\", -1)\n\tstr = strings.Replace(str, \"#\", \"_\", -1)\n\tstr = strings.Replace(str, \"%\", \"_\", -1)\n\treturn str\n}", "func NormalizedName(s string) string {\n\treturn strings.Map(normalizedChar, s)\n}", "func Utf8ToEntity(entity string) string {\n\tif rune, size := utf8.DecodeRuneInString(entity); size != 0 {\n\t\treturn fmt.Sprintf(\"&#%d;\", rune)\n\t}\n\treturn entity\n}", "func Normalize(s string) string {\n\ts = reDeleteCriterion.ReplaceAllString(s, \"\")\n\treturn s\n}", "func ScrubWord(s string) string {\n\treturn strings.Map(func(r rune) rune {\n\t\tswitch {\n\t\tcase unicode.IsPunct(r):\n\t\t\treturn -1\n\t\tcase unicode.IsUpper(r):\n\t\t\treturn unicode.ToLower(r)\n\t\tdefault:\n\t\t\treturn r\n\t\t}\n\t}, s)\n}", "func normalize(s string) string {\n\treturn strings.Replace(s, \"_\", \"-\", -1)\n}", "func Decode(str string) string {\n\tfor _, s := range strings.Fields(str) {\n\t\tus 
:= m.FindAllString(s, -1)\n\t\tfor i := 0; i < len(us); i++ {\n\t\t\tif isSurrogate(us[i]) {\n\t\t\t\te := convertToUTF16(us[i], us[i+1])\n\t\t\t\tstr = strings.Replace(str, us[i], e, 1)\n\t\t\t\tstr = strings.Replace(str, us[i+1], \"\", 1)\n\t\t\t\ti++\n\t\t\t} else {\n\t\t\t\te := html.UnescapeString(\"&#x\" + strings.ToLower(us[i][2:]) + \";\")\n\t\t\t\tstr = strings.Replace(str, us[i], e, 1)\n\t\t\t}\n\t\t}\n\t}\n\treturn str\n}", "func normalizeJSONStrings(s string) string {\n\treturn unescapeHTMLRegexp.ReplaceAllStringFunc(s, func(s string) string {\n\t\tswitch s {\n\t\tcase `\\u003c`:\n\t\t\treturn \"<\"\n\t\tcase `\\u003e`:\n\t\t\treturn \">\"\n\t\tcase `\\u0026`:\n\t\t\treturn \"&\"\n\t\tcase `\\u2028`:\n\t\t\treturn \"\\u2028\"\n\t\tcase `\\u2029`:\n\t\t\treturn \"\\u2029\"\n\t\t}\n\t\treturn s\n\t})\n}", "func iso88591ToUTF8(s []byte) []byte {\n\trunes := make([]rune, len(s))\n\tfor i := range s {\n\t\trunes[i] = rune(s[i])\n\t}\n\treturn []byte(string(runes))\n}", "func changeAccents(s string) string {\n\ts = stringReplace(s, \"&eacute;\", \"é\")\n\ts = stringReplace(s, \"&agrave;\", \"à\")\n\ts = stringReplace(s, \"&egrave;\", \"è\")\n\ts = stringReplace(s, \"&#39;\", \"'\")\n\ts = stringReplace(s, \"&#34;\", \"\\\"\")\n\treturn s\n}", "func ToLowerSpecial(c unicode.SpecialCase) MapFunc {\n\treturn func(s string) string { return strings.ToLowerSpecial(c, s) }\n}", "func (self Text) UnicodeString() string {\n\t/* We have to make the inverse conversion from UTF-32 to UTF-8 */\n\ts := C.sfText_getUnicodeString(self.Cref)\n\tlength := C._UnicodeStringSize(s)\n\tb := C.GoBytes(unsafe.Pointer(s), length*4)\n\tbuf := make([]byte, length*4)\n\tfor i := 0; i < len(b); i += 4 {\n\t\tn := uint(b[i]) + uint(b[i+1]) << 8 + uint(b[i+2]) << 16 + uint(b[i+3]) << 24\n\t\tutf8.EncodeRune(buf[i:], rune(n))\n\t}\n\t\t\n\treturn string(buf)\n}", "func main() {\n\tstrings := []string{\n\t\t\"cool\",\n\t\t\"güzel\",\n\t\t\"jīntiān\",\n\t\t\"今天\",\n\t\t\"read 🤓\",\n\t}\n\n\t_ 
= strings\n\n\t// Print the byte and rune length of the strings\n\t// Hint: Use len and utf8.RuneCountInString\n\tfor _, s := range strings {\n\t\tfmt.Printf(\"Byte length: %-3d, rune length: %-3d\\n\", len(s),\n\t\t\tutf8.RuneCountInString(s))\n\t}\n\n\t// Print the bytes of the strings in hexadecimal\n\t// Hint: Use % x verb\n\n\t// Print the runes of the strings in hexadecimal\n\t// Hint: Use % x verb\n\n\t// Print the runes of the strings as rune literals\n\t// Hint: Use for range\n\n\t// Print the first rune and its byte size of the strings\n\t// Hint: Use utf8.DecodeRuneInString\n\n\t// Print the last rune of the strings\n\t// Hint: Use utf8.DecodeLastRuneInString\n\n\t// Slice and print the first two runes of the strings\n\n\t// Slice and print the last two runes of the strings\n\n\t// Convert the string to []rune\n\t// Print the first and last two runes\n}", "func UTFLetter(str string) bool {\n\tfor _, v := range str {\n\t\tif !unicode.IsLetter(v) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n\n}", "func normalizeStr(v string) string {\n\tv = strings.TrimSpace(v)\n\tv = regexp.MustCompile(`[^\\S\\r\\n]+`).ReplaceAllString(v, \" \")\n\tv = regexp.MustCompile(`[\\r\\n]+`).ReplaceAllString(v, \"\\n\")\n\n\treturn v\n}", "func Sanitize(s string) string {\n\treg := regexp.MustCompile(\"[^A-Za-z0-9]\")\n\treturn strings.ToLower(reg.ReplaceAllString(s, \"\"))\n}", "func tagUnkown(word string) string {\n\n\t// perform an N for loop checking for integer ascii value\n\tvar i int\n\tfor i = 0; i < len(word); i++ {\n\t\tif word[i] > 47 && word[i] < 58 {\n\t\t\treturn \"cd\"\n\t\t}\n\t}\n\n\tloWord := strings.ToLower(word)\n\n\tswitch {\n\tcase strings.HasSuffix(loWord, \"able\"):\n\t\treturn \"jj\"\n\tcase strings.HasSuffix(loWord, \"ible\"):\n\t\treturn \"jj\"\n\tcase strings.HasSuffix(loWord, \"ic\"):\n\t\treturn \"jj\"\n\tcase strings.HasSuffix(loWord, \"ous\"):\n\t\treturn \"jj\"\n\tcase strings.HasSuffix(loWord, \"al\"):\n\t\treturn \"jj\"\n\tcase 
strings.HasSuffix(loWord, \"ful\"):\n\t\treturn \"jj\"\n\tcase strings.HasSuffix(loWord, \"less\"):\n\t\treturn \"jj\"\n\tcase strings.HasSuffix(loWord, \"ly\"):\n\t\treturn \"rb\"\n\tcase strings.HasSuffix(loWord, \"ate\"):\n\t\treturn \"vb\"\n\tcase strings.HasSuffix(loWord, \"fy\"):\n\t\treturn \"vb\"\n\tcase strings.HasSuffix(loWord, \"ize\"):\n\t\treturn \"vb\"\n\t}\n\n\t// perform an N for loop checking for capital letter\n\tfor i = 0; i < len(word); i++ {\n\t\tif word[i] > 64 && word[i] < 91 {\n\t\t\treturn \"np\"\n\t\t}\n\t}\n\n\tswitch {\n\tcase strings.HasSuffix(loWord, \"ion\"):\n\t\treturn \"nn\"\n\tcase strings.HasSuffix(loWord, \"ess\"):\n\t\treturn \"nn\"\n\tcase strings.HasSuffix(loWord, \"ment\"):\n\t\treturn \"nn\"\n\tcase strings.HasSuffix(loWord, \"er\"):\n\t\treturn \"nn\"\n\tcase strings.HasSuffix(loWord, \"or\"):\n\t\treturn \"nn\"\n\tcase strings.HasSuffix(loWord, \"ist\"):\n\t\treturn \"nn\"\n\tcase strings.HasSuffix(loWord, \"ism\"):\n\t\treturn \"nn\"\n\tcase strings.HasSuffix(loWord, \"ship\"):\n\t\treturn \"nn\"\n\tcase strings.HasSuffix(loWord, \"hood\"):\n\t\treturn \"nn\"\n\tcase strings.HasSuffix(loWord, \"ology\"):\n\t\treturn \"nn\"\n\tcase strings.HasSuffix(loWord, \"ty\"):\n\t\treturn \"nn\"\n\tcase strings.HasSuffix(loWord, \"y\"):\n\t\treturn \"nn\"\n\tdefault:\n\t\treturn \"fw\"\n\t}\n}", "func SpecialCaseToTitle(special unicode.SpecialCase, r rune) rune", "func Normalize(dataArray []byte) string {\n\tdata := strings.ReplaceAll(string(dataArray), \"\\r\", \" \")\n\tdata = strings.ReplaceAll(data, \"\\n\", \" \")\n\tdata = strings.ReplaceAll(data, \"\\t\", \" \")\n\tfor strings.Index(data, \" \") >= 0 {\n\t\tdata = strings.ReplaceAll(data, \" \", \" \")\n\t}\n\treturn strings.TrimSpace(data)\n}", "func Decode(s string) string {\n\tfor _, v := range consonants {\n\t\ts = strings.Replace(strings.ToLower(s), fmt.Sprintf(\"%co%c\", v, v), string(v), -1)\n\t}\n\treturn s\n}", "func ProcessCode(code string) string {\n\tcode = 
strings.Replace(code, \" \", \"-\", -1)\n\treturn strings.ToLower(code)\n}", "func NormalizeLabel(label string) string {\n\n\t// Trivial case\n\tif len(label) == 0 {\n\t\treturn label\n\t}\n\n\t// Replace all non-alphanumeric runes with underscores\n\tlabel = strings.Map(sanitizeRune, label)\n\n\t// If label starts with a number, prepend with \"key_\"\n\tif unicode.IsDigit(rune(label[0])) {\n\t\tlabel = \"key_\" + label\n\t} else if strings.HasPrefix(label, \"_\") && !strings.HasPrefix(label, \"__\") && !featuregate.GetRegistry().IsEnabled(dropSanitizationGate.ID) {\n\t\tlabel = \"key\" + label\n\t}\n\n\treturn label\n}", "func playground() {\n\ts := \"Grüezi\"\n\tfmt.Println(s)\n\tfor i, r := range s {\n\t\tfmt.Printf(\"%d %s \", i, string(r))\n\t}\n\tfmt.Println()\n\tfmt.Println(string(65))\n}", "func cleanRune(r rune) rune {\n\tswitch {\n\tcase r >= 'a' && r <= 'z':\n\t\tfallthrough\n\tcase r >= '0' && r <= '9':\n\t\tfallthrough\n\tcase r == '-' || r == ' ':\n\t\treturn r\n\tcase r == '_':\n\t\treturn '-'\n\tcase r >= 'A' && r <= 'Z':\n\t\treturn r - ('A' - 'a')\n\tdefault:\n\t\treturn '\\000'\n\t}\n}", "func (f *Fuzzy) Fix(word string) string {\n\tcorrect := f.model.SpellCheck(word)\n\tre := regexp.MustCompile(`[^A-Za-z0-9]`)\n\tif correct != \"\" {\n\t\treturn correct\n\t}\n\treturn re.ReplaceAllString(word, \"\")\n}", "func filterBeforeMappingFromIPA(ss SymbolSet, trans string) (string, error) {\n\t// IPA: ˈba`ŋ.ka => ˈ`baŋ.ka\"\n\t// IPA: ˈɑ̀ː.pa => ˈ`ɑː.pa\n\ttrans = strings.Replace(trans, ipaAccentII+ipaLength, ipaLength+ipaAccentII, -1)\n\ts := ipaAccentI + \"(\" + ss.ipaPhonemeRe.String() + \"+)\" + ipaAccentII\n\trepl, err := regexp.Compile(s)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"couldn't compile regexp from string '%s' : %v\", s, err)\n\t}\n\tres := repl.ReplaceAllString(trans, ipaAccentI+ipaAccentII+\"$1\")\n\treturn res, nil\n}", "func UnicodeTranscode(scope *Scope, input tf.Output, input_encoding string, output_encoding string, optional 
...UnicodeTranscodeAttr) (output tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tattrs := map[string]interface{}{\"input_encoding\": input_encoding, \"output_encoding\": output_encoding}\n\tfor _, a := range optional {\n\t\ta(attrs)\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"UnicodeTranscode\",\n\t\tInput: []tf.Input{\n\t\t\tinput,\n\t\t},\n\t\tAttrs: attrs,\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func UnicodeScript(scope *Scope, input tf.Output) (output tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"UnicodeScript\",\n\t\tInput: []tf.Input{\n\t\t\tinput,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func ToTitle(r rune) rune", "func Normalize(input string) (result string, err error) {\n\treturn parser.Normalize(input)\n}", "func FilterLang(text, lang string) (new string) {\n\tfor _, value := range text {\n\t\tif unicode.IsLetter(value) || unicode.Is(unicode.Scripts[lang], value) {\n\t\t\tnew += string(value)\n\t\t}\n\t}\n\n\treturn\n}", "func correctEncodingToUtf8(text []byte) []byte {\n\tr, err := charset.NewReader(bytes.NewBuffer(text), \"application/xml\")\n\tif err != nil {\n\t\tfmt.Println(\"Error converting encoding:\", err)\n\t\treturn nil\n\t}\n\ttext, _ = ioutil.ReadAll(r)\n\treturn text\n}" ]
[ "0.7090795", "0.69785357", "0.6881222", "0.6424226", "0.63889015", "0.6321549", "0.6171422", "0.61526954", "0.61423665", "0.6102332", "0.6089447", "0.6084689", "0.6068356", "0.604763", "0.5991562", "0.59173185", "0.59055555", "0.58606905", "0.584883", "0.5733122", "0.5714558", "0.5713095", "0.5708541", "0.5702024", "0.5670002", "0.5662765", "0.5656597", "0.5655261", "0.56516623", "0.56365633", "0.56285805", "0.56167823", "0.5616333", "0.5609596", "0.5607015", "0.55751497", "0.5544231", "0.5533535", "0.5501727", "0.5413079", "0.5408677", "0.5407813", "0.5403875", "0.53982323", "0.53833073", "0.5367372", "0.536404", "0.5355371", "0.5354684", "0.5348123", "0.5347078", "0.5339973", "0.5310952", "0.5307727", "0.5306447", "0.5296745", "0.52906346", "0.5288743", "0.52662945", "0.526415", "0.5257662", "0.5246864", "0.52408373", "0.52039075", "0.51954633", "0.51885945", "0.518091", "0.51673347", "0.51616746", "0.5161013", "0.515815", "0.5157557", "0.5156193", "0.51442444", "0.51441807", "0.514161", "0.5130879", "0.51230335", "0.51177585", "0.5115213", "0.5115161", "0.51105773", "0.51091284", "0.51087326", "0.5107705", "0.510324", "0.50986373", "0.50966144", "0.50930136", "0.50762177", "0.5068428", "0.5065785", "0.50611615", "0.50595236", "0.50588185", "0.5049563", "0.5046798", "0.50457686", "0.5040902", "0.50312245" ]
0.6182329
6
Builds a person description, can return just person object or completed one with full list of profiles and faces. Flags: includeDetails: whether description will include faces and profiles (true), or not (false) includeFields: whether to include profiles meta data (true), or not (false).
func (dc *dta_controller) DescribePerson(aCtx auth.Context, pId string, includeDetails, includeMeta bool) (*PersonDesc, error) { pp, err := dc.Persister.GetPartitionTx("FAKE") if err != nil { return nil, err } //transaction err = pp.Begin() if err != nil { return nil, err } defer pp.Commit() person, err := pp.GetPersonById(pId) if err != nil { return nil, err } err = aCtx.AuthZCamAccess(person.CamId, auth.AUTHZ_LEVEL_OU) if err != nil { return nil, err } if !includeDetails { return &PersonDesc{Person: person}, nil } // collecting faces faces, err := pp.FindFaces(&model.FacesQuery{PersonIds: []string{pId}, Short: true}) if err != nil { return nil, err } prfArr := []int64{} if person.MatchGroup > 0 { prof2MGs, err := pp.GetProfilesByMGs([]int64{person.MatchGroup}) if err != nil { return nil, err } for pid, _ := range prof2MGs { prfArr = append(prfArr, pid) } } if person.ProfileId > 0 { prfArr = append(prfArr, person.ProfileId) } profs, err := pp.GetProfiles(&model.ProfileQuery{ProfileIds: prfArr, AllMeta: includeMeta}) if err != nil { return nil, err } profiles := make(map[int64]*model.Profile) for _, p := range profs { profiles[p.Id] = p } res := new(PersonDesc) res.Faces = faces res.Person = person res.Profiles = profiles return res, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetPersonCard(p Person) SubCard {\n\treturn SubCard{\n\t\tTitle: \"Name: \" + p.Name,\n\t\tSubTitle: \"Height: \" + p.Height,\n\t\tSubTitle2: \"Mass: \" + p.Mass + \" KG\",\n\t\tBody: \"Gender: \" + p.Gender,\n\t\tURL: \"/person?id=\" + lib.GetIDFromString(p.URL),\n\t}\n}", "func (u *User) describe() string { // se define como un prototype en la estructura\n description := fmt.Sprintf(\"Name: %s %s, Email: %s, ID: %d\", u.FirstName, u.LastName, u.Email, u.ID)\n return description \n}", "func GeneratePerson() *Person {\n\tlanguage := GenerateLanguage()\n\tfirstName := randomNoun(language)\n\tfirstPartOfLastName := randomVerb(language)\n\tsecondPartOfLastName := randomNoun(language)\n\n\treturn &Person{\n\t\tFirstName: strings.Title(firstName.Word),\n\t\tLastName: strings.Title(firstPartOfLastName.Word + secondPartOfLastName.Word),\n\t\tFirstNameMeaning: strings.Title(firstName.Meaning),\n\t\tLastNameMeaning: strings.Title(nounFromVerb(firstPartOfLastName.Meaning) + \" of \" + pluralizeNoun(secondPartOfLastName.Meaning)),\n\t}\n}", "func (*EducationRecord) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{10}\n}", "func (*PersonName_PersonStructuredName) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{3, 0}\n}", "func (*PersonName) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{3}\n}", "func describeUser(u User) string {\n description := fmt.Sprintf(\"Name: %s %s, Email: %s, ID: %d\", u.FirstName, u.LastName, u.Email, u.ID)\n return description \n}", "func (*AdditionalContactInfo) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{8}\n}", "func makeDescription(draconResult map[string]string, extras []string) string {\n\tdesc := \"This issue was automatically generated by the Dracon security pipeline.\\n\\n\" +\n\t\t\"*\" + 
draconResult[\"description\"] + \"*\" + \"\\n\\n\"\n\n\t// Append the extra fields to the description\n\tif len(extras) > 0 {\n\t\tdesc = desc + \"{code:}\" + \"\\n\"\n\t\tfor _, s := range extras {\n\t\t\tdesc = desc + fmt.Sprintf(\"%s: %*s\\n\", s, 25-len(s)+len(draconResult[s]), draconResult[s])\n\t\t}\n\t\tdesc = desc + \"{code}\" + \"\\n\"\n\t}\n\treturn desc\n}", "func composeProfile(p *golinkedin.Profile) *Profile {\n\tprof := &Profile{\n\t\tID: extractID(p.EntityUrn),\n\t\tUsername: p.PublicIdentifier,\n\t\tFirstName: p.FirstName,\n\t\tLastName: p.LastName,\n\t\tHeadline: p.Headline,\n\t\tAbout: p.Summary,\n\t\tPremium: p.Premium,\n\t\tInfluencer: p.Influencer,\n\t\tMemorialized: p.Memorialized,\n\t\tIsFullProfile: true,\n\t}\n\n\t// extract profile picture\n\tif p.ProfilePicture != nil {\n\t\tprof.ProfilePicture = composeImage(p.ProfilePicture.DisplayImageReference.VectorImage)\n\t}\n\n\t// extract background picture\n\tif p.BackgroundPicture != nil {\n\t\tif p.BackgroundPicture.DisplayImageReference != nil {\n\t\t\tprof.BackgroundPicture = &Image{\n\t\t\t\tURL: p.BackgroundPicture.DisplayImageReference.URL,\n\t\t\t}\n\t\t}\n\t}\n\n\t// extract location\n\tif p.GeoLocation != nil {\n\t\tprof.Location = composeLocationFromGeo(&p.GeoLocation.Geo)\n\t}\n\n\t// extract experience\n\tif p.ProfilePositionGroups != nil {\n\t\tfor _, post := range p.ProfilePositionGroups.Elements {\n\t\t\tprof.Experience = append(prof.Experience, *composePositionGroup(&post))\n\t\t}\n\t}\n\n\t// extract educations\n\tif p.ProfileEducations != nil {\n\t\tfor _, edu := range p.ProfileEducations.Elements {\n\t\t\tprof.Educations = append(prof.Educations, *composeEducation(&edu))\n\t\t}\n\t}\n\n\t// extract certifications\n\tif p.ProfileCertifications != nil {\n\t\tfor _, cert := range p.ProfileCertifications.Elements {\n\t\t\tprof.Certifications = append(prof.Certifications, *composeCertification(&cert))\n\t\t}\n\t}\n\n\t// extract skills\n\tif p.ProfileSkills != nil {\n\t\tfor _, 
skill := range p.ProfileSkills.Elements {\n\t\t\tprof.Skills = append(prof.Skills, *composeSkill(&skill))\n\t\t}\n\t}\n\n\treturn prof\n}", "func CreateDetails(names, points string) string {\n\treturn names + \" \" + points\n}", "func (*Publication) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{13}\n}", "func (p *Person) Info() string {\n return fmt.Sprintf(\"%v %v (%v)\", p.Firstname, p.Lastname, p.Phone)\n}", "func createDescriptionSection(stepData *config.StepData) string {\n\tlibraryName, binaryName := getNames(stepData.Metadata.Name)\n\n\tdescription := \"\"\n\tdescription += headlineDescription + stepData.Metadata.LongDescription + \"\\n\\n\"\n\tdescription += headlineUsage\n\tdescription += configRecommendation + \"\\n\\n\"\n\tdescription += `!!! tip \"\"` + \"\\n\\n\"\n\t// add Jenkins-specific information\n\tdescription += headlineJenkinsPipeline\n\tdescription += fmt.Sprintf(\"%v```groovy\\n\", spacingTabBox)\n\tdescription += fmt.Sprintf(\"%vlibrary('%s')\\n\\n\", spacingTabBox, libraryName)\n\tdescription += fmt.Sprintf(\"%v%v script: this\\n\", spacingTabBox, stepData.Metadata.Name)\n\tdescription += fmt.Sprintf(\"%v```\\n\\n\", spacingTabBox)\n\n\t// add Azure-specific information if activated\n\tif includeAzure {\n\t\tdescription += headlineAzure\n\t\tdescription += fmt.Sprintf(\"%v```\\n\", spacingTabBox)\n\t\tdescription += fmt.Sprintf(\"%vsteps:\\n\", spacingTabBox)\n\t\tdescription += fmt.Sprintf(\"%v - task: piper@1\\n\", spacingTabBox)\n\t\tdescription += fmt.Sprintf(\"%v name: %v\\n\", spacingTabBox, stepData.Metadata.Name)\n\t\tdescription += fmt.Sprintf(\"%v inputs:\\n\", spacingTabBox)\n\t\tdescription += fmt.Sprintf(\"%v stepName: %v\\n\", spacingTabBox, stepData.Metadata.Name)\n\t\tdescription += fmt.Sprintf(\"%v```\\n\\n\", spacingTabBox)\n\t}\n\n\t// add command line information\n\tdescription += headlineCommandLine\n\tdescription += fmt.Sprintf(\"%v```sh\\n\", 
spacingTabBox)\n\tdescription += fmt.Sprintf(\"%v%s %v\\n\", spacingTabBox, binaryName, stepData.Metadata.Name)\n\tdescription += fmt.Sprintf(\"%v```\\n\\n\", spacingTabBox)\n\n\tdescription += stepOutputs(stepData)\n\treturn description\n}", "func (*Profile) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{0}\n}", "func (*Person) Descriptor() ([]byte, []int) {\n\treturn file_protos_face_recognition_service_proto_rawDescGZIP(), []int{4}\n}", "func (*Phone) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{6}\n}", "func (*AddPersonResponse) Descriptor() ([]byte, []int) {\n\treturn file_protos_face_recognition_service_proto_rawDescGZIP(), []int{1}\n}", "func (*Person) Descriptor() ([]byte, []int) {\n\treturn file_protos_person_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{3}\n}", "func (*Person) Descriptor() ([]byte, []int) {\n\treturn file_proto_person_person_proto_rawDescGZIP(), []int{0}\n}", "func Person() *PersonInfo { return person(globalFaker.Rand) }", "func (*Person) Descriptor() ([]byte, []int) {\n\treturn file_person_person_proto_rawDescGZIP(), []int{1}\n}", "func (*Person) Descriptor() ([]byte, []int) {\n\treturn file_tutorial_proto_rawDescGZIP(), []int{0}\n}", "func (*EmploymentRecord) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{9}\n}", "func (*Person) Descriptor() ([]byte, []int) {\n\treturn file_proto_person_proto_rawDescGZIP(), []int{0}\n}", "func (f *Faker) Person() *PersonInfo { return person(f.Rand) }", "func populatePerson(scanner scanner) (*Person, error) {\n\tout := &Person{}\n\terr := scanner(&out.ID, &out.FullName, &out.Phone, &out.Currency, &out.Price)\n\treturn out, err\n}", "func (o LookupExperienceResultOutput) Description() pulumi.StringOutput 
{\n\treturn o.ApplyT(func(v LookupExperienceResult) string { return v.Description }).(pulumi.StringOutput)\n}", "func (*FindPersonResponse) Descriptor() ([]byte, []int) {\n\treturn file_protos_face_recognition_service_proto_rawDescGZIP(), []int{3}\n}", "func (*Profile) Descriptor() ([]byte, []int) {\n\treturn file_odpf_meta_User_proto_rawDescGZIP(), []int{3}\n}", "func (*CMsgDOTARealtimeGameStatsTerse_BuildingDetails) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_common_proto_rawDescGZIP(), []int{28, 2}\n}", "func (*Person) Descriptor() ([]byte, []int) {\n\treturn file_chargehive_chtype_contact_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateNewOrganizationResponse) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{20}\n}", "func (s TestingSingleton) Description(desc string) TestingBuildParams {\n\treturn buildParamsFromDescription(s.provider, desc)\n}", "func NewPerson(vres *peopleviews.Person) *Person {\n\tvar res *Person\n\tswitch vres.View {\n\tcase \"default\", \"\":\n\t\tres = newPerson(vres.Projected)\n\tcase \"full\":\n\t\tres = newPersonFull(vres.Projected)\n\t}\n\treturn res\n}", "func (o NetworkProfileOutput) Description() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *NetworkProfile) pulumi.StringPtrOutput { return v.Description }).(pulumi.StringPtrOutput)\n}", "func CreatePerson() *Person {\n\tvar newPerson Person\n\tnewPerson.Name = \"mario\"\n\tnewPerson.Address = \"rome\"\n\tnewPerson.Phone = 349121454\n\treturn &newPerson\n}", "func (m *ConnectedOrganizationMembers) GetDescription()(*string) {\n return m.description\n}", "func (*AddPersonRequest) Descriptor() ([]byte, []int) {\n\treturn file_protos_face_recognition_service_proto_rawDescGZIP(), []int{0}\n}", "func (*ParticipantProfile) Descriptor() ([]byte, []int) {\n\treturn file_quiz_proto_rawDescGZIP(), []int{2}\n}", "func (o ProxyAuthOutput) Description() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ProxyAuth) *string { 
return v.Description }).(pulumi.StringPtrOutput)\n}", "func getBiography(age int, name string, status string) (string, string){\n\tageNow := strconv.Itoa(age)\n\n\treturn name + \" adalah seorang \"+ status,\n\t\t \"umurnya \"+ ageNow\n\n\n}", "func (p person) String() string {\n\treturn p.firstname + \" \" + p.lastname\n}", "func (*Profile) Descriptor() ([]byte, []int) {\n\treturn file_examplepb_example_proto_rawDescGZIP(), []int{7}\n}", "func ShowPerson(aPerson *Person) {\n\tfmt.Printf(\"Name: %s\\n\", aPerson.Name)\n\tfmt.Printf(\"Address: %s\\n\", aPerson.Address)\n\tfmt.Printf(\"Phone: %d\\n\", aPerson.Phone)\n}", "func CreatePersonName(firstName, lastName string) PersonName {\n\treturn PersonName{\n\t\tNameNumber: \"1.1\", // sabre example\n\t\tNameReference: \"ABC123\", // sabre example\n\t\tPassengerType: \"ADT\", // sabre example\n\t\tFirst: &GivenName{Val: firstName},\n\t\tLast: Surname{Val: lastName},\n\t}\n}", "func (*CMsgDOTARealtimeGameStats_BuildingDetails) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_common_proto_rawDescGZIP(), []int{27, 6}\n}", "func (builder *RoomBuilder) BuildPeople(p *PeopleI) {\n\t*p = builder.people\n}", "func (p ByName) Description() string { return p.description }", "func (*Person) Descriptor() ([]byte, []int) {\n\treturn file_basic_basic_proto_rawDescGZIP(), []int{0}\n}", "func CreatePerson(w http.ResponseWriter, r *http.Request) {\n\tvar person model.Person\n\t/*\n\t\tTo print the response to string\n\t*/\n\tbodyBytes, _ := ioutil.ReadAll(r.Body)\n\tbodyString := string(bodyBytes)\n\tfmt.Println(bodyString)\n\n\t/*\n\t\tParse JSON object without struct\n\t*/\n\tm := map[string]interface{}{}\n\terr := json.Unmarshal(bodyBytes, &m)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(m)\n\tfmt.Println(m[\"firstname\"])\n\n\tjson.Unmarshal(bodyBytes, &person) // parse JSON to person struct object\n\tfmt.Println(person.Firstname)\n\tpeople = append(people, person)\n\n\tw.Header().Set(\"Content-Type\", 
\"application/json; charset=UTF-8\")\n\tjson.NewEncoder(w).Encode(people)\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{2}\n}", "func (*PersonalUri) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{7}\n}", "func (deity Deity) Describe() string {\n\tvar relationship string\n\n\tdescription := deity.Name\n\n\tif len(deity.Domains) > 0 {\n\t\tdescription += \" is the god\"\n\t\tif deity.Gender.Name == \"female\" {\n\t\t\tdescription += \"dess\"\n\t\t}\n\t\tdescription += \" of \" + words.CombinePhrases(deity.getDomainNames()) + \". \"\n\t} else {\n\t\tdescription += \" is a god\"\n\t\tif deity.Gender.Name == \"female\" {\n\t\t\tdescription += \"dess\"\n\t\t}\n\t\tdescription += \" of no particular domain. \"\n\t}\n\n\tdescription += words.Title(deity.Gender.SubjectPronoun) + \" is \" + deity.Appearance + \". \"\n\n\tdescription += words.Title(deity.Gender.SubjectPronoun) + \" is \" + words.CombinePhrases(deity.PersonalityTraits) + \". \"\n\n\tdescription += words.Title(deity.Gender.PossessivePronoun) + \" holy item is \" + words.Pronoun(deity.HolyItem) + \" \" + deity.HolyItem + \", and \"\n\tdescription += deity.Gender.PossessivePronoun + \" holy symbol is \" + words.Pronoun(deity.HolySymbol) + \" \" + deity.HolySymbol + \". 
\"\n\n\trelationships := []string{}\n\n\tif len(deity.Relationships) > 0 {\n\t\tfor _, r := range deity.Relationships {\n\t\t\trelationship = r.Descriptor + \" \" + r.Target\n\t\t\trelationships = append(relationships, relationship)\n\t\t}\n\n\t\tdescription += deity.Name + \" \" + words.CombinePhrases(relationships) + \".\"\n\t}\n\n\treturn description\n}", "func (p *Person) String() string {\n\tvar fields []string\n\n\tif p.FirstName != \"\" {\n\t\tfields = append(fields, p.FirstName)\n\t}\n\n\tif p.LastName != \"\" {\n\t\tfields = append(fields, p.LastName)\n\t}\n\n\tif p.FirstNameMeaning != \"\" {\n\t\tfields = append(fields, fmt.Sprintf(\"(%s, %s)\", p.FirstNameMeaning, p.LastNameMeaning))\n\t}\n\n\treturn strings.Join(fields, \" \")\n}", "func (p person) String() string {\n\treturn fmt.Sprintf(\"Object %s: %d\", p.Name, p.Age)\n}", "func (*Activity) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{12}\n}", "func (dc *dta_controller) DescribePersonsByProfile(aCtx auth.Context, prfId int64) ([]*PersonDesc, error) {\n\tpp, err := dc.Persister.GetPartitionTx(\"FAKE\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//transaction\n\terr = pp.Begin()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer pp.Commit()\n\n\tpers, err := pp.FindPersons(&model.PersonsQuery{ProfileId: &prfId})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := make([]*PersonDesc, 0, len(pers))\n\tif len(pers) == 0 {\n\t\treturn res, nil\n\t}\n\n\terr = aCtx.AuthZCamAccess(pers[0].CamId, auth.AUTHZ_LEVEL_OU)\n\tif err != nil {\n\t\treturn res, err\n\t}\n\n\t// Fake profile\n\tprf := &model.Profile{Id: prfId}\n\tpm := map[int64]*model.Profile{prfId: prf}\n\n\tprsIds := make([]string, len(pers))\n\tpersMap := make(map[string]*PersonDesc)\n\tfor i, p := range pers {\n\t\tprsIds[i] = p.Id\n\t\tpd := new(PersonDesc)\n\t\tpd.Faces = make([]*model.Face, 0, 1)\n\t\tpd.Person = p\n\t\tpd.Profiles = pm\n\t\tpersMap[p.Id] = pd\n\t\tres 
= append(res, pd)\n\t}\n\n\tfaces, err := pp.FindFaces(&model.FacesQuery{PersonIds: prsIds})\n\tif err != nil {\n\t\treturn res, err\n\t}\n\n\tfor _, f := range faces {\n\t\tpd, ok := persMap[f.PersonId]\n\t\tif ok {\n\t\t\tpd.Faces = append(pd.Faces, f)\n\t\t} else {\n\t\t\tdc.logger.Error(\"DescribePersonsByProfile(): very strange - there is no descriptor for personId=\", f.PersonId, \", but there is a face=\", f)\n\t\t}\n\t}\n\n\treturn res, nil\n}", "func NewProfile(uid int64, firstName, lastName NullString,\n\thighScore NullInt64, gender, img NullString,\n\tbirthDate NullTime) (*Profile, error) {\n\treturn &Profile{uid,\n\t\tfirstName,\n\t\tlastName,\n\t\thighScore,\n\t\tgender,\n\t\timg,\n\t\tbirthDate,\n\t\ttime.Now(),\n\t}, nil\n}", "func (*People) Descriptor() ([]byte, []int) {\n\treturn file_people_proto_rawDescGZIP(), []int{0}\n}", "func (*FindPersonRequest) Descriptor() ([]byte, []int) {\n\treturn file_protos_face_recognition_service_proto_rawDescGZIP(), []int{2}\n}", "func Description(w io.Writer, intro, description string) {\n\tfmt.Fprintf(w, \"%s:\\n\\t%s\\n\\n\", intro, Bold(description))\n}", "func (*Profile) Descriptor() ([]byte, []int) {\n\treturn file_service_account_proto_entities_entities_proto_rawDescGZIP(), []int{1}\n}", "func (o LookupSecureCredentialResultOutput) Description() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupSecureCredentialResult) string { return v.Description }).(pulumi.StringOutput)\n}", "func (*Profile) Descriptor() ([]byte, []int) {\n\treturn file_ims_proto_rawDescGZIP(), []int{4}\n}", "func generate(p *models.Project) string {\n\tvar builder strings.Builder\n\tbuilder.WriteString(fmt.Sprintf(\"# %s\\n\", p.Name))\n\tplugins := []Plugin{description}\n\tif len(p.Badges) > 0 {\n\t\tplugins = append(plugins, addBadges)\n\t\tfor _, plugin := range plugins {\n\t\t\tplugin(&builder, p)\n\t\t}\n\t}\n\tbuilder.WriteString(fmt.Sprintf(\"### Author\\n\"))\n\tbuilder.WriteString(fmt.Sprintf(\"%s\\n\", 
p.Author))\n\tbuilder.WriteString(fmt.Sprintf(\"### LICENCE\\n\"))\n\treturn builder.String()\n}", "func (*People) Descriptor() ([]byte, []int) {\n\treturn file_parser_company_proto_rawDescGZIP(), []int{31}\n}", "func (*Patent) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{14}\n}", "func MakePerson() Person {\n\t// Generate ID\n\tid := strings.Builder{}\n\tidLength := 5\n\n\tr := rand.New(\n\t\trand.NewSource(time.Now().UnixNano()),\n\t)\n\n\tcharset := strings.Join(\n\t\t[]string{\n\t\t\t\"abcdefghijklmnopqrstuvwxyz\",\n\t\t\t\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\",\n\t\t\t\"1234567890\",\n\t\t},\n\t\t\"\",\n\t)\n\n\tfor i := 0; i < idLength; i++ {\n\t\tid.WriteByte(charset[r.Intn(len(charset))])\n\t}\n\n\t// Generate Name\n\trand.Seed(time.Now().UnixNano())\n\tindex := rand.Intn(len(names))\n\n\t// Offset from initial location\n\tcoordinates := Coordinates{\n\t\tLatitude: initialLocation.Latitude + (utils.RandomNoise() * 15),\n\t\tLongitude: initialLocation.Longitude + (utils.RandomNoise() * 15),\n\t}\n\n\t// Return the person\n\treturn Person{\n\t\tID: id.String(),\n\t\tName: names[index],\n\t\tCoordinates: coordinates,\n\t}\n}", "func NewPerson(opts ...PersonOption) *Person {\n\tp := &Person{}\n\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\n\treturn p\n}", "func (a API) Profile(client *http.Client, user_id string, fields Fields) (j map[string]interface{}, err error) {\r\n\treturn a.request(client, \"profile\", map[string]string{\r\n\t\t\"id\": getUserIdString(user_id),\r\n\t\t\"fields\": fields.Encode(),\r\n\t}, nil)\r\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_view_grpc_blog_api_proto_rawDescGZIP(), []int{3}\n}", "func (*PersonExercise) Descriptor() ([]byte, []int) {\n\treturn file_protos_person_proto_rawDescGZIP(), []int{4}\n}", "func (*GetUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{7}\n}", "func 
(*Person) Descriptor() ([]byte, []int) {\n\treturn file_protomessage_proto_rawDescGZIP(), []int{0}\n}", "func composeMiniProfile(m *golinkedin.MiniProfile) *Profile {\n\tprof := &Profile{\n\t\tID: extractID(m.EntityUrn),\n\t\tUsername: m.PublicIdentifier,\n\t\tFirstName: m.FirstName,\n\t\tLastName: m.LastName,\n\t\tHeadline: m.Occupation,\n\t\tIsMiniProfile: true,\n\t}\n\n\t// extract background picture\n\tif m.BackgroundImage != nil {\n\t\tprof.BackgroundPicture = composeImage(&m.BackgroundImage.COMLinkedinCommonVectorImage)\n\t}\n\n\t// extract profile picture\n\tif m.Picture != nil {\n\t\tprof.ProfilePicture = composeImage(m.Picture.COMLinkedinCommonVectorImage)\n\t}\n\n\treturn prof\n}", "func (o RealmOutput) Description() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Realm) pulumi.StringPtrOutput { return v.Description }).(pulumi.StringPtrOutput)\n}", "func NewProfile(info profileutil.ProvisioningProfileInfoModel, content []byte) autocodesign.Profile {\n\treturn Profile{\n\t\tattributes: appstoreconnect.ProfileAttributes{\n\t\t\tName: info.Name,\n\t\t\tUUID: info.UUID,\n\t\t\tProfileContent: content,\n\t\t\tPlatform: getBundleIDPlatform(info.Type),\n\t\t\tExpirationDate: time.Time(info.ExpirationDate),\n\t\t},\n\t\tid: \"\", // only in case of Developer Portal Profiles\n\t\tbundleID: info.BundleID,\n\t\tcertificateIDs: nil, // only in case of Developer Portal Profiles\n\t\tdeviceIDs: nil, // only in case of Developer Portal Profiles\n\t}\n}", "func (repository *Repository) Create(\n\tprofileID string,\n\taccountID string,\n\temail string,\n\tgender string,\n\tfileID string,\n\tinterestedField string,\n\tinterestedFieldDetail []string,\n) (entity.Profile, error) {\n\tprofileEntity := entity.Profile{\n\t\tID: profileID,\n\t\tAccountID: accountID,\n\t\tEmail: email,\n\t\tGender: gender,\n\t\tFileID: fileID,\n\t\tInterestedField: interestedField,\n\t\tInterestedFieldDetail: interestedFieldDetail,\n\t\tCreatedAt: time.Now(),\n\t\tUpdatedAt: 
time.Now(),\n\t}\n\tinsertResult, err := repository.mongo.InsertOne(\n\t\tcontext.TODO(),\n\t\tprofileEntity,\n\t)\n\tif err != nil || insertResult == nil {\n\t\tpanic(err)\n\t}\n\trepository.setCache(profileID, &profileEntity)\n\treturn profileEntity, nil\n}", "func (ulw Wrapper) Description() string {\n\tvar count int\n\tfor row := range ulw.ul.Results {\n\t\tif ulw.ul.Results[row].Username != \"\" {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"Activity by Username (processlist) %d rows\", count)\n}", "func (*CreateOrganizationResponse) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_auth_proto_rawDescGZIP(), []int{7}\n}", "func (*Email) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{5}\n}", "func (cli *Client) PeopleProfile(id string, fields []string) (r map[string]interface{}, e error) {\n\tvar opt map[string]interface{}\n\n\tif len(fields) > 0 {\n\t\topt = map[string]interface{}{\n\t\t\t\"fields\": fields,\n\t\t}\n\t}\n\n\tr, e = cli.call(\"GET\", \"people\", id, \"\", opt)\n\n\treturn r, e\n}", "func (*Person) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{8}\n}", "func GenerateDescription(region *string) string {\n\treturn fmt.Sprintf(\"Bigshot lambda worker in %s\", *region)\n}", "func (o LookupServicePerimeterResultOutput) Description() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupServicePerimeterResult) string { return v.Description }).(pulumi.StringOutput)\n}", "func NewDescription(moniker, identity, website, details string) Description {\n\treturn Description{\n\t\tMoniker: moniker,\n\t\tIdentity: identity,\n\t\tWebsite: website,\n\t\tDetails: details,\n\t}\n}", "func (*CreateNewOrganizationRequest) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{19}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{3}\n}", "func (*Person) 
Descriptor() ([]byte, []int) {\n\treturn file_address_book_addressbook_proto_rawDescGZIP(), []int{1}\n}", "func (l *LegalEntity) AppendDetails(values *RequestValues) {\n\tif len(l.Type) > 0 {\n\t\tvalues.Add(\"legal_entity[type]\", string(l.Type))\n\t}\n\n\tif len(l.BusinessName) > 0 {\n\t\tvalues.Add(\"legal_entity[business_name]\", l.BusinessName)\n\t}\n\n\tif len(l.BusinessNameKana) > 0 {\n\t\tvalues.Add(\"legal_entity[business_name_kana]\", l.BusinessNameKana)\n\t}\n\n\tif len(l.BusinessNameKanji) > 0 {\n\t\tvalues.Add(\"legal_entity[business_name_kanji]\", l.BusinessNameKanji)\n\t}\n\n\tif len(l.First) > 0 {\n\t\tvalues.Add(\"legal_entity[first_name]\", l.First)\n\t}\n\n\tif len(l.FirstKana) > 0 {\n\t\tvalues.Add(\"legal_entity[first_name_kana]\", l.FirstKana)\n\t}\n\n\tif len(l.FirstKanji) > 0 {\n\t\tvalues.Add(\"legal_entity[first_name_kanji]\", l.FirstKanji)\n\t}\n\n\tif len(l.Gender) > 0 {\n\t\tvalues.Add(\"legal_entity[gender]\", string(l.Gender))\n\t}\n\n\tif len(l.Last) > 0 {\n\t\tvalues.Add(\"legal_entity[last_name]\", l.Last)\n\t}\n\n\tif len(l.LastKana) > 0 {\n\t\tvalues.Add(\"legal_entity[last_name_kana]\", l.LastKana)\n\t}\n\n\tif len(l.LastKanji) > 0 {\n\t\tvalues.Add(\"legal_entity[last_name_kanji]\", l.LastKanji)\n\t}\n\n\tif len(l.MaidenName) > 0 {\n\t\tvalues.Add(\"legal_entity[maiden_name]\", l.MaidenName)\n\t}\n\n\tif l.DOB.Day > 0 {\n\t\tvalues.Add(\"legal_entity[dob][day]\", strconv.Itoa(l.DOB.Day))\n\t}\n\n\tif l.DOB.Month > 0 {\n\t\tvalues.Add(\"legal_entity[dob][month]\", strconv.Itoa(l.DOB.Month))\n\t}\n\n\tif l.DOB.Year > 0 {\n\t\tvalues.Add(\"legal_entity[dob][year]\", strconv.Itoa(l.DOB.Year))\n\t}\n\n\tif len(l.SSN) > 0 {\n\t\tvalues.Add(\"legal_entity[ssn_last_4]\", l.SSN)\n\t}\n\n\tif len(l.PersonalID) > 0 {\n\t\tvalues.Add(\"legal_entity[personal_id_number]\", l.PersonalID)\n\t}\n\n\tif len(l.PhoneNumber) > 0 {\n\t\tvalues.Add(\"legal_entity[phone_number]\", l.PhoneNumber)\n\t}\n\n\tif len(l.BusinessTaxID) > 0 
{\n\t\tvalues.Add(\"legal_entity[business_tax_id]\", l.BusinessTaxID)\n\t}\n\n\tif len(l.BusinessVatID) > 0 {\n\t\tvalues.Add(\"legal_entity[business_vat_id]\", l.BusinessVatID)\n\t}\n\n\tif l.Verification.Document != nil {\n\t\tvalues.Add(\"legal_entity[verification][document]\", l.Verification.Document.ID)\n\t}\n\n\tl.Address.AppendDetails(values, \"legal_entity[address]\")\n\tl.AddressKana.AppendDetails(values, \"legal_entity[address_kana]\")\n\tl.AddressKanji.AppendDetails(values, \"legal_entity[address_kanji]\")\n\n\tl.PersonalAddress.AppendDetails(values, \"legal_entity[personal_address]\")\n\tl.PersonalAddressKana.AppendDetails(values, \"legal_entity[personal_address_kana]\")\n\tl.PersonalAddressKanji.AppendDetails(values, \"legal_entity[personal_address_kanji]\")\n\n\tfor i, owner := range l.AdditionalOwners {\n\t\tif len(owner.First) > 0 {\n\t\t\tvalues.Add(fmt.Sprintf(\"legal_entity[additional_owners][%v][first_name]\", i), owner.First)\n\t\t}\n\n\t\tif len(owner.Last) > 0 {\n\t\t\tvalues.Add(fmt.Sprintf(\"legal_entity[additional_owners][%v][last_name]\", i), owner.Last)\n\t\t}\n\n\t\tif owner.DOB.Day > 0 {\n\t\t\tvalues.Add(fmt.Sprintf(\"legal_entity[additional_owners][%v][dob][day]\", i), strconv.Itoa(owner.DOB.Day))\n\t\t}\n\n\t\tif owner.DOB.Month > 0 {\n\t\t\tvalues.Add(fmt.Sprintf(\"legal_entity[additional_owners][%v][dob][month]\", i), strconv.Itoa(owner.DOB.Month))\n\t\t}\n\n\t\tif owner.DOB.Year > 0 {\n\t\t\tvalues.Add(fmt.Sprintf(\"legal_entity[additional_owners][%v][dob][year]\", i), strconv.Itoa(owner.DOB.Year))\n\t\t}\n\n\t\towner.Address.AppendDetails(values, fmt.Sprintf(\"legal_entity[additional_owners][%v][address]\", i))\n\t}\n}", "func (e *DriedEar) Description() {\n\tfmt.Printf(\"Hi, i'm a size %d Pork Ear\\n\", e.size)\n}", "func (s *PersonFieldsService) Create(ctx context.Context, opt *PersonFieldCreateOptions) (*ProductFieldResponse, *Response, error) {\n\treq, err := s.client.NewRequest(http.MethodPost, \"/personFields\", nil, 
opt)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar record *ProductFieldResponse\n\n\tresp, err := s.client.Do(ctx, req, &record)\n\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn record, resp, nil\n}", "func (o TargetProjectOutput) Description() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *TargetProject) pulumi.StringOutput { return v.Description }).(pulumi.StringOutput)\n}", "func (d *Details) Description() string {\n\treturn d.Stringv(\"description\")\n}", "func (*Degree) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_talent_v4beta1_profile_proto_rawDescGZIP(), []int{11}\n}", "func newPersonFull(vres *peopleviews.PersonView) *Person {\n\tres := &Person{\n\t\tID: vres.ID,\n\t\tName: vres.Name,\n\t\tMemo: vres.Memo,\n\t}\n\treturn res\n}", "func (p Person) String() string {\n return \"(name:\" + p.name + \" - age: \" + strconv.Itoa(p.age)+\"years)\"\n }", "func (o LookupOrganizationResultOutput) Description() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupOrganizationResult) string { return v.Description }).(pulumi.StringOutput)\n}" ]
[ "0.53405565", "0.53214055", "0.53019834", "0.52893215", "0.5285766", "0.5262536", "0.5256847", "0.5167916", "0.51579344", "0.5156347", "0.510197", "0.5032917", "0.49925327", "0.4950455", "0.487419", "0.48429468", "0.4841958", "0.48322546", "0.48281252", "0.48076427", "0.4798168", "0.47900638", "0.47885796", "0.47881287", "0.47798696", "0.47668898", "0.4754021", "0.47393653", "0.4710277", "0.47102532", "0.4700091", "0.4687356", "0.46540505", "0.46476805", "0.4642768", "0.4634902", "0.46332243", "0.46230882", "0.46069878", "0.46028998", "0.45899943", "0.45890966", "0.45782036", "0.45780474", "0.45762706", "0.4563649", "0.45493668", "0.45467952", "0.45421326", "0.45416752", "0.45389402", "0.45329368", "0.45315927", "0.4527037", "0.45265096", "0.45244375", "0.45183715", "0.4515896", "0.45149586", "0.45123756", "0.4510964", "0.45096704", "0.45042026", "0.44952917", "0.44915465", "0.44899526", "0.44875124", "0.4484829", "0.44745836", "0.4470075", "0.44677958", "0.44675595", "0.44636962", "0.4460426", "0.44552773", "0.4450535", "0.44466272", "0.4446313", "0.44450513", "0.44439492", "0.44380382", "0.44362912", "0.44343603", "0.4429636", "0.44288903", "0.44229344", "0.44222987", "0.44209877", "0.44148633", "0.4413178", "0.44100434", "0.44077393", "0.4405444", "0.44026786", "0.4401485", "0.4399383", "0.43956244", "0.43953928", "0.43940237", "0.43927112" ]
0.63413006
0
get all persons associated with the profile, persons will contain only person data and faces
func (dc *dta_controller) DescribePersonsByProfile(aCtx auth.Context, prfId int64) ([]*PersonDesc, error) { pp, err := dc.Persister.GetPartitionTx("FAKE") if err != nil { return nil, err } //transaction err = pp.Begin() if err != nil { return nil, err } defer pp.Commit() pers, err := pp.FindPersons(&model.PersonsQuery{ProfileId: &prfId}) if err != nil { return nil, err } res := make([]*PersonDesc, 0, len(pers)) if len(pers) == 0 { return res, nil } err = aCtx.AuthZCamAccess(pers[0].CamId, auth.AUTHZ_LEVEL_OU) if err != nil { return res, err } // Fake profile prf := &model.Profile{Id: prfId} pm := map[int64]*model.Profile{prfId: prf} prsIds := make([]string, len(pers)) persMap := make(map[string]*PersonDesc) for i, p := range pers { prsIds[i] = p.Id pd := new(PersonDesc) pd.Faces = make([]*model.Face, 0, 1) pd.Person = p pd.Profiles = pm persMap[p.Id] = pd res = append(res, pd) } faces, err := pp.FindFaces(&model.FacesQuery{PersonIds: prsIds}) if err != nil { return res, err } for _, f := range faces { pd, ok := persMap[f.PersonId] if ok { pd.Faces = append(pd.Faces, f) } else { dc.logger.Error("DescribePersonsByProfile(): very strange - there is no descriptor for personId=", f.PersonId, ", but there is a face=", f) } } return res, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cli *Client) PeopleProfile(id string, fields []string) (r map[string]interface{}, e error) {\n\tvar opt map[string]interface{}\n\n\tif len(fields) > 0 {\n\t\topt = map[string]interface{}{\n\t\t\t\"fields\": fields,\n\t\t}\n\t}\n\n\tr, e = cli.call(\"GET\", \"people\", id, \"\", opt)\n\n\treturn r, e\n}", "func (m *User) GetPeople()([]Personable) {\n return m.people\n}", "func getSpecificPersons(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Println(\"Get Specific HIT\")\n\tparams := mux.Vars(r)\n\tresult, err := db.Query(\"SELECT pAge,pName FROM Persons WHERE pAge >= ?\", params[\"age\"])\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer result.Close()\n\tvar pers []Person\n\tfor result.Next() {\n\t\tvar per Person\n\t\terr := result.Scan(&per.Age, &per.Name)\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\tpers = append(pers, per)\n\t}\n\tjson.NewEncoder(w).Encode(pers)\n}", "func GetPeople() models.People { return people }", "func GetPeople(dbConnection *gorm.DB, username string) []model.Person {\n\tvar persons []model.Person\n\tquery := dbConnection.Preload(\"Gists.Files\").Table(\"people\")\n\tif username != \"\" {\n\t\tquery = query.Where(\"github_username = ?\", username)\n\t}\n\tquery.Find(&persons)\n\treturn persons\n}", "func GetAllPerson(c *gin.Context) {\n\tperson, _ := models.LoadPeople()\n\tc.JSON(http.StatusOK, person)\n\n}", "func GetPersons(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar Persons []db.Person\n\trows, err := Db.Query(\"SELECT * FROM people\")\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Error in fetching all Persons %s\", err.Error()), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfor rows.Next() {\n\t\tvar r db.Person\n\t\terr := rows.Scan(&r.Id, &r.Name, &r.City, &r.ContactNo, &r.PhotoUrl)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Error in fetching all Persons 
%s\", err.Error()), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tPersons = append(Persons, r)\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(Persons)\n}", "func GetPersons(w http.ResponseWriter, r *http.Request) {\n\tsession := utils.GetMongoSession()\n\tdefer session.Close()\n\n\tsession.SetMode(mgo.Monotonic, true)\n\n\tvar errDB error\n\tc := session.DB(\"test_db\").C(\"persons\")\n\n\tresults := make([]interface{}, 0)\n\terrDB = c.Find(bson.M{}).Select(bson.M{\"_id\": 0}).Sort(\"-name\").All(&results)\n\n\tif errDB != nil {\n\t\tpanic(errDB)\n\t}\n\n\tutils.SendJSONResponse(w, 0, \"Success\", results)\n}", "func GetAll() *[]Person {\n\treturn &persons\n}", "func GetPerson(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t//PopulateInitialData()\n\tparams := mux.Vars(r)\n\tfor _, item := range people {\n\t\tif item.ID == params[\"id\"] {\n\t\t\tjson.NewEncoder(os.Stdout).Encode(item)\n\t\t\tjson.NewEncoder(w).Encode(item)\n\t\t\treturn\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(&Person{})\n}", "func GetPersonRelations(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tid := r.URL.Query().Get(\"id\")\n\tif id == \"\" {\n\t\thttp.Error(w, \"id parameter is not found\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tvar relations []db.Relation\n\trows, err := Db.Query(\"SELECT * FROM relations where p1=?\", id)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Error in fetching all Persons %s\", err.Error()), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfor rows.Next() {\n\t\tvar r db.Relation\n\t\terr := rows.Scan(&r.P1, &r.P2, &r.Name)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Error in fetching all Persons %s\", err.Error()), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\trelations = append(relations, 
r)\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(relations)\n}", "func GetPeople() []Person {\n\n\tsession := getSession()\n\tdefer session.Close()\n\n\tconnect := session.DB(\"people\").C(\"People\")\n\n\tvar persons []Person\n\n\t// TODO: Fix your problem here, why did you not check if there was an error\n\t// and why did you not log it.\n\terr := connect.Find(bson.M{}).All(&persons)\n\n\tfmt.Println(\"All :\", &persons)\n\n\treturn persons\n}", "func GetPeopleByPage(w http.ResponseWriter, r *http.Request) {\n\tmyDb, err := db.StartDB(\"mydb.db\")\n\tif err != nil {\n\t\tfmt.Printf(\"Fail in open database: %v\\n\", err)\n\t\treturn\n\t}\n\n\t// Verify token\n\ttoken := r.Header.Get(\"AuthToken\")\n\tif (!myDb.IsLogIn([]byte(token))) {\n\t\tfmt.Printf(\"Unauthorized: %v\\n\", err)\n\t\t// 401: Unauthorized\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\t\n\t// Get people by page\n\tr.ParseForm()\n\tpage, err := strconv.Atoi(r.Form[\"page\"][0])\n\n\tdata := myDb.SearchByPage(\"people\", page)\n\tif data != nil {\n\t\tfmt.Printf(\"Read body error: %v\\n\", err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n}", "func getPeople(w http.ResponseWriter, r *http.Request) {\r\n\tw.Header().Set(\"Content-Type\", \"application/json\")\r\n\r\n\tvar people []models.Person\r\n\tcollection := models.ConnectDB()\r\n\tcursor, err := collection.Find(context.TODO(), bson.M{})\r\n\tif err != nil {\r\n\t\tlog.Fatal(err)\r\n\t}\r\n\tdefer cursor.Close(context.TODO())\r\n\tfor cursor.Next(context.TODO()) {\r\n\t\tvar person models.Person\r\n\t\tcursor.Decode(&person)\r\n\t\tpeople = append(people, person)\r\n\t}\r\n\tif err := cursor.Err(); err != nil {\r\n\t\tlog.Fatal(err)\r\n\t}\r\n\tjson.NewEncoder(w).Encode(people)\r\n}", "func getPerson(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", 
\"application/json\")\n\tfmt.Println(\"GET HIT\")\n\tvar persons []Person\n\tresult, err := db.Query(\"SELECT * FROM Persons\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer result.Close()\n\tfor result.Next() {\n\t\tvar person Person\n\t\terr := result.Scan(&person.Age, &person.Name)\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\tpersons = append(persons, person)\n\t}\n\tfmt.Println(\"Response from db\", persons)\n\tjson.NewEncoder(w).Encode(persons)\n}", "func (ps *PersonService) List(ids []int, opts ...FuncOption) ([]*Person, error) {\n\turl, err := ps.client.multiURL(PersonEndpoint, ids, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar p []*Person\n\n\terr = ps.client.get(url, &p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn p, nil\n}", "func (m MariaDB) All(ctx context.Context) ([]entity.PersonalData, error) {\n\tsqlQuery := fmt.Sprintf(\"SELECT * FROM person\")\n\tvar p personalData\n\tvar persons []entity.PersonalData\n\trows, err := m.Person.QueryContext(ctx, sqlQuery)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not make query\")\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\terr = rows.Scan(&p.ID, &p.Name, &p.LastName, &p.Phone, &p.Email, &p.YearOfBirth)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"could not scan rows\")\n\t\t}\n\t\tpersons = append(persons, p.transmit())\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"rows error\")\n\t}\n\treturn persons, nil\n}", "func GetPersons(db *sql.DB) []Person {\n\treturn []Person{}\n}", "func (rs *PersonResultSet) All() ([]*Person, error) {\n\tvar result []*Person\n\tfor rs.Next() {\n\t\trecord, err := rs.Get()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult = append(result, record)\n\t}\n\treturn result, nil\n}", "func getPerson(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t// fmt.Fprintf(w, \"hello, %s!\\n\", ps.ByName(\"name\"))\n\tpersonID := 
ps.ByName(\"id\")\n\tfor _, item := range people {\n\t\tif item.ID == personID {\n\t\t\tjson.NewEncoder(w).Encode(item)\n\t\t\treturn\n\t\t}\n\t}\n\tfmt.Fprintf(w, \"<h1>No DATA</h1>\")\n}", "func GetPeople() []Person {\n\treturn all()\n}", "func AllPeople() []*Person {\n\treturn people\n}", "func (ps *PersonService) Persons(query string) ([]*Person, error) {\n\tvar pp = make([]*Person, 0, 0)\n\tif err := ps.SQL(query).QueryStructs(&pp); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pp, nil\n}", "func (u User) Profiles(t database.Transaction) []goth.User {\n\tif u.IsAnonymous() {\n\t\treturn nil\n\t}\n\tvar profileSlice []goth.User\n\tprofiles := make(map[string]map[string]goth.User)\n\terr := t.Read(u.ID()+\"/profiles\", &profiles)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor provider := range profiles {\n\t\tfor account := range profiles[provider] {\n\t\t\tprofileSlice = append(profileSlice, profiles[provider][account])\n\t\t}\n\t}\n\treturn profileSlice\n}", "func (a *api) h_POST_persons_persId_profiles(c *gin.Context) {\n\tpersId := c.Param(\"persId\")\n\ta.logger.Info(\"POST /persons/\", persId, \"/profiles\")\n\n\tvar p Profile\n\tif a.errorResponse(c, bindAppJson(c, &p)) {\n\t\treturn\n\t}\n\n\tprf, err := a.profile2mprofile(&p)\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\n\taCtx := a.getAuthContext(c)\n\tif a.errorResponse(c, aCtx.AuthZHasOrgLevel(prf.OrgId, auth.AUTHZ_LEVEL_OU)) {\n\t\treturn\n\t}\n\n\tpid, err := a.Dc.InsertProfile(prf)\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\n\tdesc, err := a.Dc.DescribePerson(aCtx, persId, false, false)\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\tdesc.Person.ProfileId = pid\n\tif a.errorResponse(c, a.Dc.UpdatePerson(desc.Person)) {\n\t\treturn\n\t}\n\tc.Status(http.StatusNoContent)\n}", "func GetPerson(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tfor _, item := range people {\n\t\tif item.ID == params[\"id\"] 
{\n\t\t\tjson.NewEncoder(w).Encode(item)\n\t\t\treturn\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tjson.NewEncoder(w).Encode(&model.Person{})\n\t//r = mux.Vars(r)\n}", "func GetPeople(db *gorm.DB) func(c echo.Context) error {\n return func(c echo.Context) error {\n // get user\n if user, err := GetUser(c, db); err == nil {\n people := []models.User{}\n // get search key\n key := c.QueryParam(\"key\")\n if len(key) > 0 {\n key := \"%\" + key + \"%\"\n // search for other people but user\n db.Where(\"user_name LIKE ? OR email LIKE ? OR first_name LIKE ? OR last_name LIKE ?\", key, key, key, key).\n Not(\"id = ?\", user.ID).\n Find(&people)\n }\n return c.JSON(http.StatusOK, people)\n } else {\n return c.JSON(http.StatusBadRequest, map[string]string{\"message\": err.Error()})\n }\n }\n}", "func GetPeople(w http.ResponseWriter, r *http.Request) {\n\tvar people []model.Person\n\n\tvar db = database.DB()\n\n\tif err := db.Find(&people).Error; err != nil {\n\t\tfmt.Printf(\"can not get all people from db: %v\\n\", err)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(errors.ErrorMsg{\"can not get all people from db\"})\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(&people)\n}", "func getPerson(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tparams := mux.Vars(r)\n\tfor _, item := range people {\n\t\tif item.Socialsecurity == params[\"socialsecurity\"] {\n\t\t\tjson.NewEncoder(w).Encode(item)\n\t\t\treturn\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(people)\n}", "func GetPerson(w http.ResponseWriter, r *http.Request) error {\n\tparams := mux.Vars(r)\n\tfor _, item := range people {\n\t\tif item.ID == params[\"id\"] {\n\t\t\treturn writeJSON(w, http.StatusOK, item)\n\t\t}\n\t}\n\treturn nil\n}", 
"func (p *PersonServer) ListPerson(query *higrpc.PersonQuery, stream higrpc.PersonRoute_ListPersonServer) error {\n\tfmt.Println(\"PersonServer::ListPerson is called. id :\", query.Id, \", name :\", query.Name)\n\n\t// ignore query fields, only send hard coded responses\n\tfor i := 0; i < 5; i++ {\n\t\t_ = stream.Send(&higrpc.PersonResponse{\n\t\t\tId: int64(i),\n\t\t\tName: \"Person\" + strconv.Itoa(i),\n\t\t\tAge: int32(i),\n\t\t})\n\t}\n\treturn nil\n}", "func getAll(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-type\", \"application/json\")\n\n\t// sending query over db object and storing respose in var result\n\tresult, err := db.Query(\"SELECT fname, lname, email, pword, id FROM person\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer result.Close()\n\n\t// to fetch one record at a time from result\n\tfor result.Next() {\n\n\t\t// creating a variable person to store the and then show it\n\t\tvar person Person\n\t\terr := result.Scan(&person.Fname, &person.Lname, &person.Email, &person.Pword, &person.Id)\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\tpeople = append(people, person)\n\t}\n\t// Encode json to be sent to client machine\n\tjson.NewEncoder(w).Encode(people)\n}", "func (s *StorageMemory) GetPeople() ([]Person, error) {\n\treturn s.People, nil\n}", "func GetPerson(writer http.ResponseWriter, requets *http.Request, ps httprouter.Params) {\n\tid := ps.ByName(\"id\")\n\tfor _, item := range people {\n\t\tif item.ID == id { // If this condition teue than it returns item\n\t\t\tjson.NewEncoder(writer).Encode(item)\n\t\t\treturn\n\t\t}\n\t}\n\tjson.NewEncoder(writer).Encode(&Person{})\n}", "func (p *PeopleAPI) Get(textFilter string) ([]types.Person, error) {\n\treturn p.db.FindPeople(textFilter)\n}", "func (ps *PersonService) Search(qry string, opts ...FuncOption) ([]*Person, error) {\n\turl, err := ps.client.searchURL(PersonEndpoint, qry, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar p 
[]*Person\n\n\terr = ps.client.get(url, &p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn p, nil\n}", "func (c *Client) GetPersons(\n\tpersonGroupId string,\n) (processResult []cognitive.FaceGetPersonsResult, err error) {\n\treturn cognitive.FaceGetPersons(\n\t\tc.Location,\n\t\tc.ApiKey,\n\t\tpersonGroupId,\n\t)\n}", "func GetPerson(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tparams := mux.Vars(r)\n\tid, err := strconv.Atoi(params[\"id\"])\n\tif err != nil {\n\t\tfmt.Println(\"Oops, \", err)\n\t}\n\tfor _, searchID := range models.People {\n\t\tif searchID.ID == id {\n\t\t\tjson.NewEncoder(w).Encode(searchID)\n\t\t}\n\t}\n}", "func GetPersonEndpoint(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tfor _, item := range people {\n\t\tif item.ID == params[\"id\"] {\n\t\t\tjson.NewEncoder(w).Encode(item)\n\t\t\treturn\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(&Person{})\n}", "func (repository *Datastore)GetProfile(username string)(*user.Person,error){\n\tperson := newUser() //initialize user.Person and will used to store profile info\n\tquery := `SELECT * FROM userRepository WHERE username = ?`\n\terr := repository.Db.Get(&person, query, username) //get person profile details\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &person, nil\n}", "func (o *User) GetPeople() []MicrosoftGraphPerson {\n\tif o == nil || o.People == nil {\n\t\tvar ret []MicrosoftGraphPerson\n\t\treturn ret\n\t}\n\treturn *o.People\n}", "func People(engine *gin.Engine, midlewares ...gin.HandlerFunc) {\n\tpersonGroup := engine.Group(\"rs/crud/person\")\n\n\tpersonGroup.GET(\"/:id\", controllers.GetPerson)\n\tpersonGroup.GET(\"/\", controllers.GetPagePerson)\n\tpersonGroup.PUT(\"/:id\", controllers.PutPerson)\n\tpersonGroup.DELETE(\"/:id\", controllers.DeletePerson)\n}", "func GetPeopleEndpoint(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tvar 
people []Person\n\tcollection := client.Database(\"villagepeople\").Collection(\"people\")\n\tctx, _ := context.WithTimeout(context.Background(), 10*time.Second)\n\tcursor, err := collection.Find(ctx, bson.M{})\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(`{ \"message\": \"` + err.Error() + `\" }`))\n\t\treturn\n\t}\n\n\tdefer cursor.Close(ctx)\n\tfor cursor.Next(ctx) {\n\t\tvar person Person\n\t\tcursor.Decode(&person)\n\t\tpeople = append(people, person)\n\t}\n\tif err := cursor.Err(); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(`{ \"message\": \"` + err.Error() + `\" }`))\n\t\treturn\n\t}\n\tjson.NewEncoder(w).Encode(people)\n}", "func (user *User) VisitProfile() (*Profile, error) {\n\tp := &Profile{User: user}\n\n\twg := &sync.WaitGroup{}\n\tinfo := &sync.WaitGroup{}\n\terrChan := make(chan error, 10)\n\n\t// Fetch Profile Info\n\twg.Add(1)\n\tgo func(wg *sync.WaitGroup) {\n\t\tinfo.Add(1)\n\t\tdefer wg.Done()\n\t\tdefer info.Done()\n\t\tif err := user.Info(\"entry_point\", \"profile\", \"from_module\", \"blended_search\"); err != nil {\n\t\t\terrChan <- err\n\t\t}\n\t}(wg)\n\n\t// Fetch Friendship\n\twg.Add(1)\n\tgo func(wg *sync.WaitGroup) {\n\t\tdefer wg.Done()\n\t\tfr, err := user.GetFriendship()\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t}\n\t\tp.Friendship = fr\n\t}(wg)\n\n\t// Fetch Feed\n\twg.Add(1)\n\tgo func(wg *sync.WaitGroup) {\n\t\tdefer wg.Done()\n\t\tfeed := user.Feed()\n\t\tp.Feed = feed\n\t\tif !feed.Next() && feed.Error() != ErrNoMore {\n\t\t\terrChan <- feed.Error()\n\t\t}\n\t}(wg)\n\n\t// Fetch Stories\n\twg.Add(1)\n\tgo func(wg *sync.WaitGroup) {\n\t\tdefer wg.Done()\n\t\tstories, err := user.Stories()\n\t\tp.Stories = stories\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t}\n\t}(wg)\n\n\t// Fetch Highlights\n\twg.Add(1)\n\tgo func(wg *sync.WaitGroup) {\n\t\tdefer wg.Done()\n\t\th, err := user.Highlights()\n\t\tp.Highlights = h\n\t\tif err != nil 
{\n\t\t\terrChan <- err\n\t\t}\n\t}(wg)\n\n\t// Fetch Featured Accounts\n\twg.Add(1)\n\tgo func(wg *sync.WaitGroup) {\n\t\tdefer wg.Done()\n\t\t_, err := user.GetFeaturedAccounts()\n\t\tif err != nil {\n\t\t\tuser.insta.warnHandler(err)\n\t\t}\n\t}(wg)\n\n\t// Fetch IGTV\n\twg.Add(1)\n\tgo func(wg *sync.WaitGroup) {\n\t\tdefer wg.Done()\n\t\tinfo.Wait()\n\t\tif user.IGTVCount > 0 {\n\t\t\tigtv, err := user.IGTV()\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t}\n\t\t\tp.IGTV = igtv\n\t\t}\n\t}(wg)\n\n\twg.Wait()\n\tselect {\n\tcase err := <-errChan:\n\t\treturn p, err\n\tdefault:\n\t\treturn p, nil\n\t}\n}", "func Read(client *mongo.Client, firstname string) (*[]Person, error) {\r\n\tvar m bson.M\r\n\r\n\tpersons := make([]Person, 0, 10)\r\n\r\n\tpersonCollection := client.Database(\"MyApp\").Collection(\"person\")\r\n\tctx, _ := context.WithTimeout(context.Background(), 10*time.Second)\r\n\tfilterCursor, err := personCollection.Find(ctx, bson.M{\"firstname\": firstname})\r\n\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\terr = filterCursor.All(ctx, &persons)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tbsonBytes, _ := bson.Marshal(m)\r\n\tbson.Unmarshal(bsonBytes, &persons)\r\n\r\n\treturn &persons, err\r\n}", "func (m *User) GetPhotos()([]ProfilePhotoable) {\n return m.photos\n}", "func (user *User) GetAnotherProfile(r *http.Request) ([]Post, User, error) {\n\n\t//userQR := DB.QueryRow(\"SELECT * FROM users WHERE id = ?\", user.Temp)\n\tu := User{}\n\tpostsU := []Post{}\n\n\t//err = userQR.Scan(&u.ID, &u.FullName, &u.Email, &u.Password, &u.IsAdmin, &u.Age, &u.Sex, &u.CreatedTime, &u.City, &u.Image)\n\terr = DB.QueryRow(\"SELECT id, full_name, email, isAdmin, age, sex, created_time, city, image FROM users WHERE id = ?\", user.Temp).Scan(&u.ID, &u.FullName, &u.Email, &u.IsAdmin, &u.Age, &u.Sex, &u.CreatedTime, &u.City, &u.Image)\n\tif u.Image[0] == 60 {\n\t\tu.SVG = true\n\t}\n\tu.ImageHTML = 
base64.StdEncoding.EncodeToString(u.Image)\n\tpsu, err := DB.Query(\"SELECT * FROM posts WHERE creator_id=?\", u.ID)\n\n\tdefer psu.Close()\n\n\tfor psu.Next() {\n\n\t\terr = psu.Scan(&post.ID, &post.Title, &post.Content, &post.CreatorID, &post.CreateTime, &post.UpdateTime, &post.Image, &post.Like, &post.Dislike)\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t}\n\t\t//AuthorForPost\n\t\tpost.Time = post.CreateTime.Format(\"2006 Jan _2 15:04:05\")\n\t\tpostsU = append(postsU, post)\n\t}\n\tif err != nil {\n\t\treturn nil, u, err\n\t}\n\treturn postsU, u, nil\n}", "func getPeople(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tjson.NewEncoder(w).Encode(people)\n}", "func (d *database) getProfile(person string) (profile string, err error) {\n\tquery := fmt.Sprintf(\"SELECT letter_content FROM letters WHERE opened == 1 AND letter_purpose == '%s' AND sender == '%s' ORDER BY time DESC;\", purpose.ActionProfile, person)\n\tlogger.Log.Debug(query)\n\trows, err := d.db.Query(query)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"getProfile\")\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\t// loop through rows\n\tfor rows.Next() {\n\t\terr = rows.Scan(&profile)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"getProfile\")\n\t\t\treturn\n\t\t}\n\t\tbreak\n\t}\n\n\terr = rows.Err()\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"getProfile\")\n\t}\n\treturn\n}", "func DefaultListProfile(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*Profile, error) {\n\tin := Profile{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(ProfileORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &ProfileORM{}, &Profile{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tif hook, ok := interface{}(&ormObj).(ProfileORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []ProfileORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(ProfileORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*Profile{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func GetPropiedadesByUser(c *gin.Context) {\n\tvar prop []Models.Propiedad\n\tparams, ok := c.Request.URL.Query()[\"userId\"]\n\n\tif(!ok){\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusBadRequest,\n\t\t\t\"message\": \"Invalid param\",\n\t\t}})\n\t\tc.AbortWithStatus(http.StatusBadRequest)\n\t}\n\terr := Models.GetAllPropiedadesByUser(&prop, string(params[0]))\n\tif (err != nil || !ok ) {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": err.Error(),\n\t\t}})\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tfmt.Println(c.Request.URL.Query())\n\t\t page, _ := strconv.Atoi(c.DefaultQuery(\"page\", \"1\"))\n\t\t limit, _ := strconv.Atoi(c.DefaultQuery(\"limit\", \"15\"))\n\t\n\t\t paginator := pagination.Paging(&pagination.Param{\n\t\t\tDB: Config.DB.Model(&prop).Preload(\"SharedAreas\").Select(\"*\").Joins(\"inner join PropiedadUsuario on PropiedadUsuario.propiedad_id = Propiedades.id\").Where(\"PropiedadUsuario.user_id = ?\", params).Find(&prop),\n\t\t\tPage: page,\n\t\t\tLimit: limit,\n\t\t\tOrderBy: []string{\"id\"},\n\t\t\tShowSQL: 
true,\n\t\t}, &prop)\n\t\tc.JSON(http.StatusOK, paginator)\n\t}\n}", "func FetchProfiles() map[string]string {\n\ttoken := auth.NewToken()\n\tquery := queryPayload{\"SELECT Id, ProfileName FROM CommunicationProfile\"}\n\tpayload, err := json.Marshal(query)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treq, err := http.NewRequest(\"POST\", viper.GetString(\"baseurl\")+\"/v1/action/query\", bytes.NewBuffer(payload))\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", \"Bearer \"+token.Val)\n\n\tresponse, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != 200 {\n\t\tbody, err := ioutil.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Fatal(string(body))\n\t}\n\n\tdec := json.NewDecoder(response.Body)\n\tvar body profilesQueryResponse\n\tif err = dec.Decode(&body); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif !body.Done {\n\t\tlog.Fatalln(\"there are more communication profile to query\")\n\t}\n\n\tresult := make(map[string]string)\n\tfor _, p := range body.Records {\n\t\tresult[p.Name] = p.ID\n\t}\n\n\treturn result\n}", "func GetPerson(id string) (*Person, error) {\n\tfor _, p := range all() {\n\t\tif p.ID == id {\n\t\t\treturn &p, nil\n\t\t}\n\t}\n\n\treturn nil, errors.New(\"Person not found\")\n}", "func GetPeople(writer http.ResponseWriter, requets *http.Request, _ httprouter.Params) {\n\tjson.NewEncoder(writer).Encode(people)\n}", "func (i *Image) People(width, height int) string {\n\treturn i.Image().ImageURL(width, height, \"people\")\n}", "func GetPersonas(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tjson.NewEncoder(w).Encode(personas)\n}", "func (mpRepo *ManageProfileRepository) Profiles() ([]entity.Profile, []error) {\n\tprfs := []entity.Profile{}\n\terrs := 
mpRepo.conn.Find(&prfs).GetErrors()\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\treturn prfs, errs\n}", "func GetAllPeople(w http.ResponseWriter, r *http.Request) {\n\tpeople := defaultPeople()\n\tresponseJSON(w, 200, people)\n}", "func UsersByLastName(c *gin.Context) {\n\tlastName := c.Param(\"name\")\n\tusers := user.GetByLastName(lastName)\n\tc.JSON(200, gin.H{\n\t\t\"data\": users,\n\t})\n}", "func (pq *PersonQuery) All(ctx context.Context) ([]*Person, error) {\n\tif err := pq.prepareQuery(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\treturn pq.sqlAll(ctx)\n}", "func GetPeople(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t//PopulateInitialData()\n\tjson.NewEncoder(w).Encode(people)\n}", "func (c *Client) GetPersonGroups(\n\tstart string,\n\ttop int,\n) (processResult []cognitive.FaceGetPersonGroupsResult, err error) {\n\treturn cognitive.FaceGetPersonGroups(\n\t\tc.Location,\n\t\tc.ApiKey,\n\t\tstart,\n\t\ttop,\n\t)\n}", "func getFullProfile(client *http.Client, optional ...string) (FacebookPublicProfile, error) {\n\turl := getAPIUrl(\"/me?fields=name,locale,age_range,gender\")\n\tif len(optional) == 1 {\n\t\turl = optional[0]\n\t}\n\tresp, err := client.Get(url)\n\tdefer resp.Body.Close()\n\tdecoder := json.NewDecoder(resp.Body)\n\tvar profile FacebookPublicProfile\n\terr = decoder.Decode(&profile)\n\tif err != nil {\n\t\treturn FacebookPublicProfile{}, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn FacebookPublicProfile{}, errors.New(\"Unathorized. 
Check token.\")\n\t}\n\treturn profile, nil\n}", "func (m *Group) GetPhotos()([]ProfilePhotoable) {\n return m.photos\n}", "func GetPeople(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tjson.NewEncoder(w).Encode(people)\n}", "func (e *Event) People() []*Person {\n\tpeople := make([]*Person, len(e.people))\n\tcopy(people, e.people)\n\treturn people\n}", "func Show(w http.ResponseWriter, r *http.Request) {\r\n\tdb := dbconn()\r\n\tnper := r.URL.Query().Get(\"firstname\")\r\n\tselDB, err := db.Query(\"SELECT * FROM person WHERE firstname=?\", nper)\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\tper := person{}\r\n\tfor selDB.Next() {\r\n\t\tvar age int\r\n\t\tvar firstname, lastname, bloodgroup string\r\n\t\terr = selDB.Scan(&firstname, &lastname, &age, &bloodgroup)\r\n\t\tif err != nil {\r\n\t\t\tpanic(err.Error())\r\n\t\t}\r\n\t\tper.firstname = firstname\r\n\t\tper.lastname = lastname\r\n\t\tper.age = age\r\n\t\tper.bloodgroup = bloodgroup\r\n\t}\r\n\ttmpl.ExecuteTemplate(w, \"Show\", per)\r\n\tdefer db.Close()\r\n}", "func (p *PersonServer) GetPerson(ctx context.Context, query *higrpc.PersonQuery) (*higrpc.PersonResponse, error) {\n\tfmt.Println(\"PersonServer::GetPerson is called. 
id :\", query.Id, \", name :\", query.Name)\n\n\tif rand.Intn(100) < 5 {\n\t\treturn &higrpc.PersonResponse{}, nil\n\t}\n\n\tname := query.Name\n\tif name == \"\" {\n\t\tname = \"Zaccoding\"\n\t}\n\n\treturn &higrpc.PersonResponse{\n\t\tId: query.Id,\n\t\tName: name,\n\t\tAge: 15,\n\t}, nil\n}", "func All() ([]Person, error) {\n\n\tpersons := []Person{}\n\tfor key, element := range PersonMap {\n\t\tpersons = append(persons, element)\n\t\tfmt.Println(\"Key:\", key, \"=>\", \"Element:\", element)\n\t}\n\n\treturn persons, nil\n}", "func (r *QueryResolver) Person(args struct{ ID graphql.ID }) *PersonResolver {\n\tif p := peopleData[args.ID]; p != nil {\n\t\treturn &PersonResolver{p}\n\t}\n\treturn nil\n}", "func (a *App) GetAllProfiles(w http.ResponseWriter, r *http.Request) {\n\thandler.GetAllProfiles(a.DB, w, r)\n}", "func Index(w http.ResponseWriter, r *http.Request) {\r\n\tdb := dbconn()\r\n\tselDB, err := db.Query(\"SELECT * FROM person ORDER BY firstname DESC\")\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\tper := person{}\r\n\tres := []person{}\r\n\tfor selDB.Next() {\r\n\t\tvar firstname, lastname, bloodgroup string\r\n\t\tvar age int\r\n\t\terr = selDB.Scan(&firstname, &lastname, &age, &bloodgroup)\r\n\t\tif err != nil {\r\n\t\t\tpanic(err.Error())\r\n\t\t}\r\n\t\tper.firstname = firstname\r\n\t\tper.lastname = lastname\r\n\t\tper.age = age\r\n\t\tper.bloodgroup = bloodgroup\r\n\t\tres = append(res, per)\r\n\t}\r\n\ttmpl.ExecuteTemplate(w, \"Index\", res)\r\n\tdefer db.Close()\r\n}", "func (r *RestAPI) ListUsers(desiredAttrs []string) ([]internal.Person, error) {\n\terrLog := make(chan string, 1000)\n\tpeople := make(chan internal.Person, 20000)\n\tvar wg sync.WaitGroup\n\tr.logHttpTimeout()\n\n\tattributesToRead := internal.AddStringToSlice(r.IDAttribute, desiredAttrs)\n\tfor _, f := range r.Filters {\n\t\tattributesToRead = internal.AddStringToSlice(f.Attribute, attributesToRead)\n\t}\n\tfor _, p := range r.setConfig.Paths 
{\n\t\twg.Add(1)\n\t\tgo r.listUsersForPath(attributesToRead, p, &wg, people, errLog)\n\t}\n\n\twg.Wait()\n\tclose(people)\n\tclose(errLog)\n\n\tif len(errLog) > 0 {\n\t\tvar errs []string\n\t\tfor msg := range errLog {\n\t\t\terrs = append(errs, msg)\n\t\t}\n\t\treturn []internal.Person{}, fmt.Errorf(\"errors listing users from %s: %s\", r.BaseURL, strings.Join(errs, \",\"))\n\t}\n\n\treturn r.filterPeople(people)\n}", "func (p *PersonServer) GetPersonChat(stream higrpc.PersonRoute_GetPersonChatServer) error {\n\tfmt.Println(\"PersonServer::GetPersonChat is called.\")\n\n\tfor {\n\t\tin, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(\">> Id :\", in.Id, \", Name:\", in.Name)\n\t\t_ = stream.Send(&higrpc.PersonResponse{\n\t\t\tId: in.Id,\n\t\t\tName: in.Name,\n\t\t\tAge: int32(in.Id),\n\t\t})\n\t}\n\tfmt.Println(\">>> Complete\")\n\treturn nil\n}", "func PersonsIndex(c *gin.Context) {\r\n\tvar lis []models.Person\r\n\r\n\tdb, _ := c.Get(\"db\")\r\n\tconn := db.(gorm.DB)\r\n\r\n\t// Migrate the schema\r\n\tconn.AutoMigrate(&models.Person{})\r\n\r\n\tconn.Find(&lis)\r\n\tc.JSON(http.StatusOK, gin.H{\r\n\t\t\"msg\": \"thank you\",\r\n\t\t\"r\": lis,\r\n\t})\r\n\r\n}", "func (s *SmartContract) QueryAllProfiles(ctx contractapi.TransactionContextInterface) ([]QueryProfileResult, error) {\n\tstartKey := \"\"\n\tendKey := \"\"\n\n\tresultsIterator, err := ctx.GetStub().GetStateByRange(startKey, endKey)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resultsIterator.Close()\n\n\tresults := []QueryProfileResult{}\n\n\tfor resultsIterator.HasNext() {\n\t\tqueryResponse, err := resultsIterator.Next()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif strings.Contains(queryResponse.Key, \"profile-\"){\n\t\t\tprofile := new(UserProfile)\n\t\t\t_ = json.Unmarshal(queryResponse.Value, profile)\n\n\t\t\tqueryResult := QueryProfileResult{Key: queryResponse.Key, Record: 
profile}\n\t\t\tresults = append(results, queryResult)\n\t\t}\n\t\t\n\t}\n\n\treturn results, nil\n}", "func (a API) Profile(client *http.Client, user_id string, fields Fields) (j map[string]interface{}, err error) {\r\n\treturn a.request(client, \"profile\", map[string]string{\r\n\t\t\"id\": getUserIdString(user_id),\r\n\t\t\"fields\": fields.Encode(),\r\n\t}, nil)\r\n}", "func FindPeople(c *gin.Context) {\n query := c.Request.URL.Query()\n\n var people []models.Person\n\n if len(query) == 0 {\n models.DB.Find(&people)\n } else if query.Get(\"id\") != \"\" {\n if err := models.DB.Find(&people, \"id = ?\", query.Get(\"id\")).Error; err != nil {\n c.JSON(http.StatusBadRequest, gin.H{\"error\": \"Record not found!\"})\n return\n }\n } else if query.Get(\"created_by\") != \"\" {\n var searchPerson SearchPersonInput\n\n if bindErr := c.BindQuery(&searchPerson); bindErr != nil {\n c.JSON(http.StatusBadRequest, gin.H{\"error\": bindErr.Error()})\n return\n }\n\n if err := models.DB.Where(&models.Person{FirstName: searchPerson.FirstName, LastName: searchPerson.LastName, Email: searchPerson.Email, Phone: searchPerson.Phone, Birthday: searchPerson.Birthday, Title: searchPerson.Title, Department: searchPerson.Department, Self: searchPerson.Self}).Find(&people, \"created_by = ?\", query.Get(\"created_by\")).Error; err != nil {\n c.JSON(http.StatusBadRequest, gin.H{\"error\": \"Record not found!\"})\n return\n }\n } else {\n var searchPerson SearchPersonInput\n\n if bindErr := c.BindQuery(&searchPerson); bindErr != nil {\n c.JSON(http.StatusBadRequest, gin.H{\"error\": bindErr.Error()})\n return\n }\n\n if err := models.DB.Where(&models.Person{FirstName: searchPerson.FirstName, LastName: searchPerson.LastName, Email: searchPerson.Email, Phone: searchPerson.Phone, Birthday: searchPerson.Birthday, Title: searchPerson.Title, Self: searchPerson.Self}).Find(&people).Error; err != nil {\n c.JSON(http.StatusBadRequest, gin.H{\"error\": \"Record not found!\"})\n return\n }\n }\n\n 
c.JSON(http.StatusOK, gin.H{\"data\": people})\n}", "func getPeople(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(people)\n}", "func (a *API) Profiles(ctx context.Context, pageNum, pageSize int) (*ProfileResp, error) {\n\n\tall := pageNum < 1\n\tif all {\n\t\tpageSize = 100\n\t\tpageNum = 1\n\t}\n\n\tvar resp ProfileResp\n\tif err := a.Get(ctx, fmt.Sprintf(\"/api/v1/profile?pageNumber=%d&pageSize=%d\", pageNum, getPageSize(pageSize)), &resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif all {\n\t\tfor i := 1; i < resp.TotalPageCount; i++ {\n\t\t\tpg, err := a.Profiles(ctx, i+1, pageSize)\n\t\t\tif err != nil {\n\t\t\t\treturn &resp, err\n\t\t\t}\n\t\t\tresp.Profiles = append(resp.Profiles, pg.Profiles...)\n\t\t}\n\t\tresp.TotalPageCount = 1\n\t\tresp.TotalCount = len(resp.Profiles)\n\t\tresp.Count = resp.TotalCount\n\t\tresp.NextPageURL = \"\"\n\t\tresp.LastPageURL = \"\"\n\t}\n\n\treturn &resp, nil\n}", "func All() ([]Profession, error) {\n\tvar professions Data\n\tvar result []Profession\n\n\tjsonFile, err := os.Open(config.Cfg.WorldDataDirectory + \"/data/professions.json\")\n\tif err != nil {\n\t\terr = fmt.Errorf(\"could not open data file: %w\", err)\n\t\treturn []Profession{}, err\n\t}\n\n\tdefer jsonFile.Close()\n\n\tbyteValue, _ := ioutil.ReadAll(jsonFile)\n\n\tjson.Unmarshal(byteValue, &professions)\n\n\tall := professions.Professions\n\n\tif len(all) == 0 {\n\t\terr = fmt.Errorf(\"no professions returned from database\")\n\t\treturn []Profession{}, err\n\t}\n\n\tnone := Profession{\n\t\tName: \"none\",\n\t\tDescription: \"no profession\",\n\t}\n\tall = append(all, none)\n\n\tfor _, p := range all {\n\t\tp.Tags = append(p.Tags, p.Name)\n\t\tresult = append(result, p)\n\t}\n\n\treturn result, nil\n}", "func LoadAll(ctx context.Context, cfg Config) ([]*Person, error) {\n\tdb, err := getDB(cfg)\n\tif err != nil {\n\t\tcfg.Logger().Error(\"failed to get DB connection. 
err: %s\", err)\n\t\treturn nil, err\n\t}\n\n\t// set latency budget for the database call\n\tsubCtx, cancel := context.WithTimeout(ctx, 1*time.Second)\n\tdefer cancel()\n\n\t// perform DB select\n\trows, err := db.QueryContext(subCtx, sqlLoadAll)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\t_ = rows.Close()\n\t}()\n\n\tvar out []*Person\n\n\tfor rows.Next() {\n\t\t// retrieve columns and populate the person object\n\t\trecord, err := populatePerson(rows.Scan)\n\t\tif err != nil {\n\t\t\tcfg.Logger().Error(\"failed to convert query result. err: %s\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, record)\n\t}\n\n\tif len(out) == 0 {\n\t\tcfg.Logger().Warn(\"no people found in the database.\")\n\t\treturn nil, ErrNotFound\n\t}\n\n\treturn out, nil\n}", "func GetAllUsers(person *[]Person) (err error) {\n\tif err = Config.DB.Find(person).Error; err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (p *Profiler) All() []Profile {\n\tif p == nil {\n\t\t// If the profiler instance doesn't exist, then don't attempt to operate on it.\n\t\treturn []Profile{}\n\t}\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\treturn p.profiles\n}", "func getUserProfile(w http.ResponseWriter, r *http.Request) {\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tvar body user\n\te := json.NewDecoder(r.Body).Decode(&body)\n\tif e != nil {\n\n\t\tfmt.Print(e)\n\t}\n\tvar result primitive.M // an unordered representation of a BSON document which is a Map\n\terr := userCollection.FindOne(context.TODO(), bson.D{{\"name\", body.Name}}).Decode(&result)\n\tif err != nil {\n\n\t\tfmt.Println(err)\n\n\t}\n\tjson.NewEncoder(w).Encode(result) // returns a Map containing document\n\n}", "func (dc *dta_controller) DescribePerson(aCtx auth.Context, pId string, includeDetails, includeMeta bool) (*PersonDesc, error) {\n\tpp, err := dc.Persister.GetPartitionTx(\"FAKE\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//transaction\n\terr = pp.Begin()\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\tdefer pp.Commit()\n\n\tperson, err := pp.GetPersonById(pId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = aCtx.AuthZCamAccess(person.CamId, auth.AUTHZ_LEVEL_OU)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !includeDetails {\n\t\treturn &PersonDesc{Person: person}, nil\n\t}\n\n\t// collecting faces\n\tfaces, err := pp.FindFaces(&model.FacesQuery{PersonIds: []string{pId}, Short: true})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprfArr := []int64{}\n\tif person.MatchGroup > 0 {\n\t\tprof2MGs, err := pp.GetProfilesByMGs([]int64{person.MatchGroup})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor pid, _ := range prof2MGs {\n\t\t\tprfArr = append(prfArr, pid)\n\t\t}\n\t}\n\tif person.ProfileId > 0 {\n\t\tprfArr = append(prfArr, person.ProfileId)\n\t}\n\n\tprofs, err := pp.GetProfiles(&model.ProfileQuery{ProfileIds: prfArr, AllMeta: includeMeta})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprofiles := make(map[int64]*model.Profile)\n\tfor _, p := range profs {\n\t\tprofiles[p.Id] = p\n\t}\n\tres := new(PersonDesc)\n\tres.Faces = faces\n\tres.Person = person\n\tres.Profiles = profiles\n\treturn res, nil\n}", "func (s *Storer) ListByProfile(ctx context.Context, profileID string) ([]accounts.Account, error) {\n\tquery := listByProfileSQL(ctx, profileID)\n\tqueryStr, err := query.PostgreSQLString()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trows, err := s.db.Query(queryStr, query.Args()...) 
//nolint:sqlclosecheck // the closeRows helper isn't picked up\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer closeRows(ctx, rows)\n\tvar accts []accounts.Account\n\tfor rows.Next() {\n\t\tvar account Account\n\t\terr = pan.Unmarshal(rows, &account)\n\t\tif err != nil {\n\t\t\treturn accts, err\n\t\t}\n\t\taccts = append(accts, fromPostgres(account))\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\taccounts.ByLastUsedDesc(accts)\n\treturn accts, nil\n}", "func GetPeople(w http.ResponseWriter, r *http.Request) error {\n\treturn writeJSON(w, http.StatusOK, people)\n}", "func (a *API) ProfileSearch(ctx context.Context, searchID string, pageNum int) (*ProfileResp, error) {\n\n\tall := pageNum < 1\n\tif all || pageNum < 1 {\n\t\tpageNum = 1\n\t}\n\n\tvar resp ProfileResp\n\turlStr := fmt.Sprintf(\"/api/v1/profile?searchId=%s&pageSize=100&pageNumber=%d\", searchID, pageNum)\n\tif err := a.Get(ctx, urlStr, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif all && resp.TotalPageCount > 1 {\n\t\tfor i := 1; i < resp.TotalPageCount; i++ {\n\t\t\tpg, err := a.ProfileSearch(ctx, searchID, i+1)\n\t\t\tif err != nil {\n\t\t\t\treturn &resp, err\n\t\t\t}\n\t\t\tresp.Profiles = append(resp.Profiles, pg.Profiles...)\n\t\t}\n\t}\n\n\treturn &resp, nil\n}", "func (s *grpcServer) GetAllKeyPersons(ctx context.Context, req *pb.GetAllKeyPersonsRequest) (*pb.GetAllKeyPersonsResponse, error) {\n\t_, rep, err := s.getAllKeyPersons.ServeGRPC(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rep.(*pb.GetAllKeyPersonsResponse), nil\n}", "func GetPeopleById(w http.ResponseWriter, r *http.Request) {\n\tmyDb, err := db.StartDB(\"mydb.db\")\n\tif err != nil {\n\t\tfmt.Printf(\"Fail in open database: %v\\n\", err)\n\t\treturn\n\t}\n\n\t// Verify token\n\ttoken := r.Header.Get(\"AuthToken\")\n\tif (!myDb.IsLogIn([]byte(token))) {\n\t\tfmt.Printf(\"Unauthorized: %v\\n\", err)\n\t\t// 401: 
Unauthorized\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// Get people by ID\n\tvars := mux.Vars(r)\n\tpeopleId, err := strconv.Atoi(vars[\"peopleId\"])\n\n\tdata := myDb.SearchByID(\"people\", []byte(strconv.Itoa(peopleId)))\n\tif data != nil {\n\t\tfmt.Printf(\"Read body error: %v\\n\", err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\n\t// Write information to response\n\tw.Write(data)\n}", "func (s *PersonService) FindAll() []models.Person {\n\tvar persons []models.Person\n\tpersons = s.PersonRepository.FindAll()\n\n\treturn persons\n}", "func (p *ProfilesClient) ListByName(ctx context.Context, name string) (*apitypes.Profile, error) {\n\tresult := &apitypes.Profile{}\n\terr := p.client.RoundTrip(ctx, \"GET\", \"/profiles/\"+name, nil, nil, &result)\n\treturn result, err\n}", "func RetrieveProfileData(token string, params string) map[string]interface{} {\n\tendpoint := \"https://api.linkedin.com/v1/people/~\"\n\n\tif params == \"\" {\n\t\tparams = profileDataFullParams\n\t}\n\n\tlinkedInClient := LinkedInClient{\n\t\tToken: token,\n\t\tEndpoint: endpoint,\n\t\tParams: params,\n\t\tMethod: GET,\n\t}\n\n\treturn linkedInClient.Retrieve()\n}", "func TestRead(t *testing.T) {\r\n\tpersonsFiltered, err := Read(client, firstname)\r\n\tif err != nil {\r\n\t\tt.Fatalf(\"Failed read test :%s\", err)\r\n\t}\r\n\r\n\tfor _, value := range *personsFiltered {\r\n\t\tif value.ID == _id {\r\n\t\t\tt.Log(\"person exists :\", value.ID)\r\n\t\t\tbreak\r\n\t\t} else {\r\n\t\t\tt.Fatalf(\"Failed read test. 
\")\r\n\t\t}\r\n\t}\r\n}", "func FindPeopleByName(firstName, lastName string) []*Person {\n\tresult := make([]*Person, 0)\n\n\tfor _, person := range people {\n\t\tif person.FirstName == firstName && person.LastName == lastName {\n\t\t\tresult = append(result, person)\n\t\t}\n\t}\n\n\treturn result\n}", "func (v *Validator) GetPeopleVA(id int, page int, limit int) ([]model.VoiceActor, map[string]interface{}, int, error) {\n\tif id <= 0 {\n\t\treturn nil, nil, http.StatusBadRequest, errors.ErrInvalidID\n\t}\n\tif page <= 0 {\n\t\treturn nil, nil, http.StatusBadRequest, errors.ErrInvalidPage\n\t}\n\tif limit <= 0 {\n\t\treturn nil, nil, http.StatusBadRequest, errors.ErrInvalidLimit\n\t}\n\n\t// Get data.\n\tdata, meta, code, err := v.api.GetPeopleVA(id, page, limit)\n\tif err != nil {\n\t\treturn nil, nil, code, err\n\t}\n\n\t// Handle pagination.\n\tstart, current := limit*(page-1), len(data)-(page-1)*limit\n\tif current <= 0 {\n\t\tdata = []model.VoiceActor{}\n\t} else {\n\t\tif current < limit {\n\t\t\tlimit = current\n\t\t}\n\t\tdata = data[start : start+limit]\n\t}\n\n\treturn data, meta, http.StatusOK, nil\n}", "func GetProfile(c *fiber.Ctx) error {\n\tID := c.Query(\"id\")\n\n\tif len(ID) < 1 {\n\t\tc.Send(ID)\n\t\tc.Send(\"Parameter ID is required\")\n\t\tc.SendStatus(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tprofile, err := database.SearchProfile(ID)\n\tif err != nil {\n\t\tc.Send(\"Error Occurred\" + err.Error())\n\t\tc.SendStatus(http.StatusBadRequest)\n\t\treturn\n\t}\n\tif err := c.JSON(profile); err != nil {\n\t\tc.Status(500).Send(err)\n\t\treturn\n\t}\n\tc.Accepts(\"application/json\")\n\tc.SendStatus(http.StatusAccepted)\n}", "func (s *Service) Users(ctx context.Context, search string, first int, after string) ([]UserProfile, error) {\n\tsearch = strings.TrimSpace(search)\n\tafter = strings.TrimSpace(after)\n\tfirst = normalizePage(first)\n\tuid, auth := ctx.Value(KeyAuthUserID).(int64)\n\tquery, args, err := buildQuery(`\n\t\tSELECT id, email, 
username, avatar, followers_count, followees_count\n\t\t{{if .auth}}\n\t\t\t, followers.follower_id IS NOT NULL as following\n\t\t\t, followees.followee_id IS NOT NULL as followeed\n\t\t{{end}}\n\t\tFROM users\n\t\t{{if .auth}}\n\t\t\tLEFT JOIN follows AS followers ON followers.follower_id = @uid AND followers.followee_id = users.id\n\t\t\tLEFT JOIN follows AS followees ON followees.follower_id = users.id AND followees.followee_id = @uid\n\t\t{{end}}\n\t\t{{if or .search .after}}WHERE{{end}}\n\t\t{{if .search}}username ILIKE '%' || @search || '%'{{end}}\n\t\t{{if and .search .after}}AND{{end}}\n\t\t{{if .after}}username > @after{{end}}\n\t\tORDER BY username ASC\n\t\tLIMIT @first`, map[string]interface{}{\n\t\t\"auth\": auth,\n\t\t\"uid\": uid,\n\t\t\"search\": search,\n\t\t\"first\": first,\n\t\t\"after\": after,\n\t})\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not build users sql query: %v\", err)\n\t}\n\n\trows, err := s.db.QueryContext(ctx, query, args...)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not query select users: %v\", err)\n\t}\n\n\tdefer rows.Close()\n\tuu := make([]UserProfile, 0, first)\n\n\tfor rows.Next() {\n\t\tvar u UserProfile\n\t\tvar avatar sql.NullString\n\t\tdest := []interface{}{&u.ID, &u.Email, &u.Username, &avatar, &u.FollowersCount, &u.FolloweesCount}\n\n\t\tif auth {\n\t\t\tdest = append(dest, &u.Following, &u.Followeed)\n\t\t}\n\n\t\tif err = rows.Scan(dest...); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not scan user: %v\", err)\n\t\t}\n\n\t\tu.Me = auth && uid == u.ID\n\t\tif !u.Me {\n\t\t\tu.ID = 0\n\t\t\tu.Email = \"\"\n\t\t}\n\n\t\tif avatar.Valid {\n\t\t\tavatarURL := s.origin + \"/img/avatars/\" + avatar.String\n\t\t\tu.AvatarURL = &avatarURL\n\t\t}\n\n\t\tuu = append(uu, u)\n\t}\n\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not iterate user rows: %v\", err)\n\t}\n\n\treturn uu, nil\n}", "func RetrieveFriends(w http.ResponseWriter, r *http.Request) 
{\n\n\tw.Header().Set(\"Content-Type\", \"text/javascript\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tif r.Method != \"POST\" {\n\t\tfmt.Fprintln(w, \"bad request\")\n\t\treturn\n\t}\n\n\ttype Profile struct {\n\t\tID string\n\t\tName string\n\t\tAvatar string\n\t}\n\tr.ParseForm()\n\tID := r.Form[\"id\"][0]\n\tVC := r.Form[\"vc\"][0]\n\tvar user, friend structs.User\n\n\tvar friendsProfile []Profile\n\n\tcollection := session.DB(\"bkbfbtpiza46rc3\").C(\"users\")\n\n\tfindErr := collection.FindId(bson.ObjectIdHex(ID)).One(&user)\n\n\tif findErr == mgo.ErrNotFound || VC != user.Vc {\n\t\tfmt.Fprintln(w, \"-1\")\n\t\treturn\n\t}\n\n\tif findErr != nil {\n\t\tfmt.Fprintln(w, \"0\")\n\t\treturn\n\t}\n\n\tfor _, f := range user.FriendList {\n\n\t\tErr := collection.FindId(f).One(&friend)\n\n\t\tif Err == nil {\n\t\t\tfriendsProfile = append(friendsProfile, Profile{ID: friend.ID.Hex(), Name: friend.Name, Avatar: friend.Avatar})\n\t\t}\n\n\t}\n\n\tb, _ := json.Marshal(friendsProfile)\n\tresp := string(b)\n\n\tfmt.Fprintln(w, resp)\n\treturn\n\n}" ]
[ "0.66491324", "0.6552405", "0.6547704", "0.64154756", "0.63581175", "0.63417", "0.62925184", "0.6266934", "0.6181966", "0.60785556", "0.60619473", "0.60523766", "0.60416675", "0.59929335", "0.5971067", "0.5969846", "0.59448874", "0.5923718", "0.58933425", "0.58771753", "0.5872169", "0.58187795", "0.5817106", "0.5813549", "0.5801362", "0.5795458", "0.5781938", "0.57305133", "0.5721658", "0.56792146", "0.56694037", "0.5658424", "0.56439555", "0.5643401", "0.5620017", "0.5598256", "0.55912495", "0.55740553", "0.5543971", "0.5542546", "0.5526395", "0.5525065", "0.5495859", "0.54703087", "0.5459671", "0.5451645", "0.54425246", "0.54256845", "0.54084826", "0.53815836", "0.5381573", "0.53739136", "0.5360857", "0.5344617", "0.53425324", "0.53339034", "0.5323858", "0.53211665", "0.53119785", "0.53104556", "0.5306938", "0.52920926", "0.528283", "0.52770555", "0.5264736", "0.5259318", "0.52411526", "0.52301437", "0.5229449", "0.522527", "0.52172726", "0.521552", "0.5212793", "0.5211334", "0.5198928", "0.5198335", "0.51919925", "0.5165909", "0.5152852", "0.51513636", "0.51509786", "0.5136851", "0.513548", "0.5116406", "0.51136565", "0.5110929", "0.51014066", "0.51007247", "0.5100682", "0.5100665", "0.50944096", "0.5088989", "0.50861645", "0.5085456", "0.5079727", "0.507775", "0.5077419", "0.5071346", "0.50673646", "0.5061381" ]
0.67043453
0
Main is an action that calculates an expressions
func Main(args map[string]interface{}) map[string]interface{} { expr, ok := args["expr"].(string) if !ok { return mkMap("error", "no parameter expr") } op, a, b, err := parser.Parse(expr) if err != nil { return mkMap("error", err.Error()) } switch op { case "+": return mkMap("result", ops.Add(a, b)) case "*": return mkMap("result", ops.Mul(a, b)) default: return mkMap("error", "Unsupported Operation") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func main() {\n\tfmt.Println(\"ex1 :\", calculate(\"3+2*2\"))\n\tfmt.Println(\"ex2 :\", calculate(\"3/2\"))\n\tfmt.Println(\"ex3 :\", calculate(\"3+5/2\"))\n}", "func EvalMain(filePtr *string, src interface{}, parserOpts parser.Mode, TRACE bool, DEBUG bool, PRINT bool) SEXPItf{\n\tvar returnExpression SEXPItf\n\n\tfset := token.NewFileSet() // positions are relative to fset\n\n\tp, errp := parser.ParseInit(fset, *filePtr, src, parserOpts)\n\tif errp != nil {\n\t\tpanic(errp)\n\t}\n\tev, erre := EvalInit(fset, *filePtr, src, parser.AllErrors, TRACE, DEBUG)\n\tif erre != nil {\n\t\tpanic(erre)\n\t}\n\n\tfor true {\n\t\tstmt, tok := parser.ParseIter(p) \t// main iterator calls parse.stmt\n\t\tif tok == token.EOF {\n\t\t\tif DEBUG {\n\t\t\t\tprintln(\"EOF token found\")\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif stmt==nil {\n\t\t\tpanic(\"EvalMain: stmt==nil\")\n\t\t}\n\t\tsexp := EvalStmt(ev, stmt)\n\t\tif sexp != nil {\n\t\t\tif ev.Invisible { \t\t\t\t// invisibility is stored in the evaluator and is set during assignment\n\t\t\t\tev.Invisible = false\t\t// unsetting invisiblity again\n\t\t\t} else if PRINT{\n\t\t\t\tPrintResult(sexp)\n\t\t\t}\n\t\t\treturnExpression = sexp\n\t\t\tif ev.state == eofState {\n\t\t\t\tif DEBUG {\n\t\t\t\t\tprintln(\"terminating...\")\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn returnExpression\n}", "func evaluate(expression []string, actions ActionTable, stack *Stack) interface{} {\n\tfor _, t := range expression {\n\t\tvar action ActionFunc\n\t\tif _, err := strconv.ParseFloat(t, 64); err == nil {\n\t\t\taction = actions[\"NUMBER\"]\n\t\t} else {\n\t\t\tvar ok bool\n\t\t\tif action, ok = actions[t]; !ok {\n\t\t\t\taction = actions[\"__DEFAULT__\"]\n\t\t\t}\n\t\t}\n\t\taction(t, stack)\n\t}\n\treturn stack.Pop()\n}", "func main() {\n\t// Paren pass\n\t// split by mult\n\t// eval parts\n\t// multiply the rest\n\n\tbytes, _ := ioutil.ReadFile(\"2020/18/input.txt\")\n\tsum := 0\n\tfor _, line := range strings.Split(string(bytes), 
\"\\n\") {\n\t\tequation := strings.ReplaceAll(line, \" \", \"\")\n\t\tn, _ := strconv.Atoi(eval(equation))\n\t\tsum += n\n\t}\n\tfmt.Println(sum)\n}", "func main() {\r\n\r\n\t// initialize logger\r\n\toptimizer.InitLoggers()\r\n\r\n\t// get command line arguments\r\n\targs := os.Args[1:]\r\n\r\n\t// check for file name argument\r\n\tif len(args) >= 1 {\r\n\r\n\t\t// read in file\r\n\t\tdat, err := ioutil.ReadFile(args[0])\r\n\t\tif err != nil {\r\n\t\t\tfmt.Printf(\"%s\\n\", err.Error())\r\n\t\t\tos.Exit(1)\r\n\t\t}\r\n\r\n\t\tfmt.Println(\"========== TOKENIZER OUTPUT ==========\")\r\n\r\n\t\t// convert file contents into linked list of tokens\r\n\t\ttokens := optimizer.Tokenize(string(dat))\r\n\r\n\t\t// define expression function arities\r\n\t\tfuncDefs := make(map[string]int)\r\n\t\tfuncDefs[\"sin\"] = 1\r\n\r\n\t\t\r\n\t\t/*token := tokens.Front()\r\n\t\tfor token != nil {\r\n\t\t\tprint(token.Value.(optimizer.Token).ID, \"\\t\", token.Value.(optimizer.Token).Position, \"\\t\\\"\", token.Value.(optimizer.Token).Content, \"\\\"\\n\")\r\n\t\t\ttoken = token.Next()\r\n\t\t}*/\r\n\r\n\t\tfmt.Println(\"========== PARSER OUTPUT ==========\")\r\n\t\ttree, err := optimizer.Parse(tokens, funcDefs)\r\n\t\tif err != nil {\r\n\t\t\tfmt.Printf(\"parse error: %s\\n\", err.Error())\r\n\t\t\tos.Exit(1)\r\n\t\t}\r\n\r\n\t\t/*for i, v := range tree.Children() {\r\n\t\t\tunit, ok := v.(optimizer.ParseTreeUnit)\r\n\t\t\tif ok {\r\n\t\t\t\tfmt.Printf(\"%d: %s\\n\", i, unit.Name)\r\n\t\t\t}\r\n\r\n\t\t}*/\r\n\r\n\t\t//log.SetOutput(ioutil.Discard)\r\n\r\n\t\tfmt.Println(\"========== SOLVER OUTPUT ==========\")\r\n\r\n\t\tprops := optimizer.GetUnboundProperties(tree)\r\n\t\tfmt.Printf(\"Unbound properties: %s\\n\", props)\r\n\r\n\t\tenv := optimizer.CreateEnvironment(tree)\r\n\t\terr = optimizer.Solve(tree, env)\r\n\t\tif err != nil {\r\n\t\t\tlog.Fatal(err)\r\n\t\t}\r\n\t\t/*for k, v := range env.GetMap() {\r\n\t\t\tfmt.Printf(\"property \\\"%s\\\" = \\\"%.20f\\\"\\n\", k, 
v)\r\n\t\t}*/\r\n\r\n\t\tfmt.Println(\"========== DONE ==========\")\r\n\t} else {\r\n\t\tfmt.Printf(\"not enough arguments! usage: ./optimizer file_name\\n\")\r\n\t}\r\n\r\n}", "func main() {\n\tapp := app.New()\n\tapp.SetIcon(resourceIconPng)\n\n\tc := newCalculator()\n\tc.loadUI(app)\n\tapp.Run()\n}", "func computing(input string) int {\n\t// Setup the input\n\tis := antlr.NewInputStream(input)\n\n\t// Create the Lexer\n\tlexer := parser.NewTQLLexer(is)\n\tstream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)\n\n\t// Create the Parser\n\tp := parser.NewTQLParser(stream)\n\n\t// Finally parse the expression (by walking the tree)\n\tvar listener Listener\n\tantlr.ParseTreeWalkerDefault.Walk(&listener, p.Computing())\n\t// log.Info(\"========results: \\n\", listener.pop())\n\treturn listener.pop()\n}", "func evaluateExpression(c *Context, exp interface{}) interface{} {\r\n var val interface{}\r\n\r\n // fmt.Printf(\"Evaluating type %T, \\n\", exp)\r\n switch t := exp.(type) {\r\n case int:\r\n // fmt.Printf(\"Returning int %d\\n\", t)\r\n val = t\r\n case *Integer:\r\n val = t.Number\r\n case *StringPrimitive:\r\n val = t.str\r\n case string:\r\n val = t\r\n case []interface{}:\r\n val = t\r\n case *InfixExpression:\r\n // fmt.Printf(\"Evaluating infix expresison %T l: %T, r:%T\\n\", t,t.leftNode.Exp, t.rightNode.Exp)\r\n //Get the value of the left node and right\r\n lVal := evaluateExpression(c, t.leftNode.Exp)\r\n rVal := evaluateExpression(c, t.rightNode.Exp)\r\n\r\n\r\n //then apply the correct infix operator to the values\r\n val = evaluateInfixExpression(c, t.opType, lVal, rVal)\r\n\r\n case *Identifier:\r\n // fmt.Printf(\"Was identifier returning %v\\n\", t.id)\r\n if(t.id == \"nil\") {\r\n val = NewNil(0)\r\n } else {\r\n // fmt.Printf(\"Posssible indeitEifer %T\\n\", c.values[t.id])\r\n val = evaluateExpression(c, c.values[t.id])\r\n }\r\n case *CallExpression:\r\n // fmt.Printf(\"Evaluation call to %s\\n\",t.callee)\r\n\r\n //get 
declaration of call\r\n callDec := c.lookup(t.callee).(*FuncDeclaration)\r\n if(callDec.returnType == \"\") { //no rreturn type = unit\r\n val = &UnitType{}\r\n } else { //Evaluate the expression of the body for a value\r\n //This should produce a value and will execute all\r\n //of the code of the body as well\r\n for i, _ := range callDec.paramNodes {\r\n paramDec := callDec.paramNodes[i].Exp.(*Param)\r\n paramValue := t.paramNodes[i].Exp\r\n c.values[paramDec.id] = evaluateExpression(c, paramValue)\r\n val = c.values[paramDec.id]\r\n }\r\n\r\n }\r\n\r\n if(t.callee == \"printi\") {\r\n invokePrintI(c, t)\r\n } else if(t.callee == \"print\") {\r\n invokePrint(c, t)\r\n } else if(t.callee == \"not\") {\r\n invokeNot(c, t)\r\n } else { //Regular other user defined function do your thing!\r\n //invoke the body\r\n //Get the declaration of the calling function so we can execute it\r\n callDec := c.lookup(t.callee).(*FuncDeclaration)\r\n // fmt.Printf(\"Invoking random func \\n\")\r\n evaluateExpression(c, callDec.body.Exp)\r\n }\r\n case *IfThenElseExpression:\r\n condition := evaluateExpression(c, t.condNode.Exp).(bool)\r\n // fmt.Printf(\"Cond was %v \\n\", condition)\r\n //If else is nil then its an IfThen Exp\r\n if(t.elseNode == nil) {\r\n val = &UnitType{}\r\n if(condition) { //if the condition is true evaluatie the code inside\r\n evaluateExpression(c, t.thenNode.Exp)\r\n }\r\n } else { //otherwise its and ifThenElse\r\n if(condition) {\r\n val = evaluateExpression(c, t.thenNode.Exp)\r\n } else {\r\n val = evaluateExpression(c, t.elseNode.Exp)\r\n }\r\n }\r\n case *SeqExpression:\r\n // Value is equivalent to the last node of the seqence expression\r\n if(len(t.nodes) == 0) {\r\n val = &UnitType{}\r\n } else {\r\n // fmt.Printf(\"Seq type was %T\\n\", t.nodes[len(t.nodes)-1].Exp)\r\n val = evaluateExpression(c, t.nodes[len(t.nodes)-1].Exp)\r\n }\r\n case *Nil:\r\n val = NewNil(0)\r\n case *ArrayExp:\r\n arrType := getType(c, 
c.lookup(t.typeId)).(*Identifier)\r\n val = c.lookup(arrType.id)\r\n case *ForExpression:\r\n val = &UnitType{}\r\n case *LetExpression:\r\n if(len(t.exps) == 0) {\r\n val = &UnitType{}\r\n } else {\r\n // fmt.Printf(\"%T is last exp type\\n\", t.exps[len(t.exps)-1].Exp)\r\n // val = getType(c, t.exps[len(t.exps)-1].Exp)\r\n }\r\n case *Assignment:\r\n val = &UnitType{}\r\n case *RecordExp:\r\n var slc []interface{}\r\n for _, fcNode := range t.fieldCreateNodes {\r\n if b, isABinding := fcNode.Exp.(*Binding); isABinding {\r\n slc = append(slc, evaluateExpression(c, b.exp.Exp))\r\n }\r\n }\r\n val = slc\r\n default:\r\n fmt.Fprintf(os.Stderr, \"Could not evaluate exp %T\\n\", t)\r\n os.Exit(4)\r\n }\r\n\r\n return val\r\n}", "func Calculate(equation string) string {\n\tequation = strings.Join(strings.Fields(equation), \"\")\n\n\toperator := regexp.MustCompile(`[+*/-]`).FindAllString(equation, -1)[0]\n\ts := regexp.MustCompile(`[+*/-]`).Split(equation, -1)\n\tif len(s) < 2 {\n\n\t}\n\tfirst, _ := strconv.Atoi(s[0])\n\tsecond, _ := strconv.Atoi(s[1])\n\tresult := 0\n\tswitch operator {\n\tcase \"+\":\n\t\tresult = first + second\n\t\tfmt.Printf(\"Result: %d\\n\", result)\n\tcase \"-\":\n\t\tresult = first - second\n\t\tfmt.Printf(\"Result: %d\\n\", result)\n\tcase \"*\":\n\t\tresult = first * second\n\t\tfmt.Printf(\"Result: %d\\n\", result)\n\tcase \"/\":\n\t\tresult = first / second\n\t\tfmt.Printf(\"Result: %d\\n\", result)\n\t}\n\n\tout := strconv.Itoa(result)\n\n\tfmt.Println(out)\n\treturn out\n}", "func Expression() {\n\tif IsAddop(Look) {\n\t\tEmitLn(\"CLR D0\")\n\t} else {\n\t\tTerm()\n\t}\n\tfor (Look == '+') || (Look == '-') {\n\t\tEmitLn(\"MOVE D0,-(SP)\")\n\t\tswitch Look {\n\t\tcase '+':\n\t\t\tAdd()\n\t\tcase '-':\n\t\t\tSubtract()\n\t\t}\n\t}\n}", "func calc(input string) {\r\n\t// Setup the input\r\n\tis := antlr.NewInputStream(input)\r\n\r\n\t// Create the Lexer\r\n\tlexer := parser.NewCalcLexer(is)\r\n\tstream := antlr.NewCommonTokenStream(lexer, 
antlr.TokenDefaultChannel)\r\n\r\n\t// Create the Parser\r\n\tp := parser.NewCalcParser(stream)\r\n\r\n\t// Finally parse the expression (by walking the tree)\r\n\tlistener := NewCalcListener()\r\n\tantlr.ParseTreeWalkerDefault.Walk(&listener, p.Start())\r\n}", "func main() {\n\tfmt.Println(\"Halo, belajar golang\")\n\t// sentence := TestAja()\n\n\t// fmt.Println(sentence)\n\n\tresult := calculaion.Add(26, 29)\n\tfmt.Println(result)\n}", "func evaluate(arg1 *vector.Vector, oper *vector.Vector, arg2 *vector.Vector) *vector.Vector {\n\t//Store the operator in a temp string, to save typing it out\n\tvar operS string\n\toperS = oper.At(0).(string)\n\tvar val1, val2 int \n\tvar err1, err2 os.Error\n\tval1, err1 = strconv.Atoi(arg1.At(0).(string))\n\tval2, err2 = strconv.Atoi(arg2.At(0).(string))\n\t//screens for consecutive operators\n\tif(err1 != nil || err2 != nil){\n\t\tfmt.Println(\"expr: syntax error\")\n\t\tos.Exit(-2)\n\t}\n\tvar result int = -1\n\t//Evaluate based on the operator\n\tif operS == \"+\" {\n\t\tresult = val1 + val2\n\t} else if operS == \"-\" {\n\t\tresult = val1 - val2\n\t} else if operS == \"/\" {\n\t\tresult = val1 / val2\n\t} else if operS == \"*\" {\n\t\tresult = val1 * val2\n\t} else if operS == \"%\" {\n\t\tresult = val1 % val2\n\t}\n\t//Clear the arg1 vector and add the result to it, then return\n\t//(saves memory by not creating a new vector)\n\targ1.Cut(0, arg1.Len())\n\targ1.Push(strconv.Itoa(result))\n\treturn arg1\n}", "func main() {\n\tfmt.Println(\"2 + 3 =\", mymath.Sum (2, 3))\n\tfmt.Println(\"4 + 7 =\", mymath.Sum (4, 7))\n\tfmt.Println(\"5 + 9 =\", mymath.Sum (5, 9))\n}", "func (s *BasearithmeticListener) EnterExpression(ctx *ExpressionContext) {}", "func (r *Resolver) Evaluate(args struct{ Expr string }) (Result, error) {\n\tvar result Result\n\tamount, err := calc.CalculateAmount(args.Expr)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\tunitName := amount.Units\n\tunit, err := NewUnit(unitName)\n\tif err != nil 
{\n\t\treturn result, err\n\t}\n\n\tresult = Result{amount.Value, unit, args.Expr}\n\tlog.Info(fmt.Sprintf(\"evaluate(%s) = %.2f %s\", args.Expr, result.value, result.units.pluralName))\n\treturn result, nil\n}", "func (opcode Sum) Execute() {\n\tfmt.Println(\"Execute Sum\")\n}", "func main() {\n\texpvars()\n\tservice.Run()\n}", "func main() {\n\texecute.Execute()\n}", "func (a *AddActivity) Eval(context activity.Context) (done bool, err error) {\n\n\t//mv := context.GetInput(ivMessage)\n\tnum1, _ := context.GetInput(ivNum1).(int)\n\tnum2, _ := context.GetInput(ivNum2).(int)\n\n\tactivityLog.Info(fmt.Sprintf(\"Num1: %d, Num2: %d\", num1, num2))\n\tactivityLog.Info(fmt.Sprintf(\"Addition is : %d\", num1+num2))\n\tcontext.SetOutput(ovAddition, num1+num2)\n\n\treturn true, nil\n}", "func main() {\n\tswitch len(os.Args) {\n\tcase 1:\n\t\tsolve()\n\tdefault:\n\t\tsolvePE()\n\t}\n}", "func Main(m rv.RenderModel) {\n\tmain(m)\n}", "func (ev *evaluator) eval(expr Expr) model.Value {\n\t// This is the top-level evaluation method.\n\t// Thus, we check for timeout/cancellation here.\n\tif err := contextDone(ev.ctx, \"expression evaluation\"); err != nil {\n\t\tev.error(err)\n\t}\n\n\tswitch e := expr.(type) {\n\tcase *AggregateExpr:\n\t\tvector := ev.evalVector(e.Expr)\n\t\treturn ev.aggregation(e.Op, e.Grouping, e.Without, e.KeepCommonLabels, e.Param, vector)\n\n\tcase *BinaryExpr:\n\t\tlhs := ev.evalOneOf(e.LHS, model.ValScalar, model.ValVector)\n\t\trhs := ev.evalOneOf(e.RHS, model.ValScalar, model.ValVector)\n\n\t\tswitch lt, rt := lhs.Type(), rhs.Type(); {\n\t\tcase lt == model.ValScalar && rt == model.ValScalar:\n\t\t\treturn &model.Scalar{\n\t\t\t\tValue: scalarBinop(e.Op, lhs.(*model.Scalar).Value, rhs.(*model.Scalar).Value),\n\t\t\t\tTimestamp: ev.Timestamp,\n\t\t\t}\n\n\t\tcase lt == model.ValVector && rt == model.ValVector:\n\t\t\tswitch e.Op {\n\t\t\tcase itemLAND:\n\t\t\t\treturn ev.vectorAnd(lhs.(vector), rhs.(vector), e.VectorMatching)\n\t\t\tcase 
itemLOR:\n\t\t\t\treturn ev.vectorOr(lhs.(vector), rhs.(vector), e.VectorMatching)\n\t\t\tcase itemLUnless:\n\t\t\t\treturn ev.vectorUnless(lhs.(vector), rhs.(vector), e.VectorMatching)\n\t\t\tdefault:\n\t\t\t\treturn ev.vectorBinop(e.Op, lhs.(vector), rhs.(vector), e.VectorMatching, e.ReturnBool)\n\t\t\t}\n\t\tcase lt == model.ValVector && rt == model.ValScalar:\n\t\t\treturn ev.vectorScalarBinop(e.Op, lhs.(vector), rhs.(*model.Scalar), false, e.ReturnBool)\n\n\t\tcase lt == model.ValScalar && rt == model.ValVector:\n\t\t\treturn ev.vectorScalarBinop(e.Op, rhs.(vector), lhs.(*model.Scalar), true, e.ReturnBool)\n\t\t}\n\n\tcase *Call:\n\t\treturn e.Func.Call(ev, e.Args)\n\n\tcase *MatrixSelector:\n\t\treturn ev.matrixSelector(e)\n\n\tcase *NumberLiteral:\n\t\treturn &model.Scalar{Value: e.Val, Timestamp: ev.Timestamp}\n\n\tcase *ParenExpr:\n\t\treturn ev.eval(e.Expr)\n\n\tcase *StringLiteral:\n\t\treturn &model.String{Value: e.Val, Timestamp: ev.Timestamp}\n\n\tcase *UnaryExpr:\n\t\tse := ev.evalOneOf(e.Expr, model.ValScalar, model.ValVector)\n\t\t// Only + and - are possible operators.\n\t\tif e.Op == itemSUB {\n\t\t\tswitch v := se.(type) {\n\t\t\tcase *model.Scalar:\n\t\t\t\tv.Value = -v.Value\n\t\t\tcase vector:\n\t\t\t\tfor i, sv := range v {\n\t\t\t\t\tv[i].Value = -sv.Value\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn se\n\n\tcase *VectorSelector:\n\t\treturn ev.vectorSelector(e)\n\t}\n\tpanic(fmt.Errorf(\"unhandled expression of type: %T\", expr))\n}", "func main() {\n\tfmt.Println(\"starting main function\")\n\tdisplayMessage(\"hi towfeeq\")\n\tdisplayValue(20)\n\tfmt.Println(\"ending main function\")\n\n\n}", "func main() {\n\tprojecteuler.Timed(calc)\n}", "func main() {\n\tprojecteuler.Timed(calc)\n}", "func CalculateHandler(bodyBytes []byte) float64 {\n\tvar data map[string]interface{}\n\terr := json.Unmarshal(bodyBytes, &data)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tvar eq equation\n\terr = util.FillStruct(&eq, data)\n\tif err != nil 
{\n\t\tfmt.Println(err)\n\t}\n\treturn util.Calculate(eq)\n}", "func TestEvaluatorArithmetic(t *testing.T) {\n\tvar values = make(map[string]int)\n\ttestCases := []TestCase{\n\t\t{\n\t\t\tname: \"short expression\",\n\t\t\texpression: \"1+2*3\",\n\t\t\texpectedValue: 7,\n\t\t},\n\t\t{\n\t\t\tname: \"long expression\",\n\t\t\texpression: \"4/2-1+5%2\",\n\t\t\texpectedValue: 2,\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tresult, err := evaluator.Evaluate(tc.expression, values)\n\t\t\tassert.NoError(t, err, \"unexpected error\")\n\t\t\tassert.Equal(t, tc.expectedValue, result)\n\t\t})\n\t}\n}", "func eval(expression TokenStream) (value int) {\n\ts := stack.New()\n\n\tfor _, token := range expression {\n\t\tif token.kind == OPERAND {\n\t\t\ts.Push(token)\n\t\t} else {\n\t\t\top1 := s.Pop().(Token)\n\t\t\top2 := s.Pop().(Token)\n\t\t\tvar result int\n\t\t\tswitch token.sValue {\n\t\t\tcase \"+\":\n\t\t\t\tresult = op1.iValue + op2.iValue\n\t\t\tcase \"*\":\n\t\t\t\tresult = op1.iValue * op2.iValue\n\t\t\t}\n\t\t\ts.Push(Token{kind: OPERAND, iValue: result})\n\t\t}\n\t}\n\n\tt := s.Pop().(Token)\n\tvalue = t.iValue\n\n\treturn\n}", "func (s *BasearithmeticListener) ExitExpression(ctx *ExpressionContext) {}", "func (expr Expression) Calculate() (float64, error) {\n\tresult, resultError := calc(expr)\n\tif resultError != nil {\n\t\treturn 0, resultError\n\t}\n\n\tfmt.Printf(\"%.2f %s %.2f = %.2f\\n\", expr.FirstOperand.Value, expr.Operator.Value, expr.SecondOperand.Value, result)\n\treturn result, nil\n}", "func ExampleEval() {\n\tfmt.Println(Eval(\"5\"))\n\tfmt.Println(Eval(\"1 + 2\"))\n\tfmt.Println(Eval(\"1 - 2 + 3\"))\n\tfmt.Println(Eval(\"3 * ( 3 + 1 * 3 ) / 2\"))\n\tfmt.Println(Eval(\"3 * ( ( 3 + 1 ) * 3 ) / 2\"))\n\t//OutPut:\n\t//5\n\t//3\n\t//2\n\t//9\n\t//18\n}", "func mathCommand(ctx *dgc.Ctx) {\n\t// Validate the arguments\n\tcodeblock := ctx.Arguments.AsCodeblock()\n\tif codeblock == nil 
{\n\t\tctx.RespondEmbed(embeds.InvalidUsage(ctx.Command.Usage))\n\t\treturn\n\t}\n\n\t// Check the rate limiter\n\tif !ctx.Command.NotifyRateLimiter(ctx) {\n\t\treturn\n\t}\n\n\t// Respond with a loading embed\n\tmsg, _ := ctx.Session.ChannelMessageSendEmbed(ctx.Event.ChannelID, embeds.Loading())\n\n\t// Evaluate the expression and respond with the result\n\tresult, err := utils.EvaluateMathematicalExpression(codeblock.Content)\n\tif err != nil {\n\t\tctx.RespondEmbed(embeds.Error(err.Error()))\n\t\treturn\n\t}\n\tctx.Session.ChannelMessageEditEmbed(ctx.Event.ChannelID, msg.ID, embeds.Success(result))\n}", "func main() {\n\twf.Run(run)\n}", "func main() {\n\n\tres := plus(1, 2) //Invoking a function normally\n\tfmt.Println(\"1+2 =\", res)\n\n\tres = plusPlus(1, 2, 3)\n\tfmt.Println(\"1+2+3 =\", res)\n}", "func main() {\n\terr := app.Execute()\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Error while Execute lookatch\")\n\t}\n}", "func execParseExpr(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := parser.ParseExpr(args[0].(string))\n\tp.Ret(1, ret, ret1)\n}", "func main() {\n\tfmt.Printf(\"Solution 1: %d\\n\", solve1())\n}", "func main() {\n\tfmt.Println(\"add(42, 13)=\", add(42, 13))\n\n\tfmt.Println(\"addThree(23, 45, 5.6):\", addThree(23, 45, 5.6))\n}", "func Calculate(substitutedExpression string) (float64, error) {\n\tvar node Node\n\tvar result Number\n\tvar p *Parser\n\tvar parseOk, evalOk bool\n\n\tsubstitutedExpression = NegativeNumberParser(substitutedExpression)\n\tp = new(Parser).Init(substitutedExpression)\n\tp.AddOperator('+', 1)\n\tp.AddOperator('-', 1)\n\tp.AddOperator('*', 2)\n\tp.AddOperator('/', 2)\n\tnode, parseOk = p.Parse()\n\tif parseOk {\n\t\tresult, evalOk = node.Eval()\n\t\tif evalOk {\n\t\t\treturn float64(result), nil // %v = default format\n\t\t} else {\n\t\t\treturn float64(result), fmt.Errorf(\"%s = Invalid Evaluation error\\n\", substitutedExpression)\n\t\t}\n\t} else {\n\t\treturn 0.0, fmt.Errorf(\"%s = Invalid 
Syntax error\\n\", substitutedExpression)\n\t}\n}", "func (x *ExprImpl) expr() {}", "func main() {\n\tx := 42\n\ty := 13\n\n\tprintln(x + y) // this should be add(x,y)!\n}", "func main() {\n\t// This expression should print 20\n\tfmt.Println(10 + 5 - (5 - 10))\n\n\t// This expression should print -16\n\tfmt.Println(-10 + 0.5 - (1 + 5.5))\n\n\t// This expression should print -25\n\tfmt.Println(5 + 10*(2-5))\n\n\t// This expression should print 0.5\n\tfmt.Println(0.5 * (2 - 1))\n\n\t// This expression should print 24\n\tfmt.Println((3+1)/2*10 + 4)\n\n\t// This expression should print 15\n\tfmt.Println(10 / 2 * (10 % 7))\n\n\t// This expression should print 40\n\tfmt.Println(100 / (5.0 / 2))\n}", "func RpnCalculateEquation(ctx context.Context, rpnCtx *RpnCtx, s string) (float64, error) {\n\t// funcname := \"RpnCalculateEquation\"\n\n\tvar (\n\t\terr error\n\t)\n\n\t// fmt.Printf(\"%s: entered\\n\", funcname)\n\tt := strings.Split(s, \" \")\n\t// fmt.Printf(\"%s: t = %#v\\n\", funcname, t)\n\n\tfor i := 0; i < len(t); i++ {\n\t\ts = t[i]\n\t\t// fmt.Printf(\"\\n%s: for loop parsing: %s\\n\", funcname, s)\n\t\tif len(s) > 0 {\n\t\t\tif s[0] == '$' { // is it a special notation?\n\t\t\t\tm := rpnVariable.FindStringSubmatchIndex(s)\n\t\t\t\tif m != nil {\n\t\t\t\t\tmatch := s[m[2]:m[3]]\n\t\t\t\t\tn, err := varResolve(ctx, rpnCtx, match)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn float64(0), err\n\t\t\t\t\t}\n\n\t\t\t\t\trpnCtx.stack = append(rpnCtx.stack, n)\n\t\t\t\t}\n\t\t\t} else if s[0] == '_' {\n\t\t\t\t// fmt.Printf(\"%s: found '_', pushing rpnCtx.amount = %8.2f\\n\", funcname, rpnCtx.amount)\n\t\t\t\trpnPush(rpnCtx, rpnCtx.amount)\n\t\t\t} else if ('0' <= s[0] && s[0] <= '9') || '.' 
== s[0] { // is it a number?\n\t\t\t\tm := rpnNumber.FindStringSubmatchIndex(s)\n\t\t\t\tmatch := s[m[0]:m[1]]\n\t\t\t\tn, _ := strconv.ParseFloat(match, 64)\n\t\t\t\trpnCtx.stack = append(rpnCtx.stack, n*rpnCtx.pf)\n\t\t\t} else if len(s) > 1 && s[0] == '-' && (('0' <= s[1] && s[1] <= '9') || '.' == s[1]) {\n\t\t\t\tm := rpnNumber.FindStringSubmatchIndex(s)\n\t\t\t\tmatch := s[m[0]:m[1]]\n\t\t\t\tn, _ := strconv.ParseFloat(match, 64)\n\t\t\t\trpnCtx.stack = append(rpnCtx.stack, n*rpnCtx.pf)\n\t\t\t} else if s[0] == '-' || s[0] == '+' || s[0] == '*' || s[0] == '/' { // is it an operator?\n\t\t\t\top := s[0:1]\n\t\t\t\tvar x, y float64\n\t\t\t\ty = rpnPop(rpnCtx)\n\t\t\t\tx = rpnPop(rpnCtx)\n\t\t\t\tswitch op {\n\t\t\t\tcase \"+\":\n\t\t\t\t\trpnCtx.stack = append(rpnCtx.stack, x+y)\n\t\t\t\tcase \"-\":\n\t\t\t\t\trpnCtx.stack = append(rpnCtx.stack, x-y)\n\t\t\t\tcase \"*\":\n\t\t\t\t\trpnCtx.stack = append(rpnCtx.stack, x*y)\n\t\t\t\tcase \"/\":\n\t\t\t\t\trpnCtx.stack = append(rpnCtx.stack, x/y)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// rpnPrintStack(rpnCtx)\n\t}\n\treturn rpnPop(rpnCtx), err\n}", "func main() {\n\tprintln(\"add:\", add(2, 3))\n}", "func fnCalc(ctx Context, doc *JDoc, params []string) interface{} {\n\tstats := ctx.Value(EelTotalStats).(*ServiceStats)\n\tif params == nil || len(params) != 1 {\n\t\tctx.Log().Error(\"error_type\", \"func_calc\", \"op\", \"calc\", \"cause\", \"wrong_number_of_parameters\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"wrong number of parameters in call to calc function\"), \"calc\", params})\n\t\treturn nil\n\t}\n\tfunctions := map[string]govaluate.ExpressionFunction{\n\t\t\"now\": func(args ...interface{}) (interface{}, error) {\n\t\t\treturn float64(time.Now().UnixNano() / 1e6), nil\n\t\t},\n\t}\n\texpr, err := govaluate.NewEvaluableExpressionWithFunctions(extractStringParam(params[0]), functions)\n\tif err != nil {\n\t\tctx.Log().Error(\"error_type\", \"func_calc\", \"op\", 
\"calc\", \"cause\", \"invalid_expression\", \"params\", params, \"error\", err.Error())\n\t\tstats.IncErrors()\n\t\tAddError(ctx, RuntimeError{fmt.Sprintf(\"invalid expression in call to calc function: %s\", err.Error()), \"calc\", params})\n\t\treturn nil\n\t}\n\tres, err := expr.Evaluate(nil)\n\tif err != nil {\n\t\tctx.Log().Error(\"error_type\", \"func_calc\", \"op\", \"calc\", \"cause\", \"invalid_evaluation\", \"params\", params, \"error\", err.Error())\n\t\tstats.IncErrors()\n\t\tAddError(ctx, RuntimeError{fmt.Sprintf(\"invalid evaluation in call to calc function: %s\", err.Error()), \"calc\", params})\n\t\treturn nil\n\t}\n\treturn res\n}", "func main() {\n\tapp := getRootCmd()\n\n\t// ignore error so we don't exit non-zero and break gfmrun README example tests\n\t_ = app.Execute()\n}", "func (c *ArithmeticController) Action() {\n\n\tvar err error\n\tvar x, y, answer int\n\tvar action = c.Ctx.Input.Param(\":action\")\n\tvar arithmetic func(int, int) (int, error)\n\tvar ok bool\n\tcached := false\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tc.Data[\"json\"] = m.Result{Action: action, Error: err.Error()}\n\t\t\tc.Ctx.Output.SetStatus(500)\n\t\t\tlogs.Error(\"Error... 
%s\", err.Error())\n\t\t} else {\n\t\t\tc.Data[\"json\"] = m.Result{Action: action, Answer: answer, Cached: cached, X: x, Y: y}\n\t\t}\n\t\tc.configurer.ServeJSON(&c.Controller, true)\n\t}()\n\n\tif x, y, err = c.validate(); err != nil {\n\t\treturn\n\t} else if arithmetic, ok = actionMap[action]; !ok {\n\t\terr = fmt.Errorf(\"'%s' is not a recognizable action \", action)\n\t\treturn\n\t} else if answer, ok = resultCache[createCacheKey(arithmetic, x, y)]; ok {\n\t\tcached = true\n\t\treturn\n\t} else if answer, err = arithmetic(x, y); err != nil {\n\t\treturn\n\t}\n\n\tresultCache[createCacheKey(arithmetic, x, y)] = answer\n\tlogs.Info(\"%+v\", resultCache)\n\n}", "func main() {\n\n\t\n\t\n\tif len(os.Args) <= 1 {\n\t\tPrintf(\"\\nUso: solver <file>\\n\")\n\t\tos.Exit(1)\n\t}\n\t\n\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\ti,j,Fi,Ci,Dj,Distancias := load(os.Args[1])\n\t\n\tXij := NewDense(i,j,nil)\n\t\n\tfor Iteracoes := 100000000; Iteracoes > 0; Iteracoes-- {\n\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t}\n}", "func main() {\n\n\tif err := qml.Run(run); err != nil {\n\t\tfmt.Fprint(os.Stderr, \"error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func (m *ExpressionMachine) Run(w io.Writer) {\n\tfor m.Exp.Reducible() {\n\t\tfmt.Fprintln(w, m.Exp)\n\t\tm.Step()\n\t}\n\tfmt.Fprintln(w, m.Exp)\n}", "func (e *binaryExprEvaluator) start() {\n\te.lhs.start()\n\te.rhs.start()\n\tgo e.run()\n}", "func main() {\n\ta := ToFloat(Prompt(\"Primo semiasse?\"))\n\tb := ToFloat(Prompt(\"Secondo semiasse?\"))\n\n\tAlert(EllipseArea(a, b))\n}", "func (i Interactor) Exec(a, b, c bool, d float64, e, f int) (string, float64, error) {\n\th, err := i.hh.Exec(a, b, c)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\n\treturn h, i.kh.Calc(h, d, e, f), nil\n}", "func (g math) Run(job Job, ctx *Ctx) (interface{}, error) {\n\tin := job.Data().(input)\n\n\treturn in.First + in.Second, nil\n}", "func (e *Evaluator) Evaluate(node ast.Node, env *object.Environment) object.Object {\n\te.Ctxt = 
node.Context()\n\tswitch node.(type) {\n\tcase *ast.Program:\n\t\tres := &object.StmtResults{}\n\t\tres.Results = []object.Object{}\n\n\t\t// adding statements\n\t\tfor _, stmt := range node.(*ast.Program).Statements {\n\t\t\tif ret, ok := stmt.(*ast.ReturnStatement); ok {\n\t\t\t\treturn e.Evaluate(ret, env)\n\t\t\t}\n\t\t\tresult := e.Evaluate(stmt, env)\n\t\t\tres.Results = append(res.Results, result)\n\t\t}\n\n\t\t// adding functions\n\t\t//todo: this should function differently than closures\n\t\tfor _, fn := range node.(*ast.Program).Functions {\n\t\t\tbody := fn.Body\n\t\t\tparams := fn.Params\n\t\t\tenv.Data[fn.Name.Value] = &object.Function{\n\t\t\t\tParams: params,\n\t\t\t\tBody: body,\n\t\t\t\tEnv: env,\n\t\t\t}\n\t\t}\n\n\t\t//todo: adding classes\n\n\t\treturn res\n\n\tcase ast.Statement:\n\t\tstmt := node.(ast.Statement)\n\n\t\tswitch node.(ast.Statement).(type) {\n\t\tcase *ast.LetStatement:\n\t\t\tletstmt := stmt.(*ast.LetStatement)\n\t\t\tval := e.Evaluate(letstmt.Value, env)\n\t\t\tenv.Set(letstmt.Name.Value, val)\n\t\t\treturn NULL\n\n\t\tcase *ast.ExprStatement:\n\t\t\texpr := stmt.(*ast.ExprStatement)\n\t\t\treturn e.Evaluate(expr.Expression, env)\n\n\t\tcase *ast.ReturnStatement:\n\t\t\tretstmt := stmt.(*ast.ReturnStatement)\n\t\t\tres := e.Evaluate(retstmt.Value, env)\n\t\t\treturn &object.Return{Inner: res}\n\n\t\tcase *ast.WhileStatement:\n\t\t\te.loopcount++\n\t\t\twhilestmt := stmt.(*ast.WhileStatement)\n\n\t\t\tvar result object.Object\n\n\t\t\tfor {\n\t\t\t\tval := e.Evaluate(whilestmt.Condition, env)\n\t\t\t\tif !evaluateTruthiness(val) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tresult = e.evalBlockStmt(whilestmt.Body, env)\n\t\t\t\tif object.IsErr(result) || object.IsBreak(result) {\n\t\t\t\t\tif object.IsBreak(result) {\n\t\t\t\t\t\te.loopcount--\n\t\t\t\t\t\treturn NULL\n\t\t\t\t\t}\n\t\t\t\t\treturn result\n\t\t\t\t}\n\t\t\t}\n\n\t\t\te.loopcount--\n\t\t\treturn result\n\n\t\tcase *ast.BreakStatement:\n\t\t\tif e.loopcount == 0 
{\n\t\t\t\treturn &object.Exception{\n\t\t\t\t\tMsg: \"Cannot use break outside of loop\",\n\t\t\t\t\tCon: node.(ast.Statement).Context(),\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn &object.Break{}\n\n\t\tcase *ast.BlockStatement:\n\t\t\tblkstmt := stmt.(*ast.BlockStatement)\n\t\t\treturn e.evalBlockStmt(blkstmt, env)\n\n\t\tdefault:\n\t\t\treturn NULL\n\t\t}\n\n\tcase ast.Expression:\n\t\texpr := node.(ast.Expression)\n\n\t\tswitch node.(ast.Expression).(type) {\n\t\tcase *ast.Identifier:\n\t\t\tident := expr.(*ast.Identifier)\n\t\t\tif data, ok := env.Get(ident.Value); ok {\n\t\t\t\treturn data\n\t\t\t}\n\t\t\tif bltn, ok := builtins[ident.Value]; ok {\n\t\t\t\treturn bltn\n\t\t\t}\n\t\t\treturn &object.Exception{\n\t\t\t\tMsg: fmt.Sprintf(\"Could not find symbol %s\", ident.Value),\n\t\t\t\tCon: ident.Context(),\n\t\t\t}\n\n\t\tcase *ast.PrefixExpr:\n\t\t\tpexpr := expr.(*ast.PrefixExpr)\n\t\t\treturn e.evalPrefixExpr(pexpr, env)\n\n\t\tcase *ast.InfixExpr:\n\t\t\tiexpr := expr.(*ast.InfixExpr)\n\t\t\treturn e.evalInfixExpr(iexpr, env)\n\n\t\tcase *ast.IfExpression:\n\t\t\tifexpr := expr.(*ast.IfExpression)\n\t\t\tcondition := e.Evaluate(ifexpr.Condition, env)\n\t\t\tif condition == nil {\n\t\t\t\treturn &object.Exception{\n\t\t\t\t\tMsg: \"If condition returned nil\",\n\t\t\t\t\tCon: ifexpr.Context(),\n\t\t\t\t}\n\t\t\t}\n\t\t\tif evaluateTruthiness(condition) {\n\t\t\t\treturn e.Evaluate(ifexpr.Result, env)\n\t\t\t}\n\t\t\tif ifexpr.Alternative != nil {\n\t\t\t\tswitch ifexpr.Alternative.(type) {\n\t\t\t\tcase *ast.BlockStatement:\n\t\t\t\t\treturn e.Evaluate(ifexpr.Alternative.(*ast.BlockStatement), env)\n\t\t\t\tcase *ast.IfExpression:\n\t\t\t\t\treturn e.Evaluate(ifexpr.Alternative.(*ast.IfExpression), env)\n\t\t\t\tdefault:\n\t\t\t\t\treturn &object.Exception{\n\t\t\t\t\t\tMsg: \"Invalid else branch\",\n\t\t\t\t\t\tCon: ifexpr.Alternative.Context(),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase *ast.FnLiteral:\n\t\t\tfnlit := expr.(*ast.FnLiteral)\n\t\t\tparams := 
fnlit.Params\n\t\t\tbody := fnlit.Body\n\t\t\treturn &object.Function{Params: params, Env: env, Body: body}\n\n\t\tcase *ast.FunctionCall:\n\t\t\t// asserting type\n\t\t\tfncall := expr.(*ast.FunctionCall)\n\n\t\t\t// resolving to object\n\t\t\tfunction := e.Evaluate(fncall.Ident, env)\n\t\t\tif object.IsErr(function) {\n\t\t\t\treturn function\n\t\t\t}\n\n\t\t\targs := e.evalExpressions(fncall.Params, env)\n\t\t\tif len(args) == 1 && object.IsErr(args[0]) {\n\t\t\t\treturn args[0]\n\t\t\t}\n\n\t\t\treturn e.applyFunction(function, args)\n\n\t\tcase *ast.DotExpression:\n\t\t\t//todo\n\t\t\treturn &object.Exception{\n\t\t\t\tMsg: \"DotExpr: unimplemented\",\n\t\t\t\tCon: node.Context(),\n\t\t\t}\n\n\t\tcase *ast.Int:\n\t\t\tintexpr := node.(ast.Expression).(*ast.Int)\n\t\t\treturn &object.Integer{Value: intexpr.Inner}\n\t\tcase *ast.Flt:\n\t\t\tfltexpr := node.(ast.Expression).(*ast.Flt)\n\t\t\treturn &object.Float{Value: fltexpr.Inner}\n\t\tcase *ast.Str:\n\t\t\tstrexpr := node.(ast.Expression).(*ast.Str)\n\t\t\treturn &object.String{Value: strexpr.Inner}\n\t\tcase *ast.Bool:\n\t\t\tboolexpr := node.(ast.Expression).(*ast.Bool)\n\t\t\treturn nativeBooltoObj(boolexpr.Inner)\n\t\tcase *ast.Array:\n\t\t\tarray := node.(ast.Expression).(*ast.Array)\n\t\t\tarr := &object.Array{}\n\n\t\t\t// preallocating so we don't have to waste cycles\n\t\t\t// reallocating every time we append\n\t\t\telements := make([]object.Object, 0, len(array.Elements))\n\n\t\t\tfor _, elem := range array.Elements {\n\t\t\t\telements = append(elements, e.Evaluate(elem, env))\n\t\t\t}\n\t\t\tarr.Elements = elements\n\n\t\t\treturn arr\n\n\t\tcase *ast.Map:\n\t\t\thash := node.(ast.Expression).(*ast.Map)\n\t\t\tnewmap := &object.Map{}\n\t\t\tnewmap.Elements = make(map[object.HashKey]object.Object)\n\n\t\t\tfor key, val := range hash.Elements {\n\t\t\t\tnkey, nval := e.Evaluate(key, env), e.Evaluate(val, env)\n\n\t\t\t\tif object.IsErr(nkey) {\n\t\t\t\t\treturn nkey\n\t\t\t\t}\n\t\t\t\tif 
object.IsErr(nval) {\n\t\t\t\t\treturn nval\n\t\t\t\t}\n\n\t\t\t\thashable, ok := nkey.(object.Hashable)\n\n\t\t\t\tif !ok {\n\t\t\t\t\treturn &object.Exception{\n\t\t\t\t\t\tMsg: fmt.Sprintf(\"Cannot use type %T as key for Map\", nkey),\n\t\t\t\t\t\tCon: hash.Context(),\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tnewmap.Elements[hashable.HashKey()] = nval\n\t\t\t}\n\n\t\t\treturn newmap\n\n\t\tcase *ast.IndexExpr:\n\t\t\tidx := node.(ast.Expression).(*ast.IndexExpr)\n\t\t\treturn e.evalIndexExpr(idx, env)\n\n\t\tdefault:\n\t\t\treturn NULL\n\t\t}\n\tdefault:\n\t\treturn &object.Exception{\n\t\t\tMsg: \"Unimplemented type\",\n\t\t\tCon: node.Context(),\n\t\t}\n\t}\n\treturn &object.Exception{\n\t\tMsg: fmt.Sprintf(\"Evaluate: unreachable code, got %T\", node),\n\t\tCon: node.Context(),\n\t}\n}", "func (s *Script) evaluate(tx *types.Transaction, txInIdx int) error {\n\tscript := *s\n\tscriptLen := len(script)\n\t// logger.Debugf(\"script len %d: %s\", scriptLen, s.Disasm())\n\n\tstack := newStack()\n\tfor pc, scriptPubKeyStart := 0, 0; pc < scriptLen; {\n\t\topCode, operand, newPc, err := s.parseNextOp(pc)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpc = newPc\n\n\t\tif err := s.execOp(opCode, operand, tx, txInIdx, pc, &scriptPubKeyStart, stack); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Succeed if top stack item is true\n\treturn stack.validateTop()\n}", "func main() {\n\tlines := util.ReadLines()\n\n\tansP1, ansP2 := Exec(lines)\n\tfmt.Printf(\"Part1: %v\\n\", ansP1)\n\tfmt.Printf(\"Part2: %v\\n\", ansP2)\n}", "func main() {\n\t//Curency array.\n\tconst (\n\t\tEUR = iota\n\t\tGBP\n\t\tJPY\n\t)\n\n\t// Conversion rates.\n\trates := [...]float64{\n\t\tEUR: 0.11,\n\t\tGBP: 0.55,\n\t\tJPY: 122.22,\n\t}\n\t// Get the ammount to be converted.\n\ta := os.Args[1:]\n\tif l := len(a); l != 1 {\n\t\tfmt.Println(\"go run main.go [money ammount]\")\n\t\treturn\n\t}\n\n\t// Check if the passed argument is valid.\n\tif amount, err := strconv.ParseFloat(a[0], 64); err != 
nil {\n\t\tfmt.Println(\"Please provide a number!\")\n\t\treturn\n\t} else {\n\t\tfmt.Printf(\"%.2f USD is %.2f EUR\\n\", amount, rates[EUR]*amount)\n\t\tfmt.Printf(\"%.2f USD is %.2f GBP\\n\", amount, rates[GBP]*amount)\n\t\tfmt.Printf(\"%.2f USD is %.2f JPY\\n\", amount, rates[JPY]*amount)\n\t}\n\n}", "func (cal *Calculate) Run(value float64) (result float64) {\n\tswitch cal.Operator {\n\tcase \"add\":\n\t\tresult = cal.add(value)\n\n\tcase \"sub\":\n\t\tresult = cal.sub(value)\n\n\tcase \"mul\":\n\t\tresult = cal.mul(value)\n\n\tcase \"div\":\n\t\tresult = cal.div(value)\n\n\tcase \"sin\":\n\t\tresult = cal.sin(value)\n\n\tcase \"pow\":\n\t\treturn cal.pow(value)\n\t}\n\n\treturn result\n}", "func (s *BasearithmeticListener) EnterEquation(ctx *EquationContext) {}", "func main() {\n\tdefer contracts.ShowHelp(help)\n\tnProd, nCons, bufferSize := parseArguments()\n\n\tprogram(nProd, nCons, bufferSize)\n}", "func (a Abstraction) Evaluate() Expression {\n\treturn Abstraction{a.Argument, a.Body.Evaluate()}\n}", "func Evaluate(expression string) (float64, error) {\n\ttree, err := parse(expression)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn tree.evaluate()\n}", "func main() {\n\thelloWorld()\n\tfunctions()\n\tmathFunction()\n\tserver()\n}", "func main() {\n\n\tmultiplicationTables()\n}", "func run(input string) (interface{}, interface{}) {\n\treturn calculate(input, 4), calculate(input, 14)\n}", "func (p *Parser) processExpression(tks []obj.Token) uint8 {\n\tswitch firstTk := tks[0]; firstTk.Type {\n\tcase fract.Value, fract.Brace, fract.Name:\n\t\tif firstTk.Type == fract.Name {\n\t\t\tbraceCount := 0\n\t\t\tfor index, tk := range tks {\n\t\t\t\tif tk.Type == fract.Brace {\n\t\t\t\t\tswitch tk.Val {\n\t\t\t\t\tcase \" {\", \"[\", \"(\":\n\t\t\t\t\t\tbraceCount++\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbraceCount--\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif braceCount > 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif tk.Type == fract.Operator {\n\t\t\t\t\tswitch tk.Val 
{\n\t\t\t\t\tcase \"=\", \"+=\", \"-=\", \"*=\", \"/=\", \"%=\", \"^=\", \"<<=\", \">>=\", \"|=\", \"&=\":\n\t\t\t\t\t\tp.varset(tks)\n\t\t\t\t\t\treturn fract.NA\n\t\t\t\t\tcase \":=\":\n\t\t\t\t\t\tp.varsdec(tks, index)\n\t\t\t\t\t\treturn fract.NA\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Print value if live interpreting.\n\t\tif val := p.processValTokens(tks); fract.InteractiveShell {\n\t\t\tif val.Print() {\n\t\t\t\tprintln()\n\t\t\t}\n\t\t}\n\tcase fract.Var:\n\t\tp.vardec(tks)\n\tcase fract.If:\n\t\treturn p.processIf(tks)\n\tcase fract.Loop:\n\t\tp.loopCount++\n\t\tstate := p.processLoop(tks)\n\t\tp.loopCount--\n\t\treturn state\n\tcase fract.Break:\n\t\tif p.loopCount < 1 {\n\t\t\tfract.IPanic(firstTk, obj.SyntaxPanic, \"Break keyword only used in loops!\")\n\t\t}\n\t\treturn fract.LOOPBreak\n\tcase fract.Continue:\n\t\tif p.loopCount < 1 {\n\t\t\tfract.IPanic(firstTk, obj.SyntaxPanic, \"Continue keyword only used in loops!\")\n\t\t}\n\t\treturn fract.LOOPContinue\n\tcase fract.Return:\n\t\tif p.funcCount < 1 {\n\t\t\tfract.IPanic(firstTk, obj.SyntaxPanic, \"Return keyword only used in functions!\")\n\t\t}\n\t\tif len(tks) > 1 {\n\t\t\ttks = tks[1:]\n\t\t\tlist := oop.NewListModel()\n\t\t\tvar lastIndex int\n\t\t\tvar braceCount int\n\t\t\tfor index, tk := range tks {\n\t\t\t\tswitch tk.Type {\n\t\t\t\tcase fract.Brace:\n\t\t\t\t\tswitch tk.Val {\n\t\t\t\t\tcase \"{\", \"[\", \"(\":\n\t\t\t\t\t\tbraceCount++\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbraceCount--\n\t\t\t\t\t}\n\t\t\t\tcase fract.Comma:\n\t\t\t\t\tif braceCount > 0 {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tlist.PushBack(*p.processValTokens(tks[lastIndex:index]))\n\t\t\t\t\tlastIndex = index + 1\n\t\t\t\t}\n\t\t\t}\n\t\t\tif lastIndex == 0 {\n\t\t\t\tlist = nil\n\t\t\t\tp.returnVal = p.processValTokens(tks)\n\t\t\t} else {\n\t\t\t\tif lastIndex < len(tks) {\n\t\t\t\t\tlist.PushBack(*p.processValTokens(tks[lastIndex:]))\n\t\t\t\t}\n\t\t\t\tp.returnVal = nil\n\t\t\t\tp.returnVal = 
new(oop.Val)\n\t\t\t\tp.returnVal.Data = list\n\t\t\t\tp.returnVal.Type = oop.List\n\t\t\t\tp.returnVal.Tag = \"function_multiple_returns\"\n\t\t\t}\n\t\t}\n\t\treturn fract.FUNCReturn\n\tcase fract.Func:\n\t\tp.funcdec(tks)\n\tcase fract.Try:\n\t\treturn p.processTryCatch(tks)\n\tcase fract.Import:\n\t\tp.processImport(tks)\n\tcase fract.Macro:\n\t\tp.processPragma(tks)\n\tcase fract.Struct:\n\t\tp.structdec(tks)\n\tcase fract.Class:\n\t\tp.classdec(tks)\n\tcase fract.Defer, fract.Go:\n\t\tif l := len(tks); l < 2 {\n\t\t\tfract.IPanic(tks[0], obj.SyntaxPanic, \"Function is not given!\")\n\t\t} else if t := tks[l-1]; t.Type != fract.Brace && t.Val != \")\" {\n\t\t\tfract.IPanicC(tks[0].File, tks[0].Line, tks[0].Column+len(tks[0].Val), obj.SyntaxPanic, \"Invalid syntax!\")\n\t\t}\n\t\tvar valTokens []obj.Token\n\t\tbraceCount := 0\n\t\tfor i := len(tks) - 1; i >= 0; i-- {\n\t\t\ttk := tks[i]\n\t\t\tif tk.Type != fract.Brace {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch tk.Val {\n\t\t\tcase \")\":\n\t\t\t\tbraceCount++\n\t\t\tcase \"(\":\n\t\t\t\tbraceCount--\n\t\t\t}\n\t\t\tif braceCount > 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvalTokens = tks[1:i]\n\t\t\tbreak\n\t\t}\n\t\tif len(valTokens) == 0 && braceCount == 0 {\n\t\t\tfract.IPanic(tks[1], obj.SyntaxPanic, \"Invalid syntax!\")\n\t\t}\n\t\t// Function call.\n\t\tval := p.processValuePart(valuePartInfo{tokens: valTokens})\n\t\tif val.Type != oop.Func {\n\t\t\tfract.IPanic(tks[len(valTokens)], obj.ValuePanic, \"Value is not function!\")\n\t\t}\n\t\tif firstTk.Type == fract.Defer {\n\t\t\tdefers = append(defers, p.funcCallModel(val.Data.(*oop.Fn), tks[len(valTokens):]))\n\t\t} else {\n\t\t\tgo p.funcCallModel(val.Data.(*oop.Fn), tks[len(valTokens):]).Call()\n\t\t}\n\tdefault:\n\t\tfract.IPanic(firstTk, obj.SyntaxPanic, \"Invalid syntax!\")\n\t}\n\treturn fract.NA\n}", "func calc(index string, a, b int) int {\n\tret := a + b\n\tfmt.Println(index, a, b, ret)\n\treturn ret\n}", "func (mcts *MCTS) Run() *Action {\n\t// 
while within budget\n\trand.Seed(time.Now().Unix())\n\tfmt.Println(\"--------------FRESH RUN----------------\")\n\tfor i := 0; i < 10; i++ {\n\t\tfront := mcts.selection(mcts.Root)\n\t\treward := mcts.playout(front.CurrentState)\n\t\tmcts.backPropagate(front, reward)\n\t}\n\tactions := mcts.exploitation(mcts.Root).CurrentState.Actions\n\t//reset expanded nodes\n\treturn actions[len(actions)-1]\n}", "func Evaluate(expression *[]string, dispatchTable DispatchTable, stack *Stack) interface{} {\n\n\tfor idx, token := range *expression {\n\t\tvar dispatchFunction DispatchFunc\n\n\t\tif _, err := strconv.ParseFloat(token, 64); err == nil {\n\t\t\tdispatchFunction = dispatchTable[\"FLOAT\"]\n\t\t} else {\n\t\t\tvar evalsOk bool\n\t\t\tif dispatchFunction, evalsOk = dispatchTable[token]; !evalsOk {\n\t\t\t\tdispatchFunction = dispatchTable[\"__DEFAULT__\"]\n\t\t\t\t// delete token from expression\n\t\t\t\tcopy((*expression)[idx:], (*expression)[idx+1:])\n\t\t\t\t(*expression)[len(*expression)-1] = \"\"\n\t\t\t\t(*expression) = (*expression)[:len(*expression)-1]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tdispatchFunction(token, stack)\n\t}\n\treturn stack.Pop()\n}", "func main() {\n\t// everything here is self-explanatory\n\ta := 12\n\tb := 60\n\t\n\tfmt.Println(a + b)\n\tfmt.Println(a - b)\n\tfmt.Println(a / b)\n\tfmt.Println(a * b)\n\tfmt.Println(a % b)\n}", "func (p *Parser) expression() {\n\tp.unary()\n}", "func main() {\n\tapp := &cli.App{\n\t\tName: \"RULEX, a lightweight iot data rule gateway\",\n\t\tUsage: \"http://rulex.ezlinker.cn\",\n\t\tCommands: []*cli.Command{\n\t\t\t{\n\t\t\t\tName: \"run\",\n\t\t\t\tUsage: \"rulex run [path of 'rulex.db']\",\n\t\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\t\tutils.ShowBanner()\n\t\t\t\t\tif c.Args().Len() > 0 {\n\t\t\t\t\t\tlog.Info(\"Use config db:\", c.Args().Get(0))\n\t\t\t\t\t\tengine.RunRulex(c.Args().Get(0))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tengine.RunRulex(\"rulex.db\")\n\t\t\t\t\t}\n\t\t\t\t\tlog.Debug(\"Run rulex 
successfully.\")\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t},\n\t\t\t// version\n\t\t\t{\n\t\t\t\tName: \"version\",\n\t\t\t\tUsage: \"rulex version\",\n\t\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\t\tfmt.Println(\"Current Version is: \" + typex.DefaultVersion.Version)\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := app.Run(os.Args)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (t *Target) Main(body usm.Block) {\n\tt.WriteStatement(\"func main() {\\n\")\n\tt.WriteStatement(\"\\tvar r = new(Runtime)\\n\")\n\tt.Indent(body)\n\tt.WriteStatement(\"}\\n\")\n}", "func (core *Core) main() {\n\n\t// Dequeue incoming events\n\tfor m := range core.in {\n\n\t\t// Dispatch event to related function\n\t\tswitch m.oper {\n\t\tcase OP_OPEN:\n\t\t\tcore.handleOpen(m)\n\t\tcase OP_CLOSE:\n\t\t\tcore.handleClose(m)\n\t\tcase OP_LOCK:\n\t\t\tcore.handleLock(m)\n\t\tcase OP_UNLOCK:\n\t\t\tcore.handleUnlock(m)\n\t\tcase OP_GET:\n\t\t\tcore.handleGet(m)\n\t\tcase OP_SET:\n\t\t\tcore.handleSet(m)\n\t\tcase OP_INCR:\n\t\t\tcore.handleIncr(m)\n\t\tdefault:\n\t\t\tm.clt.Reply(&MessageReply{Status: \"KO\", Error: \"Unknown operation\"})\n\t\t}\n\t\tatomic.AddInt64(&core.count, 1)\n\t}\n}", "func Main() {\n\t// delete temp files with substituted env vars when the program terminates\n\tdefer os.RemoveAll(tempFilesDir)\n\tdefer cleanup()\n\n\tsettings = s.Settings\n\t// set the kubecontext to be used Or create it if it does not exist\n\tif !setKubeContext(settings.KubeContext) {\n\t\tif r, msg := createContext(); !r {\n\t\t\tlog.Fatal(msg)\n\t\t}\n\t}\n\n\t// add repos -- fails if they are not valid\n\tif r, msg := addHelmRepos(s.HelmRepos); !r {\n\t\tlog.Fatal(msg)\n\t}\n\n\tif apply || dryRun || destroy {\n\t\t// add/validate namespaces\n\t\tif !noNs {\n\t\t\taddNamespaces(s.Namespaces)\n\t\t}\n\t}\n\n\tif !skipValidation {\n\t\t// validate charts-versions exist in defined repos\n\t\tif err := validateReleaseCharts(s.Apps); err != nil 
{\n\t\t\tlog.Fatal(err.Error())\n\t\t}\n\t} else {\n\t\tlog.Info(\"Skipping charts' validation.\")\n\t}\n\n\tlog.Info(\"Preparing plan...\")\n\tif destroy {\n\t\tlog.Info(\"--destroy is enabled. Your releases will be deleted!\")\n\t}\n\n\tcs := buildState()\n\tp := cs.makePlan(&s)\n\tif !keepUntrackedReleases {\n\t\tcs.cleanUntrackedReleases()\n\t}\n\n\tp.sortPlan()\n\tp.printPlan()\n\tp.sendPlanToSlack()\n\n\tif apply || dryRun || destroy {\n\t\tp.execPlan()\n\t}\n}", "func main() {\n\tfmt.Println(\"Generating code\")\n\treduce.GenerateDispatcher(outputPath)\n\t//reduce.GenerateDispatcher(outputPath)\n}", "func main() {\n\n\tclient := connectToServer()\n\n\tnumbers := []float32{1, 5, 6}\n\taddNumbers(1, 5, client)\n\tsubtractNumbers(6, 8, client)\n\tcalculateAverage(numbers, client)\n}", "func (i *Interpreter) Expr() int {\n\ti.currentToken = i.getNextToken()\n\tresult, _ := strconv.Atoi(i.currentToken.Literal)\n\ti.eat(i.currentToken.Type)\n\n\tfor i.currentToken.Type == PLUS || i.currentToken.Type == MINUS {\n\t\tif i.currentToken.Type == PLUS {\n\t\t\ti.eat(PLUS) // Match '+' and getNextToken()\n\t\t\tnum, _ := strconv.Atoi(i.currentToken.Literal)\n\t\t\tresult += num\n\t\t} else if i.currentToken.Type == MINUS {\n\t\t\ti.eat(MINUS)\n\t\t\tnum, _ := strconv.Atoi(i.currentToken.Literal)\n\t\t\tresult -= num\n\t\t}\n\t\ti.eat(i.currentToken.Type)\n\t}\n\t/*\n\t\ti.currentToken = i.getNextToken()\n\t\tleft, _ := strconv.Atoi(i.currentToken.Literal)\n\t\ti.eat(INTEGER)\n\n\t\top := i.currentToken.Literal\n\t\ti.eat(PLUS)\n\n\t\tright, _ := strconv.Atoi(i.currentToken.Literal)\n\t\ti.eat(INTEGER)\n\n\t\tvar result int\n\t\tif op == \"+\" {\n\t\t\tresult = left + right\n\t\t}\n\t*/\n\n\treturn result\n}", "func Evaluator() {\n\tfor {\n\t\tinputch := <-Input_ch\t\t\t// Only one control flow is allowed after this point\n\t\t\n\t\tinput := string(inputch.Logentry.Command)\n\t\tconn := inputch.Conn\n\t\t\n\t\tinputs_ := strings.Split(input, \"\\r\\n\")\n\t\tinputs := 
strings.Split(inputs_[0], \" \")\n\n\t\tif inputs[0] == \"get\" {\n\t\t\t\traft.Get(input, conn)\n\t\t} else if (r.LastApplied<inputch.Logentry.SequenceNumber){\n\t\t\t\tif inputs[0] == \"set\" && r.LastApplied==inputch.Logentry.SequenceNumber-1 {\n\t\t\t\t\traft.Set(input, conn)\n\t\t\t\t\tr.LastApplied=inputch.Logentry.SequenceNumber\n\t\t\t\t}else if inputs[0] == \"getm\" && r.LastApplied==inputch.Logentry.SequenceNumber-1 {\n\t\t\t\t\traft.Getm(input, conn)\n\t\t\t\t\tr.LastApplied=inputch.Logentry.SequenceNumber\n\t\t\t\t}else if inputs[0] == \"cas\" && r.LastApplied==inputch.Logentry.SequenceNumber-1 {\n\t\t\t\t\traft.Cas(input, conn)\n\t\t\t\t\tr.LastApplied=inputch.Logentry.SequenceNumber\n\t\t\t\t} else if inputs[0] == \"delete\" && r.LastApplied==inputch.Logentry.SequenceNumber-1{\n\t\t\t\t\traft.Delete(input, conn)\n\t\t\t\t\tr.LastApplied=inputch.Logentry.SequenceNumber\n\t\t\t\t}else {\n\t\t\t\t\t//fmt.Println(\"Wrong Command input \",input)\n\t\t\t\t\tr.LastApplied=inputch.Logentry.SequenceNumber\n\t\t\t\t\traft.Output_ch <- raft.String_Conn{\"ERR_CMD_ERR\\r\\n\", conn}\n\t\t\t\t}\n\t\t}\n\t}\n}", "func main() {\n\tvar a, b int\n\n\tn, err := strconv.Atoi(os.Args[1])\n\tif err != nil {\n\t\tfmt.Println(\"Error:\", os.Args[1], \"is not an int!\")\n\t\tos.Exit(1)\n\t}\n\n\ta = sumOfSquares(n)\n\tb = squareOfSum(n)\n\tfmt.Println(\"Difference between sum of squares and square of sums is\", b-a)\n}", "func main() {\n\n\t// a := 42\n\t// var b int = 77\n\n\tp1 := 1 * 2 * 3 * 4 * 5\n\tfmt.Println(p1)\n\n\tfmt.Println(factorial(5))\n\tfmt.Println(factorial2(5))\n\n\t//fmt.Println(produkt)\n\n\tfmt.Println(sumMultiples(3, 10))\n\tfmt.Println(sumMultiples(5, 27))\n\n\ttestSumMultiples()\n\n}", "func (e *ExpressionAtom) Evaluate(dataContext IDataContext, memory *WorkingMemory) (reflect.Value, error) {\n\tif e.Evaluated == true {\n\t\treturn e.Value, nil\n\t}\n\tif e.Variable != nil {\n\t\tval, err := e.Variable.Evaluate(dataContext, memory)\n\t\tif err != nil 
{\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\te.Value = val\n\t\te.Evaluated = true\n\t\treturn val, err\n\t}\n\tif e.FunctionCall != nil {\n\t\tvalueNode := dataContext.Get(\"DEFUNC\")\n\t\targs, err := e.FunctionCall.EvaluateArgumentList(dataContext, memory)\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\tret, err := valueNode.CallFunction(e.FunctionCall.FunctionName, args...)\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\te.Value = ret\n\t\te.Evaluated = true\n\t\treturn ret, err\n\t}\n\tpanic(\"should not be reached\")\n}", "func RunMain() int {\n\n\t// Initialize a couple of \"vectors\" represented as slices.\n\tvectorA := mat.NewVecDense(3, []float64{11.0, 5.2, -1.3})\n\tvectorB := mat.NewVecDense(3, []float64{-7.2, 4.2, 5.1})\n\n\t// Compute the dot product of A and B\n\tdotProduct := mat.Dot(vectorA, vectorB)\n\tfmt.Printf(\"The dot product of A and B is: %0.2f\\n\", dotProduct)\n\n\t// Scale each element of A by 1.5.\n\tvectorA.ScaleVec(1.5, vectorA)\n\tfmt.Printf(\"Scaling A by 1.5 gives: %v\\n\", vectorA)\n\n\t// Compute the norm/length of B.\n\tnormB := blas64.Nrm2(3, vectorB.RawVector())\n\tfmt.Printf(\"The norm/length of B is: %0.2f\\n\", normB)\n\n\treturn 0\n}", "func main() {\n\tperform()\n}", "func Evaluate(input string) (decimal.Decimal, error) {\n\tvar stack []decimal.Decimal\n\tinputs := strings.Split(input, \" \")\n\n\tfor _, command := range inputs {\n\t\tswitch command {\n\t\tcase \"+\", \"-\", \"*\", \"/\", \"%\", \"^\":\n\t\t\tif len(stack) < 2 {\n\t\t\t\treturn decimal.Zero, errors.New(\"stack overflow\")\n\t\t\t}\n\t\t\tlhs := stack[len(stack)-2]\n\t\t\trhs := stack[len(stack)-1]\n\t\t\tstack = stack[:len(stack)-1]\n\t\t\tswitch command {\n\t\t\tcase \"+\":\n\t\t\t\trhs = lhs.Add(rhs)\n\t\t\tcase \"-\":\n\t\t\t\trhs = lhs.Sub(rhs)\n\t\t\tcase \"*\":\n\t\t\t\trhs = lhs.Mul(rhs)\n\t\t\tcase \"/\":\n\t\t\t\trhs = lhs.Div(rhs)\n\t\t\tcase \"%\":\n\t\t\t\trhs = lhs.Mod(rhs)\n\t\t\tcase 
\"^\":\n\t\t\t\trhs = lhs.Pow(rhs)\n\t\t\t}\n\t\t\tstack[len(stack)-1] = rhs\n\t\tcase \"abs\", \"atan\", \"ceil\", \"cos\", \"floor\", \"neg\", \"sin\", \"tan\":\n\t\t\tif len(stack) < 1 {\n\t\t\t\treturn decimal.Zero, errors.New(\"stack overflow\")\n\t\t\t}\n\t\t\tval := stack[len(stack)-1]\n\t\t\tswitch command {\n\t\t\tcase \"abs\":\n\t\t\t\tval = val.Abs()\n\t\t\tcase \"atan\":\n\t\t\t\tval = val.Atan()\n\t\t\tcase \"ceil\":\n\t\t\t\tval = val.Ceil()\n\t\t\tcase \"cos\":\n\t\t\t\tval = val.Cos()\n\t\t\tcase \"floor\":\n\t\t\t\tval = val.Floor()\n\t\t\tcase \"neg\":\n\t\t\t\tval = val.Neg()\n\t\t\tcase \"sin\":\n\t\t\t\tval = val.Sin()\n\t\t\tcase \"tan\":\n\t\t\t\tval = val.Tan()\n\t\t\t}\n\t\t\tstack[len(stack)-1] = val\n\t\tdefault:\n\t\t\tval, err := decimal.NewFromString(command)\n\t\t\tif err != nil {\n\t\t\t\treturn val, err\n\t\t\t}\n\t\t\tstack = append(stack, val)\n\t\t}\n\t}\n\n\tif len(stack) != 1 {\n\t\treturn decimal.Zero, errors.New(\"unclean stack\")\n\t}\n\treturn stack[0], nil\n}", "func main() {\n //Number oparator\n fmt.Println(\"oparator\")\n fmt.Println(\"1 + 1 =\", 1 + 1)\n fmt.Println(\"1 - 1 =\", 1 - 1)\n fmt.Println(\"1 / 1 =\", 1 / 1)\n fmt.Println(\"2 * 2 =\", 2 * 2)\n fmt.Println(\"\\n\")\n\n //Strings\n fmt.Println(\"Strings\")\n fmt.Println(\"Len(): \", len(\"Hello, World\"))\n fmt.Println(\"Position [1]: \", \"Hello, World\"[1])\n fmt.Println(\"Hello, \" + \"World\")\n fmt.Println(\"\\n\")\n\n //Bool\n fmt.Println(\"Bool\")\n fmt.Println(true && true)\n fmt.Println(true && false)\n fmt.Println(true || true)\n fmt.Println(true || false)\n fmt.Println(\"\\n\")\n\n\n // Practice Exercises\n\n /* 1. Although overpowered for the task, you can use Go as a calculator. 
Write a program\n * that computes 32,132 × 42,452 and prints it to the terminal (use the * operator\n * for multiplication).\n */\n fmt.Println(\"Exercises\")\n fmt.Println(\"32,132 * 42,452 = \", 32132 * 42452)\n fmt.Println(\"\\n\")\n}", "func (s *BaseGraffleParserListener) ExitArithm_expr(ctx *Arithm_exprContext) {}", "func main() {\n\tcmd.Root().Execute()\n}", "func (s *BasearithmeticListener) ExitEquation(ctx *EquationContext) {}", "func main() {\n for {\n var weight0 int\n fmt.Scan(&weight0)\n \n var weight1 int\n fmt.Scan(&weight1)\n \n var weight2 int\n fmt.Scan(&weight2)\n \n stdoutWriter := os.Stdout\n os.Stdout = os.Stderr\n action := Solve(weight0, weight1, weight2)\n os.Stdout = stdoutWriter\n fmt.Println(action)\n }\n}", "func main(){\n\t// Strings, which can be added together with +. \n\tfmt.Println(\"go\" + \"lang\")\n\tfmt.Println(\"1+1.5 = \", 1+1.5)\n\tfmt.Println(\"7.0/3 =\", 7.0/3)\n\tfmt.Println(true && false)\n\tfmt.Println(!true)\n}", "func mathOp(a, b int, operator string, doOperation func(int, int, string) int) int {\n\tresult := doOperation(a, b, operator)\n\treturn result\n}", "func main() {\n\t// Variable Creation\n\tgravity := -9.81 // a in formula, Earth's gravity in m/s^2\n\tvar initialVelocity float64 = 0 // vi in formula, should be 0\n\tvar fallingTime float64 = 10 // t in formula, should be 10 (s)\n\tvar initialPosition float64 = 0 // xi in formula, should be 0\n\tvar finalPosition float64 // x(t) in formula, final position the program solves for\n\n\t// Calculation + Final Print Statement\n\tfinalPosition = 0.5*(gravity*math.Pow(fallingTime, 2)) + (initialVelocity * fallingTime) + initialPosition\n\tfmt.Printf(\"The object's position after %f seconds is %f m.\", fallingTime, finalPosition) // Prints the objects final height\n}", "func EvaluateFuncs(exp string) string {\n exp = EvaluateFunc(exp, \"abs\")\n exp = EvaluateFunc(exp, \"sin\")\n exp = EvaluateFunc(exp, \"cos\")\n exp = EvaluateFunc(exp, \"tan\")\n return exp\n}", "func 
TestPerformArithmeticOperation(t *testing.T) {\n\t// operand stack (also known as data stack)\n\tstack := evaluator.Stack{}\n\n\t// push two values onto the stack\n\tstack.Push(1)\n\tstack.Push(2)\n\n\t// any token that is not token.QUO or token.REM\n\ttok := token.ADD\n\n\t// perform the selected arithmetic operation\n\taddOperation := func(x int, y int) int { return x + y }\n\n\t// perform the selected arithmetic operation\n\terr := evaluator.PerformArithmeticOperation(&stack, addOperation, tok)\n\tassert.NoError(t, err)\n\n\t// check stack\n\tassert.False(t, stack.Empty())\n\tassert.Equal(t, stack.Size(), 1)\n\n\t// stack should contain one value\n\tvalue, err := stack.Pop()\n\tassert.NoError(t, err)\n\tassert.Equal(t, value, 3)\n}", "func RunExam() {\n\tvarConst()\n\tcondition(0, 10)\n\tcondition(1, 10)\n\tcondition(1, 0)\n\tswitchCase(0)\n\tswitchCase(2)\n\tswitchCase(3)\n\tforLoop()\n\tfuncTest()\n\tanonyFuncTest()\n\t// closure\n\trunClosure()\n\t// slice\n\trunSlice()\n\t// map\n\trunMap()\n}", "func (s *BaseGraffleParserListener) EnterArithm_expr(ctx *Arithm_exprContext) {}", "func execEval(_ int, p *gop.Context) {\n\targs := p.GetArgs(4)\n\tret, ret1 := types.Eval(args[0].(*token.FileSet), args[1].(*types.Package), token.Pos(args[2].(int)), args[3].(string))\n\tp.Ret(4, ret, ret1)\n}" ]
[ "0.6487734", "0.5995231", "0.58327144", "0.5822389", "0.5811831", "0.5791715", "0.57338893", "0.57224756", "0.57194424", "0.57014334", "0.56691843", "0.5606424", "0.5599023", "0.5535396", "0.55270267", "0.5525703", "0.55127937", "0.54547864", "0.54376626", "0.54136235", "0.54105663", "0.54099524", "0.54046535", "0.5404296", "0.53985584", "0.53985584", "0.53919506", "0.53753024", "0.5373558", "0.5344937", "0.53356504", "0.5317623", "0.530852", "0.5285987", "0.52802086", "0.52506363", "0.5242789", "0.52306974", "0.5229315", "0.52205265", "0.5219223", "0.5218196", "0.52081954", "0.5178109", "0.5174024", "0.516973", "0.51634884", "0.51614964", "0.51566344", "0.5151378", "0.51479995", "0.5147286", "0.5140751", "0.5131026", "0.51282156", "0.509653", "0.50932705", "0.5090778", "0.50757384", "0.5075497", "0.50693434", "0.50614864", "0.5060157", "0.5052606", "0.50449294", "0.50423956", "0.5038579", "0.5037728", "0.50310266", "0.50203174", "0.50173473", "0.4999302", "0.49972183", "0.49957228", "0.49921817", "0.4981521", "0.49702156", "0.49691933", "0.49658167", "0.49636513", "0.4962331", "0.4956211", "0.4955907", "0.49554253", "0.49488005", "0.4945684", "0.49388772", "0.49382487", "0.49377182", "0.49349982", "0.49334496", "0.49249414", "0.4909223", "0.49048164", "0.4904224", "0.49042174", "0.4903804", "0.4898981", "0.48936185", "0.4891127" ]
0.72959805
0
PrivateMessage is called on each given incoming twitch.PrivateMessage and checks if the message contains something we should act on.
func PrivateMessage(message twitch.PrivateMessage, tc *twitch.Client) { if message.Message == "xd" { tc.Say(message.Channel, "xd") } log.Info(message) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func isPrivateMessage(destination, fileName, request, keywords, msg string) bool {\n\treturn !isNull(msg) && !isNull(destination) && isNull(fileName) && isNull(request) && isNull(keywords)\n}", "func (m *Message) Private() bool {\n\treturn m.Chat.Type == ChatPrivate\n}", "func (d *Discord) IsPrivate(message Message) bool {\n\tc, err := d.Channel(message.Channel())\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn c.Type == 1\n}", "func (d *Discord) SupportsPrivateMessages() bool {\n\treturn true\n}", "func (d *Discord) SupportsPrivateMessages() bool {\n\treturn true\n}", "func (d *Discord) IsPrivate(message Message) bool {\n\tc, err := d.Channel(message.Channel())\n\treturn err == nil && c.Type == discordgo.ChannelTypeDM\n}", "func (c *Client) MessagePrivate(dest *Peer, content string) {\n\tif c.protoIsAdc == true {\n\t\tc.connHub.conn.Write(&msgAdcDMessage{\n\t\t\tmsgAdcTypeD{c.sessionId, dest.adcSessionId},\n\t\t\tmsgAdcKeyMessage{Content: content},\n\t\t})\n\n\t} else {\n\t\tc.connHub.conn.Write(&msgNmdcPrivateChat{c.conf.Nick, dest.Nick, content})\n\t}\n}", "func (c *Chat) IsPrivate() bool {\n\treturn c.Type == \"private\"\n}", "func PrivateMessage(pkt event.Packet) client.RegistryFunc {\n\treturn func(clients client.Registry) error {\n\t\tto := pkt.UIDs()[1]\n\n\t\tif _, ok := clients[to]; !ok {\n\t\t\treturn fmt.Errorf(\"for packet numbered %v client %v is not connected\", pkt.Sequence(), to)\n\t\t}\n\n\t\ttargetClient := clients[to]\n\n\t\tif err := targetClient.Send(pkt); err != nil {\n\t\t\tlog.Debug(fmt.Sprintf(\"notify.PrivateMessage: for client %v, got error %#q\", to, err))\n\t\t\tclient.UnregisterFunc(to)(clients)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (d *Discord) PrivateMessage(userID, message string) error {\n\tc, err := d.Session.UserChannelCreate(userID)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn d.SendMessage(c.ID, message)\n}", "func privateMessageReachedDestination(gossiperPtr *core.Gossiper, msg 
*core.PrivateMessage) bool {\n\treturn (strings.Compare(gossiperPtr.Name, msg.Destination) == 0)\n}", "func (o *InlineResponse20033Milestones) SetPrivate(v bool) {\n\to.Private = &v\n}", "func privMsg(channel, msg string) {\n\tlines := strings.Split(msg, \"\\n\")\n\tfor i := 0; i < len(lines); i++ {\n\t\tif lines[i] == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tIRCOutgoing <- fmt.Sprintf(\"PRIVMSG %s :%s\", channel, lines[i])\n\n\t\t// Make test mode faster.\n\t\tif cfg.TestMode {\n\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t} else {\n\t\t\ttime.Sleep(500 * time.Millisecond)\n\t\t}\n\t}\n}", "func (o *InlineResponse20034Milestone) SetPrivate(v bool) {\n\to.Private = &v\n}", "func (d *Discord) PrivateMessage(userID, message string) (*discordgo.Message, error) {\n\tc, err := d.Session.UserChannelCreate(userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn d.SendMessage(c.ID, message)\n}", "func (o *ViewMilestone) SetPrivate(v bool) {\n\to.Private = &v\n}", "func (m *Member) IsPrivate() bool { return !m.Published }", "func (o *InlineResponse20033Milestones) HasPrivate() bool {\n\tif o != nil && o.Private != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20049Post) SetPrivate(v string) {\n\to.Private = &v\n}", "func (c *Client) OnPrivateSub(handler PrivateSubHandler) {\n\tproxy := &eventProxy{client: c, onPrivateSub: handler}\n\tc.client.OnPrivateSub(proxy)\n}", "func (o *InlineResponse20034Milestone) HasPrivate() bool {\n\tif o != nil && o.Private != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (b *Bot) PrivateReply(m *irc.Message, format string, v ...interface{}) {\n\tb.Send(&irc.Message{\n\t\tPrefix: &irc.Prefix{},\n\t\tCommand: \"PRIVMSG\",\n\t\tParams: []string{\n\t\t\tm.Prefix.Name,\n\t\t\tfmt.Sprintf(format, v...),\n\t\t},\n\t})\n}", "func (o *InlineResponse200115) SetPrivate(v string) {\n\to.Private = &v\n}", "func (o *InlineResponse20033Milestones) GetPrivateOk() (*bool, bool) {\n\tif o == nil || o.Private == nil 
{\n\t\treturn nil, false\n\t}\n\treturn o.Private, true\n}", "func (o *InlineResponse20051TodoItems) SetPrivate(v string) {\n\to.Private = &v\n}", "func (o *InlineResponse20049Post) HasPrivate() bool {\n\tif o != nil && o.Private != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse200115) HasPrivate() bool {\n\tif o != nil && o.Private != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20051TodoItems) HasPrivate() bool {\n\tif o != nil && o.Private != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func sendPrivateMsg(resp *WsJsonResponse, target string) bool {\n\tfor wsConn, name := range Clients {\n\t\tif name == target {\n\t\t\terr := wsConn.MyConn.WriteJSON(resp)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func isClientMessagePrivate(clientMsg *core.Message) bool {\n\treturn (strings.Compare(*(clientMsg.Destination), \"\") != 0)\n}", "func (mr *MockapprunnerDescriberMockRecorder) IsPrivate() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IsPrivate\", reflect.TypeOf((*MockapprunnerDescriber)(nil).IsPrivate))\n}", "func (o *InlineResponse20034Milestone) GetPrivateOk() (*bool, bool) {\n\tif o == nil || o.Private == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Private, true\n}", "func (sb *SweetieBot) ChannelIsPrivate(channelID DiscordChannel) (*discordgo.Channel, bool) {\n\tif channelID == \"heartbeat\" {\n\t\treturn nil, true\n\t}\n\tch, err := sb.DG.State.Channel(channelID.String())\n\tif err == nil { // Because of the magic of web development, we can get a message BEFORE the \"channel created\" packet for the channel being used by that message.\n\t\treturn ch, typeIsPrivate(ch.Type)\n\t}\n\t// Bots aren't supposed to be in Group DMs but can be grandfathered into them, and these channels will always fail to exist, so we simply ignore this error as harmless.\n\treturn nil, true\n}", "func 
PrivMsgCallback(con *irc.Connection) {\n\tcon.AddCallback(\"PRIVMSG\", func(e *irc.Event) {\n\t\tmessage := e.Arguments[1]\n\t\tchannel := e.Arguments[0]\n\t\tlog.Printf(\"%s said: %s\\n\", e.Nick, message)\n\n\t\t// Look for urls in messages\n\t\turls := urlRegex.FindAllString(message, -1)\n\t\tif len(urls) > 0 {\n\t\t\tfor _, url := range urls {\n\t\t\t\tmessageCh := make(chan string, 1)\n\t\t\t\tgo scrapePage(url, messageCh)\n\t\t\t\tcon.Privmsg(channel, <-messageCh)\n\t\t\t}\n\t\t}\n\t})\n}", "func (o *InlineResponse20049Post) GetPrivateOk() (*string, bool) {\n\tif o == nil || o.Private == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Private, true\n}", "func (o *FileversionFileversion) SetPrivate(v bool) {\n\to.Private = &v\n}", "func (o *InlineResponse20051TodoItems) GetPrivateOk() (*string, bool) {\n\tif o == nil || o.Private == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Private, true\n}", "func NewPrivateMessage(id, hopLimit uint32, txt, origin, destination string) *PrivateMessage {\n\tprivateMessage := &PrivateMessage{\n\t\tOrigin: origin,\n\t\tID: id,\n\t\tText: txt,\n\t\tDestination: destination,\n\t\tHopLimit: hopLimit,\n\t}\n\treturn privateMessage\n}", "func Private(pattern string, f func(*Response)) Handler {\n\treturn Handler{\n\t\tMethod: PrivateMsg,\n\t\tPattern: pattern,\n\t\tRun: f,\n\t}\n}", "func (t *Task) IsPrivate() bool {\n\t// I like the idea of not needing to put an astrick next to a task\n\t// ... 
Descriptions automagically qualify for \"important tasks\"\n\t// No descriptions means it's filler, or private\n\t// Summaries WITH private: true are private\n\tif t.Summary == \"\" || t.Private {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ViewMilestone) HasPrivate() bool {\n\tif o != nil && o.Private != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *PostContextsAddPhpParams) SetPrivate(private *int64) {\n\to.Private = private\n}", "func (dtk *DcmTagKey) IsPrivate() bool {\n\treturn ((dtk.group & 1) != 0) && dtk.HasValidGroup()\n}", "func (n IpcMode) IsPrivate() bool {\n\treturn n == IPCModePrivate\n}", "func (o *ViewMilestone) GetPrivateOk() (*bool, bool) {\n\tif o == nil || o.Private == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Private, true\n}", "func privAction(channel, msg string) {\n\tIRCOutgoing <- fmt.Sprintf(\"PRIVMSG %s :\\x01ACTION %s\\x01\", channel, msg)\n}", "func (o *InlineResponse20033Milestones) GetPrivate() bool {\n\tif o == nil || o.Private == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Private\n}", "func forwardPrivateMessage(gossiperPtr *core.Gossiper, msg *core.PrivateMessage) {\n\n\tif msg.HopLimit == 0 {\n\t\t// if we have reached the HopLimit, drop the message\n\t\treturn\n\t}\n\n\tgossiperPtr.DestinationTable.DsdvLock.Lock()\n\tforwardingAddress := gossiperPtr.DestinationTable.Dsdv[msg.Destination]\n\tgossiperPtr.DestinationTable.DsdvLock.Unlock()\n\t// If current node has no information about next hop to the destination in question\n\tif strings.Compare(forwardingAddress, \"\") == 0 {\n\t\t// TODO: What to do if there is no 'next hop' known when peer has to forward a private packet\n\t}\n\n\t// Decrement the HopLimit right before forwarding the packet\n\tmsg.HopLimit--\n\t// Encode and send packet\n\tpacketToSend := core.GossipPacket{Private: msg}\n\tpacketBytes, err := protobuf.Encode(&packetToSend)\n\thelpers.HandleErrorFatal(err)\n\tcore.ConnectAndSend(forwardingAddress, gossiperPtr.Conn, 
packetBytes)\n}", "func (o *InlineResponse200115) GetPrivateOk() (*string, bool) {\n\tif o == nil || o.Private == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Private, true\n}", "func (n UTSMode) IsPrivate() bool {\n\treturn !n.IsHost()\n}", "func (mr *MockPrivateNetworkDetectorMockRecorder) IsPrivate(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IsPrivate\", reflect.TypeOf((*MockPrivateNetworkDetector)(nil).IsPrivate), arg0)\n}", "func (mr *MockapprunnerClientMockRecorder) PrivateURL(vicARN interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"PrivateURL\", reflect.TypeOf((*MockapprunnerClient)(nil).PrivateURL), vicARN)\n}", "func (n PidMode) IsPrivate() bool {\n\treturn !(n.IsHost() || n.IsContainer())\n}", "func (c *Client) SendPrivateMsg(msg string, dest string, anonymous *bool, relayRate *float64, fullAnonimity *bool) {\n\ttoSend := gossiper.Message{\n\t\tText: msg,\n\t\tDestination: &dest,\n\t\tAnonymous: anonymous,\n\t}\n\n\tif *anonymous {\n\t\ttoSend.RelayRate = relayRate\n\t\ttoSend.FullAnonimity = fullAnonimity\n\t}\n\n\tpacketBytes, err := protobuf.Encode(&toSend)\n\tif err != nil {\n\t\tlog.Error(\"Error could not encode message : \", err)\n\t}\n\terr = c.SendBytes(packetBytes)\n\tif err != nil {\n\t\tlog.Error(\"Error could not send message : \", err)\n\t}\n\n\treturn\n\n}", "func (o *InlineResponse20034Milestone) GetPrivate() bool {\n\tif o == nil || o.Private == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Private\n}", "func NewPrivateMsgMap() *PrivateMsgMap {\n\tvariable := make(map[string][]string)\n\treturn &PrivateMsgMap{messages: variable}\n}", "func (o *LinkPrivateIpsRequest) SetPrivateIps(v []string) {\n\to.PrivateIps = &v\n}", "func (o *LinkPrivateIpsRequest) HasPrivateIps() bool {\n\tif o != nil && o.PrivateIps != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *LinkPrivateIpsRequest) 
GetPrivateIpsOk() (*[]string, bool) {\n\tif o == nil || o.PrivateIps == nil {\n\t\treturn nil, false\n\t}\n\treturn o.PrivateIps, true\n}", "func PrivateIP(ip string) bool {\n\taddr := net.ParseIP(RemovePort(strings.TrimSpace(ip)))\n\tif addr == nil { // Not an IP address?\n\t\treturn true\n\t}\n\n\tfor _, c := range privateCIDR {\n\t\tif c.Contains(addr) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (p *Provider) IsPrivate() bool {\n\treturn p.key != nil\n}", "func (b *Bucket) IsPrivate() bool {\n\treturn b.LinkKey != \"\"\n}", "func (conn *Conn) Privmsg(t, msg string) {\n\tfor _, s := range splitMessage(msg, conn.cfg.SplitLen) {\n\t\tconn.Raw(PRIVMSG + \" \" + t + \" :\" + s)\n\t}\n}", "func handleNonDirectMessage(api *slack.Client, users []slack.User, message models.Message, bot *models.Bot) error {\n\t// 'direct_message_only' is either 'false' OR\n\t// 'direct_message_only' was probably never set\n\t// Is output to rooms set?\n\tif len(message.OutputToRooms) > 0 {\n\t\tfor _, roomID := range message.OutputToRooms {\n\t\t\terr := sendChannelMessage(api, roomID, message)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\t// Is output to users set?\n\tif len(message.OutputToUsers) > 0 {\n\t\tfor _, u := range message.OutputToUsers {\n\t\t\t// Get users Slack user ID\n\t\t\tuserID := getUserID(u, users, bot)\n\t\t\tif userID != \"\" {\n\t\t\t\t// If 'direct_message_only' is 'false' but the user listed himself in the 'output_to_users'\n\t\t\t\tif userID == message.Vars[\"_user.id\"] && !message.DirectMessageOnly {\n\t\t\t\t\tbot.Log.Warn(\"You have specified 'direct_message_only' as 'false' but listed yourself in 'output_to_users'\")\n\t\t\t\t}\n\t\t\t\t// Respond back to these users via direct message\n\t\t\t\terr := sendDirectMessage(api, userID, message)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// Was there no specified output set?\n\t// Send message back to original channel\n\tif 
len(message.OutputToRooms) == 0 && len(message.OutputToUsers) == 0 {\n\t\terr := sendBackToOriginMessage(api, message)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (s *ExtSession) AddPrivateCommand(category string, check func(ctx *Context) bool, c *Command) {\n\tc.Check = check\n\ts.AddCommand(category, c)\n}", "func (n NetworkMode) IsPrivate() bool {\n\treturn !(n.IsHost() || n.IsContainer())\n}", "func IsPrivate(host string) (bool, error) {\n\tif strings.HasPrefix(host, \"http\") {\n\t\tu, err := url.Parse(host)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\thost = u.Hostname()\n\t}\n\tif ip := net.ParseIP(host); ip != nil {\n\t\treturn IsIPPrivate(ip), nil\n\t}\n\tips, err := net.LookupIP(host)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif len(ips) == 0 {\n\t\treturn false, err\n\t}\n\tfor _, ip := range ips {\n\t\tif !IsIPPrivate(ip) {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\treturn true, nil\n}", "func (n UsernsMode) IsPrivate() bool {\n\treturn !n.IsHost()\n}", "func (o *FileversionFileversion) HasPrivate() bool {\n\tif o != nil && o.Private != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (h *Headers) IsPrivate() bool {\n\treturn h.private\n}", "func (o *LinkPublicIpRequest) HasPrivateIp() bool {\n\tif o != nil && o.PrivateIp != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func PrivateIndicator(psi []byte) bool {\n\treturn psi[2+PointerField(psi)]&0x40 != 0\n}", "func (o *InlineResponse20027Person) HasPrivateNotes() bool {\n\tif o != nil && o.PrivateNotes != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *LinkPublicIpRequest) SetPrivateIp(v string) {\n\to.PrivateIp = &v\n}", "func (r *NucypherAccountRepository) Private(updatedBy string, accountID int, now time.Time) error {\n\n\t_, err := r.store.db.NamedExec(`UPDATE nucypher_accounts \n\tSET is_private=true, updated_by=:updated_by, updated_at=:updated_at \n\tWHERE (created_by=:updated_by AND account_id=:account_id AND 
is_private=false)`,\n\t\tmap[string]interface{}{\n\t\t\t\"updated_by\": updatedBy,\n\t\t\t\"account_id\": accountID,\n\t\t\t\"updated_at\": now,\n\t\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func createNewPrivateMessage(origin string, msg string, dest *string) *core.PrivateMessage {\n\tdefaultID := uint32(0) // to enforce NOT sequencing\n\tdefaultHopLimit := uint32(10)\n\tprivateMsg := core.PrivateMessage{Origin: origin, ID: defaultID, Text: msg, Destination: *dest, HopLimit: defaultHopLimit}\n\treturn &privateMsg\n}", "func (sock *Server) Privmsg(user string, message string) {\n\tsock.Send(\"PRIVMSG \" + user + \" :\" + message)\n}", "func isPrivateIP(ip net.IP) bool {\n\t//TODO: another great cidrtree option\n\t//TODO: Private for ipv6 or just let it ride?\n\treturn private24BitBlock.Contains(ip) || private20BitBlock.Contains(ip) || private16BitBlock.Contains(ip)\n}", "func (k *Key) IsPrivate() bool {\n\treturn k.Key.IsPrivate()\n}", "func (d Dispatcher) IsJobPrivate(id string) (bool, error) {\n\tj, err := d.GetBC().FindJob(id)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn j.GetPrivate(), nil\n}", "func (f *FilterConfig) privateIP(ip net.IP) bool {\n\tfor _, cidr := range f.privateCIDRs {\n\t\tif cidr.Contains(ip) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (mr *MockNetworkDescriberMockRecorder) IsAPIServerPrivate() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IsAPIServerPrivate\", reflect.TypeOf((*MockNetworkDescriber)(nil).IsAPIServerPrivate))\n}", "func (mr *MockClusterScoperMockRecorder) IsAPIServerPrivate() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IsAPIServerPrivate\", reflect.TypeOf((*MockClusterScoper)(nil).IsAPIServerPrivate))\n}", "func (o *PublicIp) SetPrivateIp(v string) {\n\to.PrivateIp = &v\n}", "func (o *PublicIp) SetPrivateIp(v string) {\n\to.PrivateIp = &v\n}", "func (t 
*SimpleChaincode) QueryPrivate(stub shim.ChaincodeStubInterface, args []string) pb.Response{\n\tif len(args)!=2{\n\t\treturn shim.Error(\"Incorrect arguments, expecting 2\")\n\t}\n\n\tusername := args[0]\n\tcoll := args[1]\n\t//check if the state under username has been deleted\n\tstate_b, err := stub.GetState(username)\n\tif state_b == nil {\n\t\treturn shim.Error(\"User does not exist\")\n\t}\n\n\tprivate_b, err := stub.GetPrivateData(coll, username) \n if err != nil {\n return shim.Error(\"Failed to get private details for \"+username)\n } else if private_b == nil {\n return shim.Error(\"Private details do not exist for \"+username)\n }\n\treturn shim.Success(private_b)\n}", "func (pi *packetInfo) processReceivedMessage() error {\n\tvar player *PlayerData\n\t// If we are not tracking the connection yet, the user must be\n\t// registering with the server.\n\tif pi.tracker.playerByConnection(pi.conn) == nil {\n\t\terr := pi.registerClient()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tplayer = pi.tracker.playerByConnection(pi.conn)\n\n\t\t// If a malicious client is connecting and disconnecting\n\t\t// quickly it is possible that PlayerData will be nil.\n\t\t// No need to return an error, just don't register them.\n\t\tif player == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif player.pool.IsFrozen() {\n\t\t\tpi.announceStart()\n\t\t} else {\n\t\t\tpi.broadcastJoinedPool(player)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif err := pi.verifyMessage(); err != nil {\n\t\treturn err\n\t}\n\n\t// At this point we are confident that the user has at least attempted\n\t// to broadcast a valid message with their verification key. 
We unset\n\t// the passive flag so that they will not get a ban score / p2p ban\n\t// when leaving the pool.\n\t// At least this must happen before blame checking logic happens.\n\tif player = pi.tracker.playerByConnection(pi.conn); player != nil {\n\t\tplayer.isPassive = false\n\t}\n\n\tif err := pi.checkBlameMessage(); err != nil {\n\t\treturn err\n\t}\n\n\tpi.broadcastMessage()\n\treturn nil\n}", "func (o *FileversionFileversion) GetPrivateOk() (*bool, bool) {\n\tif o == nil || o.Private == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Private, true\n}", "func (me TrestrictionType) IsPrivate() bool { return me.String() == \"private\" }", "func (c CgroupnsMode) IsPrivate() bool {\n\treturn c == CgroupnsModePrivate\n}", "func isPrivate(v *big.Int) bool {\n\treturn v.Cmp(big.NewInt(37)) == 0 || v.Cmp(big.NewInt(38)) == 0\n}", "func Privmsg(ch, text string) {\n\tirc.Privmsg(ch, text)\n}", "func (domain Domain) IsPrivate() bool {\n\treturn domain.Type == constant.PrivateDomain\n}", "func (o *LinkPublicIpRequest) GetPrivateIpOk() (*string, bool) {\n\tif o == nil || o.PrivateIp == nil {\n\t\treturn nil, false\n\t}\n\treturn o.PrivateIp, true\n}", "func (dtk *DcmTagKey) IsPrivateReservation() bool {\n\treturn dtk.IsPrivate() && dtk.element >= 0x10 && dtk.element <= 0xFF\n}", "func (o *ViewMilestone) GetPrivate() bool {\n\tif o == nil || o.Private == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Private\n}", "func (mgmt *Management) handlePeersMessage(received []byte) {\n\tvar cm collaborationMessage\n\terr := json.Unmarshal(received, &cm)\n\tif err != nil {\n\t\tlog.Println(\"Error while unmarshalling collaborationMessage:\", err)\n\t}\n\tlog.Println(cm, \"parsed FrontendtoMgmt\")\n\n\tswitch cm.Event {\n\tcase \"ABTU\":\n\t\tmgmt.doc.PeersToABTU <- cm.Content\n\tcase \"AccessControl\":\n\t//\tTODO Handle access control messages\n\tcase \"Cursor\":\n\t//\tTODO Handle cursor messages\n\t}\n}", "func HandlePrivmsg(h func(*Server, string, string, string)) 
{\n\teventOnPrivmsg = h\n}", "func receive(dec json.Decoder, ctrl *Control){\n\tif testing {log.Println(\"receive\")}\n\tmsg:= new(Message)\n\tfor {\n\t\tif err := dec.Decode(msg);err != nil {\n\t\t\tfmt.Println(\"Something went wrong, closing connection\")\n\t\t\tpanic(err)\n\t\t\treturn\n\t\t}\n\t\tif msg.Kind==\"PRIVATE\"{\n\t\t\tctrl.updateText(msg.Username+\" wispers: \"+msg.MSG)\n\t\t}else if msg.Kind==\"PUBLIC\"{\n\t\t\tctrl.updateText(msg.Username+\": \"+msg.MSG)\n\t\t}else if msg.Kind==\"ADD\" || msg.Kind==\"DISCONNECT\"{\n\t\t\tctrl.updateText(msg.MSG)\n\t\t\tctrl.updateList(msg.Usernames)\n\t\t}else if msg.Kind==\"SAMENAME\"{\n\t\t\tmyName=msg.Username\n\t\t\tctrl.updateText(msg.MSG)\n\t\t}\t\n\t}\n}", "func receiveMessageHeld(mutexMessage MessageMutexEntity) {\n\twaitingForSendingAnswerBack(mutexMessage)\n\tsendReplyOkMessage(mutexMessage.User + mutexMessage.Reply)\n}" ]
[ "0.71419716", "0.6794124", "0.6723901", "0.655466", "0.655466", "0.65224487", "0.63669544", "0.6241274", "0.60934085", "0.6075975", "0.6045646", "0.5995853", "0.5965748", "0.59029645", "0.5892197", "0.5843672", "0.58056587", "0.5782442", "0.5735349", "0.57043934", "0.5700961", "0.56938565", "0.5687623", "0.566283", "0.5659668", "0.56130284", "0.5602799", "0.5596425", "0.5594052", "0.55568177", "0.55501354", "0.5511128", "0.5499277", "0.5470654", "0.5420554", "0.54114217", "0.53918076", "0.5329463", "0.53068274", "0.52952266", "0.52943265", "0.5287359", "0.5282908", "0.5261694", "0.526076", "0.5259712", "0.5242389", "0.5222205", "0.5217899", "0.521033", "0.5203377", "0.5198939", "0.51539373", "0.5151245", "0.51298326", "0.51133597", "0.5102543", "0.5080029", "0.507575", "0.5070685", "0.50703496", "0.5068116", "0.5065763", "0.50646895", "0.5041777", "0.5029305", "0.5022723", "0.5006989", "0.49976742", "0.49947956", "0.49727556", "0.49638888", "0.49566406", "0.49476635", "0.4947418", "0.49242628", "0.49198255", "0.48749244", "0.48616716", "0.4849071", "0.4847632", "0.48459134", "0.48444164", "0.48425302", "0.48425302", "0.48271707", "0.48246735", "0.48240563", "0.48192927", "0.48052716", "0.47916847", "0.47893706", "0.47771382", "0.47701353", "0.47699797", "0.47694898", "0.47653922", "0.47589704", "0.47541967", "0.47532228" ]
0.6992184
1
DoTimestamp records the timestamp to the context with the given description. If the context does not implement TimestampContext, nothing happens.
func DoTimestamp(ctx context.Context, description string) { if ctx, ok := ctx.(TimestampContext); ok { ctx.DoTimestamp(description) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (ctx Context) UpdateTimestamp() Context {\n\tctx.Timestamp = time.Now().UnixNano() / int64(time.Millisecond)\n\treturn ctx\n}", "func (i *InsertFactBuilder) OTimestamp(v time.Time, p logentry.TimestampPrecision, unitID uint64) *InsertFactBuilder {\n\ti.fact.Object = ATimestamp(v, p, unitID)\n\treturn i\n}", "func WithTimestamp(parent context.Context, opts ...TimestampContextOptionsFunc) TimestampContext {\n\tconf := timestampContextOptions{\n\t\tmaxStamps: 30,\n\t\tsince: time.Now(),\n\t}\n\tfor _, o := range opts {\n\t\to(&conf)\n\t}\n\tc := timestampContext{\n\t\tContext: parent,\n\t\tcreatedAt: conf.since,\n\t\tmaxStamps: conf.maxStamps,\n\t\tstamps: make([]elapsed, 0, conf.maxStamps),\n\t}\n\treturn &c\n}", "func Timestamp() logger.MiddlewareInterface {\n\treturn func(handler logger.HandlerInterface) logger.HandlerInterface {\n\t\treturn func(entry logger.Entry) error {\n\t\t\tif entry.Context == nil {\n\t\t\t\tentry.Context = &logger.Context{}\n\t\t\t}\n\t\t\tentry.Context.SetField(logger.Time(\"timestamp\", time.Now()))\n\t\t\treturn handler(entry)\n\t\t}\n\t}\n}", "func WithTimestamp(ctx context.Context, ts time.Time) context.Context {\n\treturn context.WithValue(ctx, ghctxtimestamp, ts)\n}", "func Timestamp(scope *Scope) (ts tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"Timestamp\",\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func (s *BaseRFC5424Listener) EnterTimestamp(ctx *TimestampContext) {}", "func (m *Model) handleTimestamp() {\n\tif m.IsTimestamps() {\n\t\tm.Columns = append(m.Columns, Col(CREATED_AT))\n\t\tm.Columns = append(m.Columns, Col(UPDATED_AT))\n\t}\n}", "func (e *DiscordWebhookEmbed) SetTimestamp() {\n\te.Timestamp = time.Now().UTC().Format(\"2006-01-02T15:04:05-0700\")\n}", "func (bse *BaseEvent) Timestamp(timestamp int64) {\n\tbse.Timestmp = timestamp\n}", "func Timestamp(s *Sword) {\n\ts.timestamp = true\n}", "func logWithTimestamp(text string) 
{\n\tfmt.Println(Timestamp(), text)\n}", "func (scsuo *SurveyCellScanUpdateOne) SetTimestamp(t time.Time) *SurveyCellScanUpdateOne {\n\tscsuo.timestamp = &t\n\treturn scsuo\n}", "func (s *BaseRFC5424Listener) ExitTimestamp(ctx *TimestampContext) {}", "func (tracer *TraceLogger) OnTimestamp(ts time.Time) {\n\ttracer.traceMutex.Lock()\n\tdefer tracer.traceMutex.Unlock()\n\tif tracer.fileHandle == nil || tracer.hasProperFilename {\n\t\treturn\n\t}\n\tformatted := ts.Format(time.RFC3339)\n\tfname := \"/var/log/stratux/\" + formatted + \"_trace.txt.gz\"\n\tif formatted != fname {\n\t\terr := os.Rename(tracer.fileName, fname)\n\t\tif err == nil {\n\t\t\ttracer.fileName = fname\n\t\t}\n\t}\n\ttracer.hasProperFilename = true\n}", "func (s *OutputStream) WriteWithTimestamp(timestamp time.Time, data []byte) error {\n\ts.mux.RLock()\n\tdefer s.mux.RUnlock()\n\n\tif s.closed {\n\t\treturn nil\n\t}\n\n\tselect {\n\tcase <-s.ctx.Done():\n\t\ts.sendError(timestamp, s.ctx.Err())\n\t\ts.sentCtxErr = true\n\t\treturn s.ctx.Err()\n\tdefault:\n\t\ts.sendData(timestamp, string(data))\n\t\treturn nil\n\t}\n}", "func (c Context) Timestamp() Context {\n\tif len(c.l.context) > 0 {\n\t\tc.l.context[0] = 1\n\t} else {\n\t\tc.l.context = append(c.l.context, 1)\n\t}\n\treturn c\n}", "func Timestamp(ts time.Time) func(Call) error {\n\treturn func(o Call) error {\n\t\tm, ok := o.(*Mutate)\n\t\tif !ok {\n\t\t\treturn errors.New(\"'Timestamp' option can only be used with mutation queries\")\n\t\t}\n\t\tm.timestamp = uint64(ts.UnixNano() / 1e6)\n\t\treturn nil\n\t}\n}", "func (truo *TradeRecordUpdateOne) SetTimestamp(t time.Time) *TradeRecordUpdateOne {\n\ttruo.mutation.SetTimestamp(t)\n\treturn truo\n}", "func (rec *RawEventCreate) SetTimestamp(t time.Time) *RawEventCreate {\n\trec.mutation.SetTimestamp(t)\n\treturn rec\n}", "func (ms SummaryDataPoint) SetTimestamp(v TimestampUnixNano) {\n\t(*ms.orig).TimeUnixNano = uint64(v)\n}", "func (o *HyperflexClusterHealthCheckExecutionSnapshot) 
SetTimestamp(v time.Time) {\n\to.Timestamp = &v\n}", "func (t *TagCollector) PublishWithTimestamp(ts time.Time, measurement string, values ...types.Field) {\n\tt.sink.PublishWithTimestamp(ts, measurement, t.collect(), values...)\n}", "func (f *Format) SetTimestamp(timestampFormat string) {\n\n\tf.Timestamp = timestampFormat\n\n}", "func (t TsTimestampPoint) Timestamp() time.Time {\n\treturn t.timestamp\n}", "func (scsu *SurveyCellScanUpdate) SetTimestamp(t time.Time) *SurveyCellScanUpdate {\n\tscsu.timestamp = &t\n\treturn scsu\n}", "func ExampleTime_Timestamp() {\n\tt := gtime.Timestamp()\n\n\tfmt.Println(t)\n\n\t// May output:\n\t// 1533686888\n}", "func (o *PaymentStatusUpdateWebhook) SetTimestamp(v time.Time) {\n\to.Timestamp = v\n}", "func (o *SecurityProblemEvent) SetTimestamp(v int64) {\n\to.Timestamp = &v\n}", "func (tx *Hello) Timestamp() uint64 {\n\treturn tx.Timestamp_\n}", "func (ms Int64DataPoint) SetTimestamp(v TimestampUnixNano) {\n\t(*ms.orig).TimeUnixNano = uint64(v)\n}", "func (bb *BlockBuilder) SetTimestamp(timestamp uint64) {\n\tbb.block.Timestamp = timestamp\n}", "func (o *ActionDTO) SetTimestamp(v string) {\n\to.Timestamp = &v\n}", "func (ms DoubleDataPoint) SetTimestamp(v TimestampUnixNano) {\n\t(*ms.orig).TimeUnixNano = uint64(v)\n}", "func (sh *ServerPropertiesHandle) SetTimestamp(timestamp string) {\n\tsh.timestamp = timestamp\n}", "func (o *ServiceCheck) SetTimestamp(v int64) {\n\to.Timestamp = &v\n}", "func (o *LogContent) HasTimestamp() bool {\n\tif o != nil && o.Timestamp != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (writer *DefaultPacketWriter) WriteTimestamped(timestamp *Packet1BLayer, generic RakNetPacket) error {\n\tlayers := &PacketLayers{\n\t\tTimestamp: timestamp,\n\t\tMain: generic,\n\t\tPacketType: generic.Type(),\n\t}\n\treturn writer.writeTimestamped(layers, Unreliable)\n}", "func (theme Theme) Timestamp(s string) string {\n\tif theme.sys == nil {\n\t\treturn s\n\t}\n\treturn theme.sys.Format(s)\n}", "func (bb 
*BlockBuilder) SetTimestamp(timestamp types.Uint64) {\n\tbb.block.Timestamp = timestamp\n}", "func timestamped(orig io.Writer) io.Writer {\n\treturn wfilter.LinePrepender(orig, func(w io.Writer) (int, error) {\n\t\treturn fmt.Fprintf(w, \"%s - \", time.Now().In(time.UTC).Format(logTimestampFormat))\n\t})\n}", "func (o *LogContent) SetTimestamp(v time.Time) {\n\to.Timestamp = &v\n}", "func (*AutoMlForecastingInputs_Transformation_TimestampTransformation) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_schema_trainingjob_definition_automl_time_series_forecasting_proto_rawDescGZIP(), []int{1, 0, 3}\n}", "func Timestamp(layout string) Valuer {\n\treturn func(context.Context) interface{} {\n\t\treturn time.Now().Format(layout)\n\t}\n}", "func (o *EventAttributes) SetTimestamp(v int64) {\n\to.Timestamp = &v\n}", "func (s *SampledHTTPRequest) SetTimestamp(v time.Time) *SampledHTTPRequest {\n\ts.Timestamp = &v\n\treturn s\n}", "func (d Dispatcher) ExecTimestamp(id string, hash string) (int64, error) {\n\te, err := d.GetBC().FindExec(id, hash)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn e.GetTimestamp(), nil\n}", "func (o *ExportProductsUsingGETParams) SetTimestamp(timestamp *string) {\n\to.Timestamp = timestamp\n}", "func (o *NewData) SetTimestamp(v string) {\n\to.Timestamp = &v\n}", "func (_L1Block *L1BlockCallerSession) Timestamp() (uint64, error) {\n\treturn _L1Block.Contract.Timestamp(&_L1Block.CallOpts)\n}", "func (o *GetPublicAuthParams) SetTimestamp(timestamp string) {\n\to.Timestamp = timestamp\n}", "func (_L1Block *L1BlockSession) Timestamp() (uint64, error) {\n\treturn _L1Block.Contract.Timestamp(&_L1Block.CallOpts)\n}", "func (ms HistogramDataPoint) SetTimestamp(v TimestampUnixNano) {\n\t(*ms.orig).TimeUnixNano = uint64(v)\n}", "func (ms HistogramBucketExemplar) SetTimestamp(v TimestampUnixNano) {\n\t(*ms.orig).TimeUnixNano = uint64(v)\n}", "func (o *DatasetEvent) SetTimestamp(v string) {\n\to.Timestamp = &v\n}", "func (c 
*Context) getTimestamp() uint64 {\n\treturn uint64(c.Config.Timer.Now().UnixNano()) / c.Config.TimestampIncrement * c.Config.TimestampIncrement\n}", "func (b *Blueprint) Timestamp(column string, precision int) *ColumnDefinition {\n\treturn b.addColumn(\"timestamp\", column, &ColumnOptions{\n\t\tPrecision: precision,\n\t})\n}", "func (o *DatasetEvent) HasTimestamp() bool {\n\tif o != nil && o.Timestamp != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *HyperflexSnapshotStatus) SetTimestamp(v int64) {\n\to.Timestamp = &v\n}", "func (o *HyperflexHealthCheckPackageChecksum) SetTimestamp(v time.Time) {\n\to.Timestamp = &v\n}", "func (_Token *TokenCaller) LaunchTimestamp(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"launchTimestamp\")\n\treturn *ret0, err\n}", "func (o *DeviceParameterValue) SetTimestamp(v string) {\n\to.Timestamp = &v\n}", "func (o *SecurityProblemEvent) HasTimestamp() bool {\n\tif o != nil && o.Timestamp != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func runTestPreparedTimestamp(t *C) {\n\trunTestsOnNewDB(t, nil, \"prepared_timestamp\", func(dbt *DBTest) {\n\t\tdbt.mustExec(\"create table test (a timestamp, b time)\")\n\t\tdbt.mustExec(\"set time_zone='+00:00'\")\n\t\tinsertStmt := dbt.mustPrepare(\"insert test values (?, ?)\")\n\t\tdefer insertStmt.Close()\n\t\tvts := time.Unix(1, 1)\n\t\tvt := time.Unix(-1, 1)\n\t\tdbt.mustExecPrepared(insertStmt, vts, vt)\n\t\tselectStmt := dbt.mustPrepare(\"select * from test where a = ? 
and b = ?\")\n\t\tdefer selectStmt.Close()\n\t\trows := dbt.mustQueryPrepared(selectStmt, vts, vt)\n\t\tt.Assert(rows.Next(), IsTrue)\n\t\tvar outA, outB string\n\t\terr := rows.Scan(&outA, &outB)\n\t\tt.Assert(err, IsNil)\n\t\tt.Assert(outA, Equals, \"1970-01-01 00:00:01\")\n\t\tt.Assert(outB, Equals, \"23:59:59\")\n\t})\n}", "func MarshalTimestamp(s *jsonplugin.MarshalState, v *timestamppb.Timestamp) {\n\tif v == nil {\n\t\ts.WriteNil()\n\t\treturn\n\t}\n\ts.WriteTime(v.AsTime())\n}", "func (tru *TradeRecordUpdate) SetTimestamp(t time.Time) *TradeRecordUpdate {\n\ttru.mutation.SetTimestamp(t)\n\treturn tru\n}", "func (m *TradeRecordMutation) SetTimestamp(t time.Time) {\n\tm.timestamp = &t\n}", "func (o EventDataStoreOutput) CreatedTimestamp() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *EventDataStore) pulumi.StringOutput { return v.CreatedTimestamp }).(pulumi.StringOutput)\n}", "func timestamp(s string) string {\n\tif DisableTime {\n\t\treturn s\n\t}\n\tt := time.Now().Format(tsf)\n\treturn t + \" - \" + s\n}", "func (o *BulletinDTO) SetTimestamp(v string) {\n\to.Timestamp = &v\n}", "func (tb *Batch) WithTimestamp(timestamp int64) *Batch {\n\ttb.Batch = tb.Batch.WithTimestamp(timestamp)\n\treturn tb\n}", "func (o *ActionDTO) HasTimestamp() bool {\n\tif o != nil && o.Timestamp != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *BarRecordMutation) SetTimestamp(t time.Time) {\n\tm.timestamp = &t\n}", "func (_BaseAccessWallet *BaseAccessWalletCaller) ValidateTimestamp(opts *bind.CallOpts, _ts *big.Int) (bool, error) {\n\tvar out []interface{}\n\terr := _BaseAccessWallet.contract.Call(opts, &out, \"validateTimestamp\", _ts)\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}", "func (s *FileSystemSize) SetTimestamp(v time.Time) *FileSystemSize {\n\ts.Timestamp = &v\n\treturn s\n}", "func (o *DeviceResourceVersionValueWeb) SetTimestamp(v string) {\n\to.Timestamp = 
&v\n}", "func SetWatchdogViaTimestamp(iTimestamp int, watchdog *Watchdog) (int64, error) {\n\twatchdog.Timestamp = iTimestamp\n\treturn Engine.Insert(watchdog)\n}", "func (_Transactable *TransactableCaller) ValidateTimestamp(opts *bind.CallOpts, _ts *big.Int) (bool, *big.Int, error) {\n\tvar out []interface{}\n\terr := _Transactable.contract.Call(opts, &out, \"validateTimestamp\", _ts)\n\n\tif err != nil {\n\t\treturn *new(bool), *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\tout1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)\n\n\treturn out0, out1, err\n\n}", "func PutWatchdogViaTimestamp(Timestamp_ int, iWatchdog *Watchdog) (int64, error) {\n\trow, err := Engine.Update(iWatchdog, &Watchdog{Timestamp: Timestamp_})\n\treturn row, err\n}", "func (t *TimestampCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- t.Description\n}", "func (el Elements) Timestamp(i int) int64 {\n\tswitch el.Type {\n\tcase part3.Int32:\n\t\treturn el.I32[i].Ts\n\tcase part3.Float32:\n\t\treturn el.F32[i].Ts\n\tcase part3.Float64:\n\t\treturn el.F64[i].Ts\n\tdefault:\n\t\treturn int64(-1)\n\t}\n}", "func (o *EventAttributes) HasTimestamp() bool {\n\treturn o != nil && o.Timestamp != nil\n}", "func (e *EchoSendEvent) Timestamp() time.Time {\n\treturn e.time\n}", "func (mr *MockPostForkBlockMockRecorder) Timestamp() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Timestamp\", reflect.TypeOf((*MockPostForkBlock)(nil).Timestamp))\n}", "func (o *DatasetEvent) GetTimestampOk() (*string, bool) {\n\tif o == nil || o.Timestamp == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Timestamp, true\n}", "func (ms SummaryDataPoint) Timestamp() TimestampUnixNano {\n\treturn TimestampUnixNano((*ms.orig).TimeUnixNano)\n}", "func (e event) Timestamp() time.Time {\n\treturn e.dbEvent.Timestamp\n}", "func WriteTimestamp(buffer []byte, offset int, value Timestamp) {\n nanoseconds := uint64(value.UnixNano())\n 
WriteUInt64(buffer, offset, nanoseconds)\n}", "func (l *LockArgs) SetTimestamp(tstamp time.Time) {\n\tl.Timestamp = tstamp\n}", "func (o TopicRuleErrorActionCloudwatchMetricOutput) MetricTimestamp() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TopicRuleErrorActionCloudwatchMetric) *string { return v.MetricTimestamp }).(pulumi.StringPtrOutput)\n}", "func (s *DataLakeException) SetTimestamp(v time.Time) *DataLakeException {\n\ts.Timestamp = &v\n\treturn s\n}", "func timestamp(ctx iscp.SandboxBase) uint64 {\n\ttsNano := time.Duration(ctx.GetTimestamp()) * time.Nanosecond\n\treturn uint64(tsNano / time.Second)\n}", "func (o EventDataStoreOutput) UpdatedTimestamp() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *EventDataStore) pulumi.StringOutput { return v.UpdatedTimestamp }).(pulumi.StringOutput)\n}", "func (e *HTTPResponseEvent) WithTimestamp(ts time.Time) *HTTPResponseEvent {\n\te.ts = ts\n\treturn e\n}", "func (_L1Block *L1BlockCaller) Timestamp(opts *bind.CallOpts) (uint64, error) {\n\tvar out []interface{}\n\terr := _L1Block.contract.Call(opts, &out, \"timestamp\")\n\n\tif err != nil {\n\t\treturn *new(uint64), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(uint64)).(*uint64)\n\n\treturn out0, err\n\n}", "func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tvec := vals[0].(Vector)\n\tfor _, el := range vec {\n\t\tenh.Out = append(enh.Out, Sample{\n\t\t\tMetric: enh.DropMetricName(el.Metric),\n\t\t\tF: float64(el.T) / 1000,\n\t\t})\n\t}\n\treturn enh.Out\n}", "func (s *EdgeMetric) SetTimestamp(v time.Time) *EdgeMetric {\n\ts.Timestamp = &v\n\treturn s\n}", "func (o TopicRuleCloudwatchMetricOutput) MetricTimestamp() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TopicRuleCloudwatchMetric) *string { return v.MetricTimestamp }).(pulumi.StringPtrOutput)\n}", "func (o *LogContent) GetTimestampOk() (*time.Time, bool) {\n\tif o == nil || o.Timestamp == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Timestamp, 
true\n}", "func (o *DeviceParameterValue) HasTimestamp() bool {\n\tif o != nil && o.Timestamp != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}" ]
[ "0.5994401", "0.58676094", "0.5676175", "0.5648619", "0.56246597", "0.5593229", "0.54707736", "0.54352665", "0.5433174", "0.5370343", "0.5338876", "0.526802", "0.52554613", "0.5241193", "0.5235192", "0.5227927", "0.52159977", "0.5208446", "0.5202941", "0.51961774", "0.515932", "0.5150974", "0.51465166", "0.5140693", "0.5139251", "0.5137399", "0.5132476", "0.51198107", "0.51182127", "0.50857335", "0.5073221", "0.5072876", "0.5057689", "0.50448555", "0.50284475", "0.5027649", "0.50244355", "0.5023314", "0.50165796", "0.5011174", "0.5007093", "0.4996399", "0.4983636", "0.49790338", "0.4973639", "0.49711353", "0.49507028", "0.49503633", "0.4947117", "0.49380827", "0.49360725", "0.49347737", "0.4933164", "0.49320617", "0.4931183", "0.49304956", "0.4923156", "0.49135214", "0.49029058", "0.49018326", "0.48991883", "0.48982882", "0.48894992", "0.4883945", "0.4868896", "0.48609284", "0.485668", "0.48525557", "0.4851992", "0.48514393", "0.48496622", "0.48481247", "0.48275512", "0.48224595", "0.48203266", "0.48201045", "0.48020935", "0.47815087", "0.4779957", "0.47549802", "0.4731097", "0.4728706", "0.4726524", "0.4726185", "0.4719472", "0.47168255", "0.4715622", "0.47051165", "0.46977165", "0.46789983", "0.46730036", "0.4671799", "0.46712303", "0.46602693", "0.4659478", "0.46593127", "0.46547642", "0.46525678", "0.46501783", "0.4634759" ]
0.84586316
0
WithTimestamp creates the TimestampContext
func WithTimestamp(parent context.Context, opts ...TimestampContextOptionsFunc) TimestampContext { conf := timestampContextOptions{ maxStamps: 30, since: time.Now(), } for _, o := range opts { o(&conf) } c := timestampContext{ Context: parent, createdAt: conf.since, maxStamps: conf.maxStamps, stamps: make([]elapsed, 0, conf.maxStamps), } return &c }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WithTimestamp(ctx context.Context, ts time.Time) context.Context {\n\treturn context.WithValue(ctx, ghctxtimestamp, ts)\n}", "func (ctx Context) UpdateTimestamp() Context {\n\tctx.Timestamp = time.Now().UnixNano() / int64(time.Millisecond)\n\treturn ctx\n}", "func (c Context) Timestamp() Context {\n\tif len(c.l.context) > 0 {\n\t\tc.l.context[0] = 1\n\t} else {\n\t\tc.l.context = append(c.l.context, 1)\n\t}\n\treturn c\n}", "func (tb *Batch) WithTimestamp(timestamp int64) *Batch {\n\ttb.Batch = tb.Batch.WithTimestamp(timestamp)\n\treturn tb\n}", "func Timestamp() logger.MiddlewareInterface {\n\treturn func(handler logger.HandlerInterface) logger.HandlerInterface {\n\t\treturn func(entry logger.Entry) error {\n\t\t\tif entry.Context == nil {\n\t\t\t\tentry.Context = &logger.Context{}\n\t\t\t}\n\t\t\tentry.Context.SetField(logger.Time(\"timestamp\", time.Now()))\n\t\t\treturn handler(entry)\n\t\t}\n\t}\n}", "func DoTimestamp(ctx context.Context, description string) {\n\tif ctx, ok := ctx.(TimestampContext); ok {\n\t\tctx.DoTimestamp(description)\n\t}\n}", "func WithTimestampFormat(format string) Option {\n\treturn func(o *options) {\n\t\to.timestampFormat = format\n\t}\n}", "func Timestamp(timestamp time.Time) Option {\n\treturn &timestampOption{timestamp}\n}", "func (s *OutputStream) WriteWithTimestamp(timestamp time.Time, data []byte) error {\n\ts.mux.RLock()\n\tdefer s.mux.RUnlock()\n\n\tif s.closed {\n\t\treturn nil\n\t}\n\n\tselect {\n\tcase <-s.ctx.Done():\n\t\ts.sendError(timestamp, s.ctx.Err())\n\t\ts.sentCtxErr = true\n\t\treturn s.ctx.Err()\n\tdefault:\n\t\ts.sendData(timestamp, string(data))\n\t\treturn nil\n\t}\n}", "func WithReceivedTimestamp(parent context.Context, receivedTimestamp time.Time) context.Context {\n\tif receivedTimestamp.IsZero() {\n\t\treturn parent\n\t}\n\treturn WithValue(parent, requestReceivedTimestampKey, receivedTimestamp)\n}", "func (i *InsertFactBuilder) OTimestamp(v time.Time, p logentry.TimestampPrecision, unitID uint64) 
*InsertFactBuilder {\n\ti.fact.Object = ATimestamp(v, p, unitID)\n\treturn i\n}", "func (rec *RawEventCreate) SetTimestamp(t time.Time) *RawEventCreate {\n\trec.mutation.SetTimestamp(t)\n\treturn rec\n}", "func Timestamp(scope *Scope) (ts tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"Timestamp\",\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func Timestamp(ts time.Time) func(Call) error {\n\treturn func(o Call) error {\n\t\tm, ok := o.(*Mutate)\n\t\tif !ok {\n\t\t\treturn errors.New(\"'Timestamp' option can only be used with mutation queries\")\n\t\t}\n\t\tm.timestamp = uint64(ts.UnixNano() / 1e6)\n\t\treturn nil\n\t}\n}", "func Timestamp(layout string) Valuer {\n\treturn func(context.Context) interface{} {\n\t\treturn time.Now().Format(layout)\n\t}\n}", "func MarshalTimestamp(t time.Time) graphql.Marshaler {\n\treturn graphql.WriterFunc(func(w io.Writer) {\n\t\tio.WriteString(w, strconv.FormatInt(t.Unix(), 10))\n\t})\n}", "func MarshalTimestamp(t time.Time) graphql.Marshaler {\n\treturn graphql.WriterFunc(func(w io.Writer) {\n\t\tio.WriteString(w, strconv.FormatInt(t.Unix(), 10))\n\t})\n}", "func (e *DiscordWebhookEmbed) SetTimestamp() {\n\te.Timestamp = time.Now().UTC().Format(\"2006-01-02T15:04:05-0700\")\n}", "func NewTimestampFormatter() TimeFormatter {\n\treturn NewTimeFormatter(true)\n}", "func (o *GetPublicAuthParams) SetTimestamp(timestamp string) {\n\to.Timestamp = timestamp\n}", "func (f *Format) SetTimestamp(timestampFormat string) {\n\n\tf.Timestamp = timestampFormat\n\n}", "func Timestamp(s *Sword) {\n\ts.timestamp = true\n}", "func (b *Blueprint) Timestamp(column string, precision int) *ColumnDefinition {\n\treturn b.addColumn(\"timestamp\", column, &ColumnOptions{\n\t\tPrecision: precision,\n\t})\n}", "func LogWithTimestamp(msg string, timestamp time.Time) logEvent {\n\treturn logEvent{Msg: msg, Time: timestamp}\n}", "func (o *ExportProductsUsingGETParams) SetTimestamp(timestamp 
*string) {\n\to.Timestamp = timestamp\n}", "func (c current) Timestamp(l CurrentLabels) prometheus.Gauge {\n\treturn c.timestamp.WithLabelValues(l.Values()...)\n}", "func (bb *BlockBuilder) SetTimestamp(timestamp uint64) {\n\tbb.block.Timestamp = timestamp\n}", "func logWithTimestamp(text string) {\n\tfmt.Println(Timestamp(), text)\n}", "func (t TsTimestampPoint) Timestamp() time.Time {\n\treturn t.timestamp\n}", "func (scsuo *SurveyCellScanUpdateOne) SetTimestamp(t time.Time) *SurveyCellScanUpdateOne {\n\tscsuo.timestamp = &t\n\treturn scsuo\n}", "func (bse *BaseEvent) Timestamp(timestamp int64) {\n\tbse.Timestmp = timestamp\n}", "func NewTimestamp(v time.Time) *Timestamp {\n\treturn ((*Timestamp)(nil)).Load(v)\n}", "func (bb *BlockBuilder) SetTimestamp(timestamp types.Uint64) {\n\tbb.block.Timestamp = timestamp\n}", "func MarshalTimestamp(s *jsonplugin.MarshalState, v *timestamppb.Timestamp) {\n\tif v == nil {\n\t\ts.WriteNil()\n\t\treturn\n\t}\n\ts.WriteTime(v.AsTime())\n}", "func (s *BaseRFC5424Listener) EnterTimestamp(ctx *TimestampContext) {}", "func (scsu *SurveyCellScanUpdate) SetTimestamp(t time.Time) *SurveyCellScanUpdate {\n\tscsu.timestamp = &t\n\treturn scsu\n}", "func (m *Model) handleTimestamp() {\n\tif m.IsTimestamps() {\n\t\tm.Columns = append(m.Columns, Col(CREATED_AT))\n\t\tm.Columns = append(m.Columns, Col(UPDATED_AT))\n\t}\n}", "func (e *HTTPResponseEvent) WithTimestamp(ts time.Time) *HTTPResponseEvent {\n\te.ts = ts\n\treturn e\n}", "func (et ExfatTimestamp) TimestampWithOffset(offset int) time.Time {\n\tlocation := time.FixedZone(fmt.Sprintf(\"(off=%d)\", offset), offset)\n\n\treturn time.Date(et.Year(), time.Month(et.Month()), et.Day(), et.Hour(), et.Minute(), et.Second(), 0, location)\n}", "func (_Token *TokenCaller) LaunchTimestamp(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"launchTimestamp\")\n\treturn *ret0, err\n}", "func (tx *Hello) 
Timestamp() uint64 {\n\treturn tx.Timestamp_\n}", "func (truo *TradeRecordUpdateOne) SetTimestamp(t time.Time) *TradeRecordUpdateOne {\n\ttruo.mutation.SetTimestamp(t)\n\treturn truo\n}", "func (t *TimeTravelCtx) WithContext(ctx context.Context) *TimeTravelCtx {\n\tt.mutex.Lock()\n\tdefer t.mutex.Unlock()\n\tt.Context = context.WithValue(ctx, ContextKey, NowProvider(t.now))\n\treturn t\n}", "func (tru *TradeRecordUpdate) SetTimestamp(t time.Time) *TradeRecordUpdate {\n\ttru.mutation.SetTimestamp(t)\n\treturn tru\n}", "func (o *NewData) SetTimestamp(v string) {\n\to.Timestamp = &v\n}", "func (ms HistogramBucketExemplar) SetTimestamp(v TimestampUnixNano) {\n\t(*ms.orig).TimeUnixNano = uint64(v)\n}", "func (l *LockArgs) SetTimestamp(tstamp time.Time) {\n\tl.Timestamp = tstamp\n}", "func (t *NGETime) FromTimestamp(timestamp int64) {\n\tsec := timestamp / 1000\n\tnsec := (timestamp - sec*1000) * 1e6\n\ttm := time.Unix(sec, nsec)\n\t*t = NGETime(tm)\n}", "func (o *PaymentStatusUpdateWebhook) SetTimestamp(v time.Time) {\n\to.Timestamp = v\n}", "func (_Token *TokenCallerSession) LaunchTimestamp() (*big.Int, error) {\n\treturn _Token.Contract.LaunchTimestamp(&_Token.CallOpts)\n}", "func (_Token *TokenSession) LaunchTimestamp() (*big.Int, error) {\n\treturn _Token.Contract.LaunchTimestamp(&_Token.CallOpts)\n}", "func (o *BulletinDTO) SetTimestamp(v string) {\n\to.Timestamp = &v\n}", "func (o *HyperflexClusterHealthCheckExecutionSnapshot) SetTimestamp(v time.Time) {\n\to.Timestamp = &v\n}", "func ExampleTime_Timestamp() {\n\tt := gtime.Timestamp()\n\n\tfmt.Println(t)\n\n\t// May output:\n\t// 1533686888\n}", "func (o *HyperflexSnapshotStatus) SetTimestamp(v int64) {\n\to.Timestamp = &v\n}", "func Timestamp(t, i uint32) Val {\n\tv := Val{t: bsontype.Timestamp}\n\tv.bootstrap[0] = byte(i)\n\tv.bootstrap[1] = byte(i >> 8)\n\tv.bootstrap[2] = byte(i >> 16)\n\tv.bootstrap[3] = byte(i >> 24)\n\tv.bootstrap[4] = byte(t)\n\tv.bootstrap[5] = byte(t >> 8)\n\tv.bootstrap[6] = byte(t >> 
16)\n\tv.bootstrap[7] = byte(t >> 24)\n\treturn v\n}", "func (ms HistogramDataPoint) SetTimestamp(v TimestampUnixNano) {\n\t(*ms.orig).TimeUnixNano = uint64(v)\n}", "func (info *Info) SetTimestamp(t time.Time) {\n\tinfo.Attributes[\"sig.timestamp\"] = t.UTC()\n}", "func (b *ClusterBuilder) CreationTimestamp(value time.Time) *ClusterBuilder {\n\tb.creationTimestamp = value\n\tb.bitmap_ |= 8192\n\treturn b\n}", "func (sh *ServerPropertiesHandle) SetTimestamp(timestamp string) {\n\tsh.timestamp = timestamp\n}", "func (s *SampledHTTPRequest) SetTimestamp(v time.Time) *SampledHTTPRequest {\n\ts.Timestamp = &v\n\treturn s\n}", "func NewTimestamp(t time.Time) *Timestamp {\n\treturn &Timestamp{\n\t\tbase: newBase(typeTimestamp, nil),\n\t\tConfig: TimestampConfig{\n\t\t\tTimestamp: t.Unix(),\n\t\t},\n\t}\n}", "func NewTemplateTimestamped() *gomol.Template {\n\ttpl, _ := gomol.NewTemplate(\"{{.Timestamp.Format \\\"2006-01-02 15:04:05.000\\\"}} [{{color}}{{ucase .LevelName}}{{reset}}] {{.Message}}\")\n\treturn tpl\n}", "func (o *EventAttributes) SetTimestamp(v int64) {\n\to.Timestamp = &v\n}", "func (theme Theme) Timestamp(s string) string {\n\tif theme.sys == nil {\n\t\treturn s\n\t}\n\treturn theme.sys.Format(s)\n}", "func (c *Context) getTimestamp() uint64 {\n\treturn uint64(c.Config.Timer.Now().UnixNano()) / c.Config.TimestampIncrement * c.Config.TimestampIncrement\n}", "func WithCreationTimestamp(t time.Time) RevisionOption {\n\treturn func(rev *v1alpha1.Revision) {\n\t\trev.ObjectMeta.CreationTimestamp = metav1.Time{t}\n\t}\n}", "func (t *TagCollector) PublishWithTimestamp(ts time.Time, measurement string, values ...types.Field) {\n\tt.sink.PublishWithTimestamp(ts, measurement, t.collect(), values...)\n}", "func (v *View) AddTimestamp(t Timestamp) {\n\tv.tMutex.Lock()\n\tdefer v.tMutex.Unlock()\n\n\tif _, ok := v.Timestamps[t.ID]; !ok {\n\t\tv.Timestamps[t.ID] = Entry{\n\t\t\tOpinions: Opinions{t.Opinion},\n\t\t\tTimestamp: 
clock.SyncedTime(),\n\t\t}\n\t\treturn\n\t}\n\n\tentry := v.Timestamps[t.ID]\n\tentry.Opinions = append(entry.Opinions, t.Opinion)\n\tv.Timestamps[t.ID] = entry\n}", "func (m *BarRecordMutation) SetTimestamp(t time.Time) {\n\tm.timestamp = &t\n}", "func timestamped(orig io.Writer) io.Writer {\n\treturn wfilter.LinePrepender(orig, func(w io.Writer) (int, error) {\n\t\treturn fmt.Fprintf(w, \"%s - \", time.Now().In(time.UTC).Format(logTimestampFormat))\n\t})\n}", "func NewContext(ctx context.Context) context.Context {\n\treturn context.WithValue(ctx, timingKey, &[]buildapi.StageInfo{})\n}", "func (ms SummaryDataPoint) SetTimestamp(v TimestampUnixNano) {\n\t(*ms.orig).TimeUnixNano = uint64(v)\n}", "func (o *NewData) HasTimestamp() bool {\n\tif o != nil && o.Timestamp != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func WithParams(\n\tctx context.Context,\n\tts time.Time,\n\tpriority uint8,\n\tweight int64,\n\tkey string,\n\tmessage interface{},\n\tmarshaler Marshaler,\n) context.Context {\n\tctx = WithTimestamp(ctx, ts)\n\tctx = WithPriority(ctx, priority)\n\tctx = WithWeight(ctx, weight)\n\tctx = WithKey(ctx, key)\n\tctx = WithMessage(ctx, message)\n\tctx = WithMarshaler(ctx, marshaler)\n\treturn ctx\n}", "func (o *HyperflexHealthCheckPackageChecksum) SetTimestamp(v time.Time) {\n\to.Timestamp = &v\n}", "func Timestamp(ts *tspb.Timestamp) (time.Time, error) {\n\t// Don't return the zero value on error, because corresponds to a valid\n\t// timestamp. 
Instead return whatever time.Unix gives us.\n\tvar t time.Time\n\tif ts == nil {\n\t\tt = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp\n\t} else {\n\t\tt = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()\n\t}\n\treturn t, validateTimestamp(ts)\n}", "func NewContext() context.Context {\r\n\tvalues := node.Values{\r\n\t\tNow: protocol.CurrentTimestamp(),\r\n\t}\r\n\r\n\treturn context.WithValue(context.Background(), node.KeyValues, &values)\r\n}", "func (o *EventAttributes) HasTimestamp() bool {\n\treturn o != nil && o.Timestamp != nil\n}", "func (o *DeviceParameterValue) SetTimestamp(v string) {\n\to.Timestamp = &v\n}", "func SetWatchdogViaTimestamp(iTimestamp int, watchdog *Watchdog) (int64, error) {\n\twatchdog.Timestamp = iTimestamp\n\treturn Engine.Insert(watchdog)\n}", "func (o *ActionDTO) SetTimestamp(v string) {\n\to.Timestamp = &v\n}", "func (_L1Block *L1BlockCallerSession) Timestamp() (uint64, error) {\n\treturn _L1Block.Contract.Timestamp(&_L1Block.CallOpts)\n}", "func CurrentTimestamp() *Timestamp {\n\tt := time.Now()\n\treturn (*Timestamp)(&t)\n}", "func (s *Client) Timestamp(options *types.Options) (*types.Timestamp, error) {\n\turl := fmt.Sprintf(\"%s/common/timestamp\", baseURL)\n\n\tbody, err := s.getResponse(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result = new(types.Timestamp)\n\tif err := json.Unmarshal(body, result); err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}", "func timestamp(ctx iscp.SandboxBase) uint64 {\n\ttsNano := time.Duration(ctx.GetTimestamp()) * time.Nanosecond\n\treturn uint64(tsNano / time.Second)\n}", "func (ms DoubleDataPoint) SetTimestamp(v TimestampUnixNano) {\n\t(*ms.orig).TimeUnixNano = uint64(v)\n}", "func (o *GetPublicAuthParams) WithTimestamp(timestamp string) *GetPublicAuthParams {\n\to.SetTimestamp(timestamp)\n\treturn o\n}", "func (o *DeviceResourceVersionValueWeb) SetTimestamp(v string) {\n\to.Timestamp = &v\n}", "func addTimestamp(mkdocs interface{}) {\n\tswitch m := 
mkdocs.(type) {\n\tcase map[string]interface{}:\n\t\tswitch t := m[\"theme\"].(type) {\n\t\tcase map[interface{}]interface{}:\n\t\t\tswitch f := t[\"feature\"].(type) {\n\t\t\tcase map[interface{}]interface{}:\n\t\t\t\tf[\"timestamp\"] = time.Now().Format(time.RFC3339)\n\t\t\t}\n\t\t}\n\t}\n}", "func (u *Update) CurrentTimestamp(fields ...string) *Update {\n\tobjTemp, p := u.update[\"$currentDate\"]\n\tif !p {\n\t\tobjTemp = utils.M{}\n\t}\n\n\tobj := objTemp.(utils.M)\n\tfor _, field := range fields {\n\t\tobj[field] = utils.M{\"$type\": \"timestamp\"}\n\t}\n\n\tu.update[\"$currentDate\"] = obj\n\treturn u\n}", "func (s *FileSystemSize) SetTimestamp(v time.Time) *FileSystemSize {\n\ts.Timestamp = &v\n\treturn s\n}", "func runTestPreparedTimestamp(t *C) {\n\trunTestsOnNewDB(t, nil, \"prepared_timestamp\", func(dbt *DBTest) {\n\t\tdbt.mustExec(\"create table test (a timestamp, b time)\")\n\t\tdbt.mustExec(\"set time_zone='+00:00'\")\n\t\tinsertStmt := dbt.mustPrepare(\"insert test values (?, ?)\")\n\t\tdefer insertStmt.Close()\n\t\tvts := time.Unix(1, 1)\n\t\tvt := time.Unix(-1, 1)\n\t\tdbt.mustExecPrepared(insertStmt, vts, vt)\n\t\tselectStmt := dbt.mustPrepare(\"select * from test where a = ? 
and b = ?\")\n\t\tdefer selectStmt.Close()\n\t\trows := dbt.mustQueryPrepared(selectStmt, vts, vt)\n\t\tt.Assert(rows.Next(), IsTrue)\n\t\tvar outA, outB string\n\t\terr := rows.Scan(&outA, &outB)\n\t\tt.Assert(err, IsNil)\n\t\tt.Assert(outA, Equals, \"1970-01-01 00:00:01\")\n\t\tt.Assert(outB, Equals, \"23:59:59\")\n\t})\n}", "func NewTsTimestampPoint(timestamp time.Time, value time.Time) TsTimestampPoint {\n\treturn TsTimestampPoint{timestamp, value}\n}", "func (s *EdgeMetric) SetTimestamp(v time.Time) *EdgeMetric {\n\ts.Timestamp = &v\n\treturn s\n}", "func (tracer *TraceLogger) OnTimestamp(ts time.Time) {\n\ttracer.traceMutex.Lock()\n\tdefer tracer.traceMutex.Unlock()\n\tif tracer.fileHandle == nil || tracer.hasProperFilename {\n\t\treturn\n\t}\n\tformatted := ts.Format(time.RFC3339)\n\tfname := \"/var/log/stratux/\" + formatted + \"_trace.txt.gz\"\n\tif formatted != fname {\n\t\terr := os.Rename(tracer.fileName, fname)\n\t\tif err == nil {\n\t\t\ttracer.fileName = fname\n\t\t}\n\t}\n\ttracer.hasProperFilename = true\n}", "func (p *parser) parseTimestamp(annotations []Symbol) Timestamp {\n\treturn Timestamp{annotations: annotations, text: p.next().Val}\n}", "func (u *Update) CurrentTimestamp(fields ...string) *Update {\n\tobjTemp, p := u.update[\"$currentDate\"]\n\tif !p {\n\t\tobjTemp = types.M{}\n\t}\n\n\tobj := objTemp.(types.M)\n\tfor _, field := range fields {\n\t\tobj[field] = types.M{\"$type\": \"timestamp\"}\n\t}\n\n\tu.update[\"$currentDate\"] = obj\n\treturn u\n}", "func SetTimestampFormat(_tsFormat string) {\n\tglobalTsFormat = _tsFormat\n}", "func (self *ResTransaction)GetTimestamp()time.Time{\n return self.Timestamp\n}" ]
[ "0.80977577", "0.7183554", "0.70614016", "0.67979366", "0.6572396", "0.632787", "0.61412925", "0.61298275", "0.60963523", "0.60741365", "0.606171", "0.6014762", "0.6009179", "0.5986602", "0.58685726", "0.5814137", "0.5814137", "0.5791734", "0.57800174", "0.5765214", "0.57217", "0.5701566", "0.5636416", "0.56361014", "0.56000215", "0.5596215", "0.55747724", "0.552139", "0.55095035", "0.54915625", "0.54907066", "0.54889315", "0.54831046", "0.5479104", "0.54676986", "0.54423684", "0.5442106", "0.54152393", "0.5414726", "0.53904563", "0.53895485", "0.5383105", "0.53745764", "0.5356624", "0.5353548", "0.5340671", "0.5333015", "0.5322863", "0.53080523", "0.53079104", "0.5297954", "0.5290015", "0.5279698", "0.52755517", "0.5268546", "0.5262896", "0.5256552", "0.5253831", "0.5253196", "0.5249714", "0.52434295", "0.523961", "0.52220803", "0.52108353", "0.52014166", "0.5201322", "0.51900417", "0.51861936", "0.5177857", "0.5156086", "0.5150297", "0.51478577", "0.5132726", "0.5120308", "0.51133215", "0.5111447", "0.5105468", "0.51040816", "0.509501", "0.5090875", "0.5087714", "0.5087646", "0.50856906", "0.50814265", "0.5076855", "0.50708294", "0.5066982", "0.5060696", "0.5035552", "0.50234914", "0.5020318", "0.501834", "0.5016309", "0.5010393", "0.50046784", "0.49933007", "0.49901742", "0.49860388", "0.49855468", "0.49846584" ]
0.80684143
1
Indexes of the UuidId.
func (UuidId) Indexes() []ent.Index { return []ent.Index{ index.Fields("id"), } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *PodUIDIndexer) GetIndexes(pod *kubernetes.Pod) []string {\n\treturn []string{pod.Metadata.GetUid()}\n}", "func (e *Account) EntIndexes() []ent.EntIndex { return ent_Account_idx }", "func (e *Account) EntIndexes() []ent.EntIndex { return ent_Account_idx }", "func (Like) Indexes() []ent.Index {\n\treturn []ent.Index{\n\t\tindex.Fields(\"user_id\").Edges(\"restaurant\").Unique(),\n\t}\n}", "func (i *Info) getValidIndexes() ([][]string, error) {\n\tvar validIndexes [][]string\n\tvar possibleIndexes [][]string\n\n\tpossibleIndexes = append(possibleIndexes, []string{\"_uuid\"})\n\tpossibleIndexes = append(possibleIndexes, i.Metadata.TableSchema.Indexes...)\n\n\t// Iterate through indexes and validate them\nOUTER:\n\tfor _, idx := range possibleIndexes {\n\t\tfor _, col := range idx {\n\t\t\tif !i.hasColumn(col) {\n\t\t\t\tcontinue OUTER\n\t\t\t}\n\t\t\tcolumnSchema := i.Metadata.TableSchema.Column(col)\n\t\t\tif columnSchema == nil {\n\t\t\t\tcontinue OUTER\n\t\t\t}\n\t\t\tfield, err := i.FieldByColumn(col)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif !reflect.ValueOf(field).IsValid() || ovsdb.IsDefaultValue(columnSchema, field) {\n\t\t\t\tcontinue OUTER\n\t\t\t}\n\t\t}\n\t\tvalidIndexes = append(validIndexes, idx)\n\t}\n\treturn validIndexes, nil\n}", "func OrTableIndexes(db models.XODB, schema string, table string) ([]*models.Index, error) {\n\tvar err error\n\n\t// sql query\n\tconst sqlstr = `SELECT ` +\n\t\t`LOWER(i.index_name) AS index_name, ` +\n\t\t`CASE WHEN i.uniqueness = 'UNIQUE' THEN '1' ELSE '0' END AS is_unique, ` +\n\t\t`CASE WHEN c.constraint_type = 'P' THEN '1' ELSE '0' END AS is_primary ` +\n\t\t`FROM user_indexes i ` +\n\t\t`LEFT JOIN user_constraints c on i.INDEX_NAME = c.constraint_name ` +\n\t\t`WHERE i.TABLE_OWNER = UPPER(:1) AND i.TABLE_NAME = :2`\n\n\t// run query\n\tmodels.XOLog(sqlstr, schema, table)\n\tq, err := db.Query(sqlstr, schema, table)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer 
q.Close()\n\n\t// load results\n\tres := []*models.Index{}\n\tfor q.Next() {\n\t\ti := models.Index{}\n\n\t\t// scan\n\t\terr = q.Scan(&i.IndexName, &i.IsUnique, &i.IsPrimary)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tres = append(res, &i)\n\t}\n\n\treturn res, nil\n}", "func addUserIndexes(db *mgo.Session) error {\n\tsession := db.Copy()\n\tdefer session.Close()\n\tc := session.DB(\"\").C(userCollection)\n\ti := mgo.Index{\n\t\tKey: []string{\"phone\"},\n\t\tUnique: true,\n\t}\n\terr := c.EnsureIndex(i)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to add index: %v\", err)\n\t}\n\n\treturn nil\n}", "func idIndex() mgo.Index {\n\treturn mgo.Index{\n\t\tKey: []string{\"id\"},\n\t\tUnique: true,\n\t\tDropDups: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n}", "func (filter *BloomFilter) getIndexes(key interface{}) []uint {\n\tindexes := make([]uint, filter.hashCount)\n\tkeyBytes, _ := getBytes(key)\n\tmax := uint(filter.capacity)\n\th1, h2 := murmur3Hash(keyBytes)\n\n\tfor hashIndex := 0; hashIndex < filter.hashCount; hashIndex++ {\n\t\ti := uint(hashIndex)\n\t\tindexes[i] = (uint(h1) + i*uint(h2) + i*i) % max\n\t}\n\n\treturn indexes\n}", "func (MixinID) Indexes() []ent.Index {\n\treturn []ent.Index{\n\t\tindex.Fields(\"id\"),\n\t\tindex.Fields(\"id\", \"some_field\"),\n\t\tindex.Fields(\"id\", \"mixin_field\"),\n\t\tindex.Fields(\"id\", \"mixin_field\", \"some_field\"),\n\t}\n}", "func (meta metadata) getIndexes() []mgo.Index {\n\tindexes := []mgo.Index{}\n\tfor _, field := range meta.findFieldsWithIndex() {\n\t\tindexes = append(indexes, mgo.Index{Key: []string{field.key}, Unique: field.unique})\n\t}\n\treturn indexes\n}", "func m4accountIndices(db *IndexerDb, state *MigrationState) error {\n\tsqlLines := []string{\n\t\t\"CREATE INDEX IF NOT EXISTS account_asset_by_addr ON account_asset ( addr )\",\n\t\t\"CREATE INDEX IF NOT EXISTS asset_by_creator_addr ON asset ( creator_addr )\",\n\t\t\"CREATE INDEX IF NOT EXISTS app_by_creator ON 
app ( creator )\",\n\t\t\"CREATE INDEX IF NOT EXISTS account_app_by_addr ON account_app ( addr )\",\n\t}\n\treturn sqlMigration(db, state, sqlLines)\n}", "func (api *Api) createIndexes() {\n\t// username and email will be unique.\n\tkeys := bsonx.Doc{\n\t\t{Key: \"username\", Value: bsonx.Int32(1)},\n\t\t{Key: \"email\", Value: bsonx.Int32(1)},\n\t}\n\tpeople := api.DB.Collection(\"people\")\n\tdb.SetIndexes(people, keys)\n}", "func (Friendship) Indexes() []ent.Index {\n\treturn []ent.Index{\n\t\tindex.Fields(\"created_at\"),\n\t}\n}", "func addIndexes() {\n\tvar err error\n\tuserIndex := mgo.Index{\n\t\tKey: []string{\"email\"},\n\t\tUnique: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n\t// Add indexes into MongoDB\n\tsession := GetSession().Copy()\n\tdefer session.Close()\n\tuserCol := session.DB(AppConfig.Database).C(\"users\")\n\n\terr = userCol.EnsureIndex(userIndex)\n\tif err != nil {\n\t\tlog.Fatalf(\"[addIndexes]: %s\\n\", err)\n\t}\n}", "func (vt *perfSchemaTable) Indices() []table.Index {\n\treturn nil\n}", "func createIndexes(ts *Schema, ti *Info, idxs []schema.Index, store *stor.Stor) {\n\tif len(idxs) == 0 {\n\t\treturn\n\t}\n\tts.Indexes = slices.Clip(ts.Indexes) // copy on write\n\tnold := len(ts.Indexes)\n\tfor i := range idxs {\n\t\tix := &idxs[i]\n\t\tif ts.FindIndex(ix.Columns) != nil {\n\t\t\tpanic(\"duplicate index: \" +\n\t\t\t\tstr.Join(\"(,)\", ix.Columns) + \" in \" + ts.Table)\n\t\t}\n\t\tts.Indexes = append(ts.Indexes, *ix)\n\t}\n\tidxs = ts.SetupNewIndexes(nold)\n\tn := len(ti.Indexes)\n\tti.Indexes = slices.Clip(ti.Indexes) // copy on write\n\tfor i := range idxs {\n\t\tbt := btree.CreateBtree(store, &ts.Indexes[n+i].Ixspec)\n\t\tti.Indexes = append(ti.Indexes, index.OverlayFor(bt))\n\t}\n}", "func IndexOf(u *task.List, uid uint64) int {\n\ti := sort.Search(len(u.Uids), func(i int) bool { return u.Uids[i] >= uid })\n\tif i < len(u.Uids) && u.Uids[i] == uid {\n\t\treturn i\n\t}\n\treturn -1\n}", "func (e *Department) 
EntIndexes() []ent.EntIndex { return ent_Department_idx }", "func addIndexes() {\n\tvar err error\n\tuserIndex := mgo.Index{\n\t\tKey: []string{\"email\"},\n\t\tUnique: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n\n\tauthIndex := mgo.Index{\n\t\tKey: []string{\"sender_id\"},\n\t\tUnique: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n\n\t// Add indexes into MongoDB\n\tsession := GetSession().Copy()\n\tdefer session.Close()\n\tuserCol := session.DB(AppConfig.MongoDBName).C(\"users\")\n\tauthCol := session.DB(AppConfig.MongoDBName).C(\"auth\")\n\n\terr = userCol.EnsureIndex(userIndex)\n\tif err != nil {\n\t\tlog.Fatalf(\"[addIndexes]: %s\\n\", err)\n\t}\n\n\terr = authCol.EnsureIndex(authIndex)\n\tif err != nil {\n\t\tlog.Fatalf(\"[addIndexes]: %s\\n\", err)\n\t}\n\n}", "func Indexes() map[string]string {\n\treturn instance.getIndexes()\n}", "func createIndexes(db *sql.DB, table string) error {\n\tindexes := []string{}\n\n\tswitch table {\n\tcase \"dfp\":\n\t\tindexes = []string{\n\t\t\t\"CREATE INDEX IF NOT EXISTS dfp_metrics ON dfp (CODE, ID_CIA, YEAR, VL_CONTA);\",\n\t\t\t\"CREATE INDEX IF NOT EXISTS dfp_year_ver ON dfp (ID_CIA, YEAR, VERSAO);\",\n\t\t}\n\tcase \"itr\":\n\t\tindexes = []string{\n\t\t\t\"CREATE INDEX IF NOT EXISTS itr_metrics ON itr (CODE, ID_CIA, YEAR, VL_CONTA);\",\n\t\t\t\"CREATE INDEX IF NOT EXISTS itr_quarter_ver ON itr (ID_CIA, DT_FIM_EXERC, VERSAO);\",\n\t\t}\n\tcase \"stock_quotes\":\n\t\tindexes = []string{\n\t\t\t\"CREATE UNIQUE INDEX IF NOT EXISTS stock_quotes_stockdate ON stock_quotes (stock, date);\",\n\t\t}\n\tcase \"fii_dividends\":\n\t\tindexes = []string{\n\t\t\t\"CREATE UNIQUE INDEX IF NOT EXISTS fii_dividends_pk ON fii_dividends (trading_code, base_date);\",\n\t\t}\n\t}\n\n\tfor _, idx := range indexes {\n\t\t_, err := db.Exec(idx)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"erro ao criar índice\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (p PropertyHashList) Indexes() []uint64 {\n\tindexes := 
make([]uint64, len(p))\n\tfor idx, pp := range p {\n\t\tindexes[idx] = pp.Index\n\t}\n\n\treturn indexes\n}", "func (s *Server) getIndexes(w http.ResponseWriter, r *http.Request) {\n\tfs, err := s.db.List(\"file\")\n\tif err != nil {\n\t\ts.logf(\"error listing files from mpd for building indexes: %v\", err)\n\t\twriteXML(w, errGeneric)\n\t\treturn\n\t}\n\tfiles := indexFiles(fs)\n\n\twriteXML(w, func(c *container) {\n\t\tc.Indexes = &indexesContainer{\n\t\t\tLastModified: time.Now().Unix(),\n\t\t}\n\n\t\t// Incremented whenever it's time to create a new index for a new\n\t\t// initial letter\n\t\tidx := -1\n\n\t\tvar indexes []index\n\n\t\t// A set of initial characters, used to deduplicate the addition of\n\t\t// nwe indexes\n\t\tseenChars := make(map[rune]struct{}, 0)\n\n\t\tfor _, f := range files {\n\t\t\t// Filter any non-top level items\n\t\t\tif strings.Contains(f.Name, string(os.PathSeparator)) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Initial rune is used to create an index name\n\t\t\tc, _ := utf8.DecodeRuneInString(f.Name)\n\t\t\tname := string(c)\n\n\t\t\t// If initial rune is a digit, put index under a numeric section\n\t\t\tif unicode.IsDigit(c) {\n\t\t\t\tc = '#'\n\t\t\t\tname = \"#\"\n\t\t\t}\n\n\t\t\t// If a new rune appears, create a new index for it\n\t\t\tif _, ok := seenChars[c]; !ok {\n\t\t\t\tseenChars[c] = struct{}{}\n\t\t\t\tindexes = append(indexes, index{Name: name})\n\t\t\t\tidx++\n\t\t\t}\n\n\t\t\tindexes[idx].Artists = append(indexes[idx].Artists, artist{\n\t\t\t\tName: f.Name,\n\t\t\t\tID: strconv.Itoa(f.ID),\n\t\t\t})\n\t\t}\n\n\t\tc.Indexes.Indexes = indexes\n\t})\n}", "func (c *Collection) GetIndexes() []Index {\n\tif len(c.indexes) == 0 {\n\t\treturn nil\n\t}\n\tindexes := make([]Index, len(c.indexes)-1)\n\tvar i int\n\tfor _, index := range c.indexes {\n\t\tif index.Path != idFieldName {\n\t\t\tindexes[i] = index\n\t\t\ti++\n\t\t}\n\t}\n\treturn indexes\n}", "func (mapper *TagMapper) KeyIndexes(value reflect.Value) (KeyIndexes, 
error) {\n\tmapper.RLock()\n\ttyp := value.Type()\n\tindexes, ok := mapper.types[typ]\n\tmapper.RUnlock()\n\tif ok {\n\t\treturn indexes, nil\n\t}\n\treturn mapper.registerValue(value)\n}", "func (r Rules) Indexes() []string {\n\ti, ok := r[\".indexOn\"]\n\tif !ok {\n\t\treturn nil\n\t}\n\tindexes, ok := i.([]string)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn indexes\n}", "func canListIndexes(context datastore.QueryContext, elems ...string) bool {\n\tprivs := auth.NewPrivileges()\n\tprivs.Add(algebra.NewPathFromElements(elems).FullName(), auth.PRIV_QUERY_LIST_INDEX, auth.PRIV_PROPS_NONE)\n\t_, err := datastore.GetDatastore().Authorize(privs, context.Credentials())\n\tres := err == nil\n\treturn res\n}", "func (n *Notification) Index() mgo.Index {\n\treturn mgo.Index{\n\t\tKey: []string{\"uid\", \"list\"},\n\t\tUnique: true,\n\t\t//DropDups: true,\n\t\tBackground: true,\n\t\t//Sparse: true,\n\t}\n}", "func (ts *STableSpec) Indexes() []STableIndex {\n\treturn ts._indexes\n}", "func addIndexes() {\n\tvar err error\n\n\tufIndex1 := mgo.Index{\n\t\tKey: []string{\"codigo\"},\n\t\tUnique: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n\tmunicipioIndex1 := mgo.Index{\n\t\tKey: []string{\"codigo\"},\n\t\tUnique: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n\t// Add indexes into MongoDB\n\tsession := Session.Copy()\n\tdefer session.Close()\n\tufCol := session.DB(commons.AppConfig.Database).C(\"ufs\")\n\tmunicipioCol := session.DB(commons.AppConfig.Database).C(\"municipios\")\n\n\t// cria indice codigo para UF\n\terr = ufCol.EnsureIndex(ufIndex1)\n\tif err != nil {\n\t\tlog.Fatalf(\"[addIndexes]: %s\\n\", err)\n\t}\n\tlog.Println(\"Indice para UF criado com sucesso\")\n\n\t// cria indice codigo para Municipio\n\terr = municipioCol.EnsureIndex(municipioIndex1)\n\tif err != nil {\n\t\tlog.Fatalf(\"[addIndexes]: %s\\n\", err)\n\t}\n\tlog.Println(\"Indice para Municipio criado com sucesso\")\n\n}", "func (uips *unlockHashesWithIndices) Len() int {\n\treturn 
len(uips.Indices)\n}", "func storeUIntIndex (txn *badger.Txn, key uint64, value []byte, prefix byte) error {\r\n\r\n\tindex := make([]byte, 8)\r\n\tbinary.LittleEndian.PutUint64(index, key)\r\n\tindex = append ([]byte{prefix}, index...)\r\n\r\n\treturn txn.Set(index, value)\r\n}", "func (vt *perfSchemaTable) WritableIndices() []table.Index {\n\treturn nil\n}", "func MetaUIDIndexFunc(obj interface{}) ([]string, error) {\n\tmeta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn []string{\"\"}, fmt.Errorf(\"object has no meta: %v\", err)\n\t}\n\treturn []string{string(meta.GetUID())}, nil\n}", "func (d *drawingsVML) buildIndexes() {\n\tfor id, s := range d.ml.Shape {\n\t\t_ = d.shapeIndex.Add(s, id)\n\t}\n}", "func NewIndicesUintSlice(n ...uint) *IndicesSlice { return NewIndicesSlice(UintSlice(n)) }", "func (db *DB) Indexes() (res []string) {\n\tdb.kv.View(func(tx *bolt.Tx) error {\n\t\tbkt := tx.Bucket([]byte(\"indexes\"))\n\t\tc := bkt.Cursor()\n\t\tfor k, v := c.First(); k != nil; k, v = c.Next() {\n\t\t\tif v == nil {\n\t\t\t\tres = append(res, string(k))\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\treturn res\n}", "func (t *tableCommon) Indices() []table.Index {\n\ttrace_util_0.Count(_tables_00000, 35)\n\treturn t.indices\n}", "func (t *BoundedTable) Indices() []table.Index {\n\treturn nil\n}", "func (sl *Slice) UniqueNameToIndexMap() map[string]int {\n\tif len(*sl) == 0 {\n\t\treturn nil\n\t}\n\tnim := make(map[string]int, len(*sl))\n\tfor i, kid := range *sl {\n\t\tnim[kid.UniqueName()] = i\n\t}\n\treturn nim\n}", "func (x EntIndex) IsUnique() bool { return (x.Flags & EntIndexUnique) != 0 }", "func (m *Mongo) Index(gid string, background bool) error {\n\tm.Session.ResetIndexCache()\n\n\tsessionCopy := m.Session.Copy()\n\tdefer sessionCopy.Close()\n\tcol := sessionCopy.DB(m.DBName).C(gid)\n\n\tcInfo := &mgo.CollectionInfo{DisableIdIndex: true}\n\terr := col.Create(cInfo)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\t/*\n\t\t// TODO figure out the magic of 
mongo indexes\n\t\tindex := mgo.Index{\n\t\t\tKey: []string{\"g\", \"s\", \"p\", \"o\"},\n\t\t\tBackground: false,\n\t\t\tSparse: true,\n\t\t\tUnique: true,\n\t\t\tDropDups: true,\n\t\t}\n\t\terr := col.EnsureIndex(index)\n\t\treturn err\n\t*/\n\n\tindex := mgo.Index{\n\t\tKey: []string{\"g\", \"s\"},\n\t\tBackground: background,\n\t\tSparse: true,\n\t}\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"o\"}\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"p\"}\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"s\", \"p\"}\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"s\", \"o\"}\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"p\", \"o\"}\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"s\", \"p\", \"o\"}\n\tindex.Unique = true\n\tindex.DropDups = true\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\treturn nil\n}", "func (c *Couchbase) RegisterIndexes(indexes []*Index) {\n\tc.indexes = indexes\n}", "func TestSQLSmith_LoadIndexes(t *testing.T) {\n\te := Executor{\n\t\tconn: nil,\n\t\tdb: dbname,\n\t\ttables: make(map[string]*types.Table),\n\t}\n\tindexes[\"users\"] = []types.CIStr{\"idx1\", \"idx2\"}\n\te.loadSchema(schema, indexes)\n\n\tassert.Equal(t, len(e.tables), 6)\n\tassert.Equal(t, 
len(e.tables[\"users\"].Indexes), 2)\n}", "func (d *Dao) IndexCache(c context.Context, mid int64, start, end int) (aids []int64, err error) {\n\tconn := d.redis.Get(c)\n\tdefer conn.Close()\n\tvalues, err := redis.Values(conn.Do(\"ZREVRANGE\", keyIndex(mid), start, end, \"WITHSCORES\"))\n\tif err != nil {\n\t\tlog.Error(\"conn.Do(ZREVRANGE %v) error(%v)\", keyIndex(mid), err)\n\t\treturn\n\t}\n\tif len(values) == 0 {\n\t\treturn\n\t}\n\tvar aid, unix int64\n\tfor len(values) > 0 {\n\t\tif values, err = redis.Scan(values, &aid, &unix); err != nil {\n\t\t\tlog.Error(\"redis.Scan(%v) error(%v)\", values, err)\n\t\t\treturn\n\t\t}\n\t\taids = append(aids, aid)\n\t}\n\treturn\n}", "func (z *Zzz) IdxId() int { //nolint:dupl false positive\n\treturn 0\n}", "func (c *Collection) GetBleveIndexes() []string {\n\tc.db.lock.RLock()\n\tdefer c.db.lock.RUnlock()\n\n\tret := make([]string, len(c.bleveIndexes))\n\n\tfor i, index := range c.bleveIndexes {\n\t\tret[i] = index.Name()\n\t}\n\n\treturn ret\n}", "func IndexUint64(s uint64, list []uint64) int {\n\tfor i, b := range list {\n\t\tif b == s {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}", "func (d *Dataset) Indexes() []string {\n\t//grab indexes\n\tvar indexes []string\n\n\tindexFiles, err := ioutil.ReadDir(filepath.Join(path.Dir(d.path), \".gitdb/index/\", d.Name()))\n\tif err != nil {\n\t\treturn indexes\n\t}\n\n\tfor _, indexFile := range indexFiles {\n\t\tindexes = append(indexes, strings.TrimSuffix(indexFile.Name(), \".json\"))\n\t}\n\n\treturn indexes\n}", "func readUIntIndex (txn *badger.Txn, key uint64, prefix byte) ([]byte, error) {\r\n\r\n\tindex := make([]byte, 8)\r\n\tbinary.LittleEndian.PutUint64(index, key)\r\n\tindex = append ([]byte{prefix}, index...)\r\n\r\n\titem, err := txn.Get(index)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\treturn item.ValueCopy(nil)\r\n}", "func GetUniformIndices(program uint32, uniformCount int32, uniformNames **int8, uniformIndices *uint32) {\n 
C.glowGetUniformIndices(gpGetUniformIndices, (C.GLuint)(program), (C.GLsizei)(uniformCount), (**C.GLchar)(unsafe.Pointer(uniformNames)), (*C.GLuint)(unsafe.Pointer(uniformIndices)))\n}", "func (c *Conn) GetIndexes(schemaName, tableName string) ([]db.Index, error) {\n\tvar ret = []db.Index{}\n\tvar ind db.Index\n\tquery := fmt.Sprintf(`\n\tselect \n\t\tindex_name as 'index',\n\t\tgroup_concat(column_name order by seq_in_index separator ', ') as columns\n\tfrom information_schema.statistics\n\tgroup by \n\t\ttable_schema,\n\t\ttable_name,\n\t\tindex_name\n\thaving\n\t\ttable_schema = '%s'\n\t\tand table_name = '%s'\n\t\tand index_name <> 'PRIMARY'\n\t`, schemaName, tableName)\n\t// log.Fatal(query)\n\tres, err := c.Query(query)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tfor _, r := range res {\n\t\tind = db.Index{\n\t\t\tName: r[\"index\"].(string),\n\t\t\tColumns: r[\"columns\"].(string),\n\t\t}\n\t\tret = append(ret, ind)\n\t}\n\treturn ret, nil\n}", "func (a *Action) TableIndices() []*schemas.Index {\n\trepoIndex := schemas.NewIndex(\"r_u_d\", schemas.IndexType)\n\trepoIndex.AddColumn(\"repo_id\", \"user_id\", \"is_deleted\")\n\n\tactUserIndex := schemas.NewIndex(\"au_r_c_u_d\", schemas.IndexType)\n\tactUserIndex.AddColumn(\"act_user_id\", \"repo_id\", \"created_unix\", \"user_id\", \"is_deleted\")\n\n\tcudIndex := schemas.NewIndex(\"c_u_d\", schemas.IndexType)\n\tcudIndex.AddColumn(\"created_unix\", \"user_id\", \"is_deleted\")\n\n\tindices := []*schemas.Index{actUserIndex, repoIndex, cudIndex}\n\n\treturn indices\n}", "func ValidateInvertedIndexes(\n\tctx context.Context,\n\tcodec keys.SQLCodec,\n\ttableDesc catalog.TableDescriptor,\n\tindexes []*descpb.IndexDescriptor,\n\trunHistoricalTxn HistoricalInternalExecTxnRunner,\n\tgatherAllInvalid bool,\n) error {\n\tgrp := ctxgroup.WithContext(ctx)\n\tinvalid := make(chan descpb.IndexID, len(indexes))\n\n\texpectedCount := make([]int64, len(indexes))\n\tcountReady := make([]chan struct{}, len(indexes))\n\n\tfor 
i, idx := range indexes {\n\t\t// Shadow i and idx to prevent the values from changing within each\n\t\t// gorountine.\n\t\ti, idx := i, idx\n\t\tcountReady[i] = make(chan struct{})\n\n\t\tgrp.GoCtx(func(ctx context.Context) error {\n\t\t\t// Inverted indexes currently can't be interleaved, so a KV scan can be\n\t\t\t// used to get the index length.\n\t\t\t// TODO (lucy): Switch to using DistSQL to get the count, so that we get\n\t\t\t// distributed execution and avoid bypassing the SQL decoding\n\t\t\tstart := timeutil.Now()\n\t\t\tvar idxLen int64\n\t\t\tspan := tableDesc.IndexSpan(codec, idx.ID)\n\t\t\tkey := span.Key\n\t\t\tendKey := span.EndKey\n\t\t\tif err := runHistoricalTxn(ctx, func(ctx context.Context, txn *kv.Txn, _ *InternalExecutor) error {\n\t\t\t\tfor {\n\t\t\t\t\tkvs, err := txn.Scan(ctx, key, endKey, 1000000)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif len(kvs) == 0 {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tidxLen += int64(len(kvs))\n\t\t\t\t\tkey = kvs[len(kvs)-1].Key.PrefixEnd()\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Infof(ctx, \"inverted index %s/%s count = %d, took %s\",\n\t\t\t\ttableDesc.GetName(), idx.Name, idxLen, timeutil.Since(start))\n\t\t\tselect {\n\t\t\tcase <-countReady[i]:\n\t\t\t\tif idxLen != expectedCount[i] {\n\t\t\t\t\tif gatherAllInvalid {\n\t\t\t\t\t\tinvalid <- idx.ID\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\t// JSON columns cannot have unique indexes, so if the expected and\n\t\t\t\t\t// actual counts do not match, it's always a bug rather than a\n\t\t\t\t\t// uniqueness violation.\n\t\t\t\t\treturn errors.AssertionFailedf(\n\t\t\t\t\t\t\"validation of index %s failed: expected %d rows, found %d\",\n\t\t\t\t\t\tidx.Name, errors.Safe(expectedCount[i]), errors.Safe(idxLen))\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tgrp.GoCtx(func(ctx context.Context) error 
{\n\t\t\tdefer close(countReady[i])\n\n\t\t\tstart := timeutil.Now()\n\t\t\tcol := idx.InvertedColumnName()\n\n\t\t\tif err := runHistoricalTxn(ctx, func(ctx context.Context, txn *kv.Txn, ie *InternalExecutor) error {\n\t\t\t\tvar stmt string\n\t\t\t\tif geoindex.IsEmptyConfig(&idx.GeoConfig) {\n\t\t\t\t\tstmt = fmt.Sprintf(\n\t\t\t\t\t\t`SELECT coalesce(sum_int(crdb_internal.num_inverted_index_entries(%q, %d)), 0) FROM [%d AS t]`,\n\t\t\t\t\t\tcol, idx.Version, tableDesc.GetID(),\n\t\t\t\t\t)\n\t\t\t\t} else {\n\t\t\t\t\tstmt = fmt.Sprintf(\n\t\t\t\t\t\t`SELECT coalesce(sum_int(crdb_internal.num_geo_inverted_index_entries(%d, %d, %q)), 0) FROM [%d AS t]`,\n\t\t\t\t\t\ttableDesc.GetID(), idx.ID, col, tableDesc.GetID(),\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t\t// If the index is a partial index the predicate must be added\n\t\t\t\t// as a filter to the query.\n\t\t\t\tif idx.IsPartial() {\n\t\t\t\t\tstmt = fmt.Sprintf(`%s WHERE %s`, stmt, idx.Predicate)\n\t\t\t\t}\n\t\t\t\treturn ie.WithSyntheticDescriptors([]catalog.Descriptor{tableDesc}, func() error {\n\t\t\t\t\trow, err := ie.QueryRowEx(ctx, \"verify-inverted-idx-count\", txn, sessiondata.InternalExecutorOverride{}, stmt)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif row == nil {\n\t\t\t\t\t\treturn errors.New(\"failed to verify inverted index count\")\n\t\t\t\t\t}\n\t\t\t\t\texpectedCount[i] = int64(tree.MustBeDInt(row[0]))\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Infof(ctx, \"column %s/%s expected inverted index count = %d, took %s\",\n\t\t\t\ttableDesc.GetName(), col, expectedCount[i], timeutil.Since(start))\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tif err := grp.Wait(); err != nil {\n\t\treturn err\n\t}\n\tclose(invalid)\n\tinvalidErr := InvalidIndexesError{}\n\tfor i := range invalid {\n\t\tinvalidErr.Indexes = append(invalidErr.Indexes, i)\n\t}\n\tif len(invalidErr.Indexes) > 0 {\n\t\treturn invalidErr\n\t}\n\treturn nil\n}", "func 
(o *EventsScalarQuery) GetIndexes() []string {\n\tif o == nil || o.Indexes == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.Indexes\n}", "func updateDBIndexes(mi *modelInfo) {\n\tadapter := adapters[db.DriverName()]\n\t// update column indexes\n\tfor colName, fi := range mi.fields.registryByJSON {\n\t\tif !fi.index {\n\t\t\tcontinue\n\t\t}\n\t\tif !adapter.indexExists(mi.tableName, fmt.Sprintf(\"%s_%s_index\", mi.tableName, colName)) {\n\t\t\tcreateColumnIndex(mi.tableName, colName)\n\t\t}\n\t}\n}", "func (v *RealVerifier) Index(vrfProof []byte, directoryID, userID string) ([]byte, error) {\n\tindex, err := v.vrf.ProofToHash([]byte(userID), vrfProof)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"vrf.ProofToHash(): %v\", err)\n\t}\n\treturn index[:], nil\n}", "func SetIndexes(collection *mongo.Collection, keys bsonx.Doc) {\n\tindex := mongo.IndexModel{}\n\tindex.Keys = keys\n\tunique := true\n\tindex.Options = &options.IndexOptions{\n\t\tUnique: &unique,\n\t}\n\topts := options.CreateIndexes().SetMaxTime(10 * time.Second)\n\t_, err := collection.Indexes().CreateOne(context.Background(), index, opts)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error while creating indexs: %v\", err)\n\t}\n}", "func (sc *SchemaChanger) validateIndexes(ctx context.Context) error {\n\tif lease.TestingTableLeasesAreDisabled() {\n\t\treturn nil\n\t}\n\tlog.Info(ctx, \"validating new indexes\")\n\n\t_, err := sc.updateJobRunningStatus(ctx, RunningStatusValidation)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif fn := sc.testingKnobs.RunBeforeIndexValidation; fn != nil {\n\t\tif err := fn(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treadAsOf := sc.clock.Now()\n\tvar tableDesc catalog.TableDescriptor\n\tif err := sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *kv.Txn) (err error) {\n\t\ttableDesc, err = catalogkv.MustGetTableDescByID(ctx, txn, sc.execCfg.Codec, sc.descID)\n\t\treturn err\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tvar forwardIndexes 
[]*descpb.IndexDescriptor\n\tvar invertedIndexes []*descpb.IndexDescriptor\n\n\tfor _, m := range tableDesc.AllMutations() {\n\t\tif sc.mutationID != m.MutationID() {\n\t\t\tbreak\n\t\t}\n\t\tidx := m.AsIndex()\n\t\tif idx == nil || idx.Dropped() {\n\t\t\tcontinue\n\t\t}\n\t\tswitch idx.GetType() {\n\t\tcase descpb.IndexDescriptor_FORWARD:\n\t\t\tforwardIndexes = append(forwardIndexes, idx.IndexDesc())\n\t\tcase descpb.IndexDescriptor_INVERTED:\n\t\t\tinvertedIndexes = append(invertedIndexes, idx.IndexDesc())\n\t\t}\n\t}\n\tif len(forwardIndexes) == 0 && len(invertedIndexes) == 0 {\n\t\treturn nil\n\t}\n\n\tgrp := ctxgroup.WithContext(ctx)\n\trunHistoricalTxn := sc.makeFixedTimestampInternalExecRunner(readAsOf)\n\n\tif len(forwardIndexes) > 0 {\n\t\tgrp.GoCtx(func(ctx context.Context) error {\n\t\t\treturn ValidateForwardIndexes(ctx, tableDesc, forwardIndexes, runHistoricalTxn, true /* withFirstMutationPubic */, false /* gatherAllInvalid */)\n\t\t})\n\t}\n\tif len(invertedIndexes) > 0 {\n\t\tgrp.GoCtx(func(ctx context.Context) error {\n\t\t\treturn ValidateInvertedIndexes(ctx, sc.execCfg.Codec, tableDesc, invertedIndexes, runHistoricalTxn, false /* gatherAllInvalid */)\n\t\t})\n\t}\n\tif err := grp.Wait(); err != nil {\n\t\treturn err\n\t}\n\tlog.Info(ctx, \"finished validating new indexes\")\n\treturn nil\n}", "func (m *Meta) TouchIndexes(table string) *Meta {\n\tschema := *m.GetRoSchema(table) // copy\n\tschema.Indexes = slices.Clone(schema.Indexes)\n\tmu := newMetaUpdate(m)\n\tmu.putSchema(&schema)\n\treturn mu.freeze()\n}", "func UInt64Idx(list []uint64, indices []int, element uint64) (int, bool) {\n\tleft := 0\n\tright := len(indices) - 1\n\tfor left <= right {\n\t\tmiddle := (left + right) / 2\n\t\tvalueIndex := indices[middle]\n\t\tvalue := list[valueIndex]\n\t\tif element > value {\n\t\t\tleft = middle + 1\n\t\t} else if element < value {\n\t\t\tright = middle - 1\n\t\t} else {\n\t\t\treturn middle, true\n\t\t}\n\t}\n\treturn left, false\n}", "func 
IndexMask(mask uint32) {\n C.glowIndexMask(gpIndexMask, (C.GLuint)(mask))\n}", "func (t Table) Indices() []*Index {\n\treturn t.indices\n}", "func (l *Log) Indexes() (uint64, uint64, error) {\n\tfi, err := l.FirstIndex()\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"failed to get first index: %s\", err)\n\t}\n\tli, err := l.LastIndex()\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"failed to get last index: %s\", err)\n\t}\n\treturn fi, li, nil\n}", "func UIntIdx(list []uint, indices []int, element uint) (int, bool) {\n\tleft := 0\n\tright := len(indices) - 1\n\tfor left <= right {\n\t\tmiddle := (left + right) / 2\n\t\tvalueIndex := indices[middle]\n\t\tvalue := list[valueIndex]\n\t\tif element > value {\n\t\t\tleft = middle + 1\n\t\t} else if element < value {\n\t\t\tright = middle - 1\n\t\t} else {\n\t\t\treturn middle, true\n\t\t}\n\t}\n\treturn left, false\n}", "func DB_IndexAccount(db gorm.DB) {\n\n\tcols := []string{\n\t\t\"acc_active\", \"company\", \"ticker\", \"acc_ref\",\n\t\t\"on_hold\"\t, \"is_client\", \"is_supplier\", \"online\"}\n\n\tfor _, c := range cols {\n\t\tdb.Model(&Account{}).AddIndex(\"idx_\" + c, c)\n\t}\n}", "func (m *Meta) TouchIndexes(table string) *Meta {\n\tschema := *m.GetRoSchema(table) // copy\n\tschema.Indexes = append(schema.Indexes[:0:0], schema.Indexes...) 
// copy\n\tmu := newMetaUpdate(m)\n\tmu.putSchema(&schema)\n\treturn mu.freeze()\n}", "func (t *tableCommon) WritableIndices() []table.Index {\n\ttrace_util_0.Count(_tables_00000, 36)\n\tif len(t.writableIndices) > 0 {\n\t\ttrace_util_0.Count(_tables_00000, 39)\n\t\treturn t.writableIndices\n\t}\n\ttrace_util_0.Count(_tables_00000, 37)\n\twritable := make([]table.Index, 0, len(t.indices))\n\tfor _, index := range t.indices {\n\t\ttrace_util_0.Count(_tables_00000, 40)\n\t\ts := index.Meta().State\n\t\tif s != model.StateDeleteOnly && s != model.StateDeleteReorganization {\n\t\t\ttrace_util_0.Count(_tables_00000, 41)\n\t\t\twritable = append(writable, index)\n\t\t}\n\t}\n\ttrace_util_0.Count(_tables_00000, 38)\n\treturn writable\n}", "func getIndices(b []byte) [16]int {\n\n\tif len(b) != 6 {\n\t\tpanic(\"invalid index array size\")\n\t}\n\n\tdata := binary.BigEndian.Uint64(append([]byte{0, 0}, b...))\n\n\tix := [16]int{}\n\tfor i := 0; i < 16; i++ {\n\t\t//Bit shift data right by i*3 and & with 0x0111 to get index\n\t\tix[i] = int((data >> uint(i*3)) & 7)\n\t}\n\treturn ix\n}", "func SaveIndexesOfUsers(path string, indexes *IndexesOfUsers) error {\n\tbyteVal, err := json.Marshal(indexes)\n\tif err != nil {\n\t\treturn nil\n\t}\n\terr = ioutil.WriteFile(path, byteVal, 0644)\n\treturn err\n}", "func (db *MongoDbBridge) updateDatabaseIndexes() {\n\t// define index list loaders\n\tvar ixLoaders = map[string]indexListProvider{\n\t\tcolNetworkNodes: operaNodeCollectionIndexes,\n\t\tcolLockedDelegations: lockedDelegationsIndexes,\n\t}\n\n\t// the DB bridge needs a way to terminate this thread\n\tsig := make(chan bool, 1)\n\tdb.sig = append(db.sig, sig)\n\n\t// prep queue and start the updater\n\tiq := make(chan *IndexList, indexListQueueCapacity)\n\tdb.wg.Add(1)\n\tgo db.indexUpdater(iq, sig)\n\n\t// check indexes\n\tfor cn, ld := range ixLoaders {\n\t\tiq <- &IndexList{\n\t\t\tCollection: db.client.Database(db.dbName).Collection(cn),\n\t\t\tIndexes: 
ld(),\n\t\t}\n\t}\n\n\t// close the channel as no more updates will be sent\n\tclose(iq)\n}", "func (b *Bucket) createIndexes(ctx context.Context) error {\n\t// must use primary read pref mode to check if files coll empty\n\tcloned, err := b.filesColl.Clone(options.Collection().SetReadPreference(readpref.Primary()))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdocRes := cloned.FindOne(ctx, bson.D{}, options.FindOne().SetProjection(bson.D{{\"_id\", 1}}))\n\n\t_, err = docRes.DecodeBytes()\n\tif err != mongo.ErrNoDocuments {\n\t\t// nil, or error that occurred during the FindOne operation\n\t\treturn err\n\t}\n\n\tfilesIv := b.filesColl.Indexes()\n\tchunksIv := b.chunksColl.Indexes()\n\n\tfilesModel := mongo.IndexModel{\n\t\tKeys: bson.D{\n\t\t\t{\"filename\", int32(1)},\n\t\t\t{\"uploadDate\", int32(1)},\n\t\t},\n\t}\n\n\tchunksModel := mongo.IndexModel{\n\t\tKeys: bson.D{\n\t\t\t{\"files_id\", int32(1)},\n\t\t\t{\"n\", int32(1)},\n\t\t},\n\t\tOptions: options.Index().SetUnique(true),\n\t}\n\n\tif err = createNumericalIndexIfNotExists(ctx, filesIv, filesModel); err != nil {\n\t\treturn err\n\t}\n\treturn createNumericalIndexIfNotExists(ctx, chunksIv, chunksModel)\n}", "func indexMySQLUserPassword(rawObj client.Object) []string {\n\tobj, ok := rawObj.(*mysql.User)\n\tif !ok {\n\t\treturn nil\n\t}\n\tif obj.Spec.LocalUser == nil {\n\t\treturn nil\n\t}\n\tif obj.Spec.LocalUser.Password == nil {\n\t\treturn nil\n\t}\n\treturn []string{obj.Spec.LocalUser.Password.Name}\n}", "func (c *Conn) ListIndexes(ctx context.Context, schema, table string) ([]*ddl.Index, error) {\n\tdb, err := catalog.New(ctx, c.conn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn db.ListIndexes(ctx, schema, table)\n}", "func UIntIdxDesc(list []uint, indices []int, element uint) (int, bool) {\n\tleft := 0\n\tright := len(indices) - 1\n\tfor left <= right {\n\t\tmiddle := (left + right) / 2\n\t\tvalueIndex := indices[middle]\n\t\tvalue := list[valueIndex]\n\t\tif element < value {\n\t\t\tleft 
= middle + 1\n\t\t} else if element > value {\n\t\t\tright = middle - 1\n\t\t} else {\n\t\t\treturn middle, true\n\t\t}\n\t}\n\treturn left, false\n}", "func (g *Graph) addIndexes(schema *load.Schema) {\n\ttyp, _ := g.typ(schema.Name)\n\tfor _, idx := range schema.Indexes {\n\t\tcheck(typ.AddIndex(idx), \"invalid index for schema %q\", schema.Name)\n\t}\n}", "func Indices(itr Iterator) chan int {\n\tch := make(chan int)\n\tgo func() {\n\t\tid := 0\n\t\tfor {\n\t\t\tw, n := itr.Next()\n\t\t\tif n == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tif w&(1<<uint(i)) != 0 {\n\t\t\t\t\tch <- id + i\n\t\t\t\t}\n\t\t\t}\n\t\t\tid += bitLength - 1\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}", "func (a *APIGen) Indices(ctx context.Context, filter string) ([]Index, error) {\n\tpanic(\"Should Not Be Called from Gen Pattern.\")\n}", "func IndexValues(mibStore smi.Store, pdu *gosnmp.SnmpPDU) []string {\n\to := mibStore.GetObject(pdu.Name)\n\tif o == nil {\n\t\treturn nil\n\t}\n\treturn indexValues(pdu, o)\n}", "func (e *Element) updateIndexes(old, new int) {\n\tif old > new {\n\t\told, new = new, old\n\t}\n\tnew++\n\n\ts := e.children.Slice()\n\tfor i := old; i < new; i++ {\n\t\ts[i].Value.index = i\n\t}\n}", "func GetUniformIndices(program uint32, uniformCount int32, uniformNames **uint8, uniformIndices *uint32) {\n\tsyscall.Syscall6(gpGetUniformIndices, 4, uintptr(program), uintptr(uniformCount), uintptr(unsafe.Pointer(uniformNames)), uintptr(unsafe.Pointer(uniformIndices)), 0, 0)\n}", "func CreateAllIndexes() error {\n\terr := questionAnswerDAO.CreateIndexes()\n\n\treturn err\n}", "func (dp *Dumper) getIndices() ([]indexSchema, error) {\n\tquery := \"\" +\n\t\t\"SELECT schemaname, tablename, indexname, indexdef \" +\n\t\t\"FROM pg_indexes WHERE schemaname NOT IN ('pg_catalog', 'information_schema');\"\n\n\tvar indices []indexSchema\n\trows, err := dp.conn.DB.Query(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tfor 
rows.Next() {\n\t\tvar idx indexSchema\n\t\tif err := rows.Scan(&idx.schemaName, &idx.tableName, &idx.name, &idx.statement); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tidx.schemaName, idx.tableName, idx.name = quoteIdentifier(idx.schemaName), quoteIdentifier(idx.tableName), quoteIdentifier(idx.name)\n\t\tindices = append(indices, idx)\n\t}\n\n\treturn indices, nil\n}", "func (o *EventsScalarQuery) HasIndexes() bool {\n\treturn o != nil && o.Indexes != nil\n}", "func indexTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"index\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"Key\",\n\t\t\t\t\tLowercase: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func indexes(t reflect.Type) map[string][]int {\n\tfields := make(map[string][]int)\n\n\tif err := verifyStruct(t); err != nil {\n\t\treturn fields\n\t}\n\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\ttag := f.Tag\n\t\tcol := tag.Get(\"sql\")\n\n\t\tif col == \"\" {\n\t\t\tif f.Anonymous && f.Type.Kind() == reflect.Struct {\n\t\t\t\tfor jCol, js := range indexes(f.Type) {\n\t\t\t\t\tfields[jCol] = append([]int{i}, js...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tfields[col] = []int{i}\n\t}\n\n\treturn fields\n}", "func (c *Collection) saveIndexes() error {\n\tib, err := json.Marshal(c.indexes)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.db.datastore.Put(dsIndexes.ChildString(c.name), ib)\n}", "func IndexUsers(offset int, count int) ([]UserEntry, error) {\n requestUsers := make([]UserEntry, 0, 0)\n \n // Find the users\n rows, err := model.Database.Queryx(\"SELECT * FROM users ORDER BY userid ASC LIMIT ?, ?\", offset, count)\n if err != nil {\n return nil, err\n }\n \n // Create the GameRow structs and then return it\n for rows.Next() {\n newObj := UserEntry{}\n err = 
rows.StructScan(&newObj)\n if err != nil {\n continue\n }\n requestUsers = append(requestUsers, newObj)\n }\n \n return requestUsers, nil\n}", "func (d *Dao) IndexCacheByTime(c context.Context, mid int64, start int64) (aids []int64, err error) {\n\tconn := d.redis.Get(c)\n\tdefer conn.Close()\n\tvalues, err := redis.Values(conn.Do(\"ZRANGEBYSCORE\", keyIndex(mid), start, \"INF\", \"WITHSCORES\"))\n\tif err != nil {\n\t\tlog.Error(\"conn.Do(ZRANGEBYSCORE %v) error(%v)\", keyIndex(mid), err)\n\t\treturn\n\t}\n\tif len(values) == 0 {\n\t\treturn\n\t}\n\tvar aid, unix int64\n\tfor len(values) > 0 {\n\t\tif values, err = redis.Scan(values, &aid, &unix); err != nil {\n\t\t\tlog.Error(\"redis.Scan(%v) error(%v)\", values, err)\n\t\t\treturn\n\t\t}\n\t\taids = append(aids, aid)\n\t}\n\treturn\n}", "func GetIndexes(coll *mgo.Collection) ([]bson.D, error) {\n\tvar cmdResult struct {\n\t\tIndexes []bson.D\n\t}\n\n\terr := coll.Database.Run(bson.M{\"listIndexes\": coll.Name}, &cmdResult)\n\tswitch {\n\tcase err == nil:\n\t\treturn cmdResult.Indexes, nil\n\tcase IsNoCmd(err):\n\t\tlog.Logf(log.DebugLow, \"No support for listIndexes command, falling back to querying system.indexes\")\n\t\treturn getIndexesPre28(coll)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"error running `listIndexes`. 
Collection: `%v` Err: %v\", coll.FullName, err)\n\t}\n}", "func UInt64IdxDesc(list []uint64, indices []int, element uint64) (int, bool) {\n\tleft := 0\n\tright := len(indices) - 1\n\tfor left <= right {\n\t\tmiddle := (left + right) / 2\n\t\tvalueIndex := indices[middle]\n\t\tvalue := list[valueIndex]\n\t\tif element < value {\n\t\t\tleft = middle + 1\n\t\t} else if element > value {\n\t\t\tright = middle - 1\n\t\t} else {\n\t\t\treturn middle, true\n\t\t}\n\t}\n\treturn left, false\n}", "func TestBaseModel_EnsureIndexes(t *testing.T) {\n\tconfig := mongo.Configuration{\n\t\tURL: \"mongodb://localhost:27017/some-test-db\",\n\t\tUseSSL: false,\n\t\tSSLCert: []byte{},\n\t\tPingFrequency: 100,\n\t}\n\n\tdb, teardown := mongo.InitMongoFromConfig(config)\n\tdefer teardown()\n\n\t// initialize the collection..\n\tdb.C(\"some-collection\").Insert(&testDocument{})\n\n\tmodel := &BaseModel{\n\t\tDB: db,\n\t\tCollectionName: \"some-collection\",\n\t\tIndexes: []*mgo.Index{\n\t\t\t{\n\t\t\t\tUnique: true,\n\t\t\t\tName: \"test_1\",\n\t\t\t\tKey: []string{\"first_key\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tEnsureIndexes([]Model{model}, false)\n\n\tindexes, err := db.C(\"some-collection\").Indexes()\n\tassert.Nil(t, err)\n\tassert.Equal(t, []mgo.Index{\n\t\t{Key: []string{\"_id\"}, Name: \"_id_\"},\n\t\t{Key: []string{\"first_key\"}, Name: \"test_1\", Unique: true},\n\t}, indexes)\n}", "func GetIndexesFromDB(client *mongo.Client, dbName string) string {\n\tvar err error\n\tvar cur *mongo.Cursor\n\tvar icur *mongo.Cursor\n\tvar scur *mongo.Cursor\n\tvar buffer bytes.Buffer\n\tvar ctx = context.Background()\n\t// var pipeline = mongo.Pipeline{{{Key: \"$indexStats\", Value: bson.M{}}}}\n\tvar pipeline = MongoPipeline(`{\"$indexStats\": {}}`)\n\tif cur, err = client.Database(dbName).ListCollections(ctx, bson.M{}); err != nil {\n\t\treturn err.Error()\n\t}\n\tdefer cur.Close(ctx)\n\n\tfor cur.Next(ctx) {\n\t\tvar elem = bson.M{}\n\t\tif err = cur.Decode(&elem); err != nil 
{\n\t\t\tfmt.Println(\"0.1\", err)\n\t\t\tcontinue\n\t\t}\n\t\tcoll := fmt.Sprintf(\"%v\", elem[\"name\"])\n\t\tcollType := fmt.Sprintf(\"%v\", elem[\"type\"])\n\t\tif strings.Index(coll, \"system.\") == 0 || (elem[\"type\"] != nil && collType != \"collection\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tbuffer.WriteString(\"\\n\")\n\t\tbuffer.WriteString(dbName)\n\t\tbuffer.WriteString(\".\")\n\t\tbuffer.WriteString(coll)\n\t\tbuffer.WriteString(\":\\n\")\n\n\t\tif scur, err = client.Database(dbName).Collection(coll).Aggregate(ctx, pipeline); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tvar indexStats = []bson.M{}\n\t\tfor scur.Next(ctx) {\n\t\t\tvar result = bson.M{}\n\t\t\tif err = scur.Decode(&result); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tindexStats = append(indexStats, result)\n\t\t}\n\t\tscur.Close(ctx)\n\t\tindexView := client.Database(dbName).Collection(coll).Indexes()\n\t\tif icur, err = indexView.List(ctx); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdefer icur.Close(ctx)\n\t\tvar list []IndexStatsDoc\n\n\t\tfor icur.Next(ctx) {\n\t\t\tvar idx = bson.D{}\n\t\t\tif err = icur.Decode(&idx); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar keys bson.D\n\t\t\tvar indexName string\n\t\t\tfor _, v := range idx {\n\t\t\t\tif v.Key == \"name\" {\n\t\t\t\t\tindexName = v.Value.(string)\n\t\t\t\t} else if v.Key == \"key\" {\n\t\t\t\t\tkeys = v.Value.(bson.D)\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar strbuf bytes.Buffer\n\t\t\tfor n, value := range keys {\n\t\t\t\tif n == 0 {\n\t\t\t\t\tstrbuf.WriteString(\"{ \")\n\t\t\t\t}\n\t\t\t\tstrbuf.WriteString(value.Key + \": \" + fmt.Sprint(value.Value))\n\t\t\t\tif n == len(keys)-1 {\n\t\t\t\t\tstrbuf.WriteString(\" }\")\n\t\t\t\t} else {\n\t\t\t\t\tstrbuf.WriteString(\", \")\n\t\t\t\t}\n\t\t\t}\n\t\t\to := IndexStatsDoc{Key: strbuf.String()}\n\t\t\to.EffectiveKey = strings.Replace(o.Key[:len(o.Key)-2], \": -1\", \": 1\", -1)\n\t\t\to.Usage = []UsageDoc{}\n\t\t\tfor _, result := range indexStats {\n\t\t\t\tif 
result[\"name\"].(string) == indexName {\n\t\t\t\t\tdoc := result[\"accesses\"].(bson.M)\n\t\t\t\t\thost := result[\"host\"].(string)\n\t\t\t\t\tb, _ := bson.Marshal(doc)\n\t\t\t\t\tvar accesses UsageDoc\n\t\t\t\t\tbson.Unmarshal(b, &accesses)\n\t\t\t\t\taccesses.Hostname = host\n\t\t\t\t\to.Usage = append(o.Usage, accesses)\n\t\t\t\t}\n\t\t\t}\n\t\t\tlist = append(list, o)\n\t\t}\n\t\ticur.Close(ctx)\n\t\tsort.Slice(list, func(i, j int) bool { return (list[i].EffectiveKey <= list[j].EffectiveKey) })\n\t\tfor i, o := range list {\n\t\t\tfont := \"\\x1b[0m \"\n\t\t\tif o.Key != \"{ _id: 1 }\" {\n\t\t\t\tif i < len(list)-1 && strings.Index(list[i+1].EffectiveKey, o.EffectiveKey) == 0 {\n\t\t\t\t\tfont = \"\\x1b[31;1mx \" // red\n\t\t\t\t} else {\n\t\t\t\t\tsum := 0\n\t\t\t\t\tfor _, u := range o.Usage {\n\t\t\t\t\t\tsum += u.Ops\n\t\t\t\t\t}\n\t\t\t\t\tif sum == 0 {\n\t\t\t\t\t\tfont = \"\\x1b[34;1m? \" // blue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuffer.WriteString(font + o.Key + \"\\x1b[0m\")\n\t\t\tfor _, u := range o.Usage {\n\t\t\t\tbuffer.Write([]byte(\"\\n\\thost: \" + u.Hostname + \", ops: \" + fmt.Sprintf(\"%v\", u.Ops) + \", since: \" + fmt.Sprintf(\"%v\", u.Since)))\n\t\t\t}\n\t\t\tbuffer.WriteString(\"\\n\")\n\t\t}\n\t}\n\treturn buffer.String()\n}", "func (p *ThriftHiveMetastoreClient) GetIndexes(ctx context.Context, db_name string, tbl_name string, max_indexes int16) (r []*Index, err error) {\n var _args114 ThriftHiveMetastoreGetIndexesArgs\n _args114.DbName = db_name\n _args114.TblName = tbl_name\n _args114.MaxIndexes = max_indexes\n var _result115 ThriftHiveMetastoreGetIndexesResult\n if err = p.Client_().Call(ctx, \"get_indexes\", &_args114, &_result115); err != nil {\n return\n }\n switch {\n case _result115.O1!= nil:\n return r, _result115.O1\n case _result115.O2!= nil:\n return r, _result115.O2\n }\n\n return _result115.GetSuccess(), nil\n}", "func (Project) Indexes() []ent.Index {\n\treturn 
[]ent.Index{\n\t\tindex.Fields(\"name\").\n\t\t\tEdges(\"type\").\n\t\t\tUnique(),\n\t}\n}", "func (idx IndexType) Len() (count int) {\n\t(*idx.mutexPtr).RLock()\n\tcount = idx.bt.Len()\n\t(*idx.mutexPtr).RUnlock()\n\treturn\n}", "func (t *Transaction) checkIndexes() error {\n\t// check for index conflicts.\n\ttables := t.Cache.Tables()\n\tfor _, table := range tables {\n\t\ttc := t.Cache.Table(table)\n\t\tfor _, row := range tc.RowsShallow() {\n\t\t\terr := tc.IndexExists(row)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = t.Database.CheckIndexes(t.DbName, table, row)\n\t\t\terrIndexExists, isErrIndexExists := err.(*cache.ErrIndexExists)\n\t\t\tif err == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !isErrIndexExists {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor _, existing := range errIndexExists.Existing {\n\t\t\t\tif _, isDeleted := t.DeletedRows[existing]; isDeleted {\n\t\t\t\t\t// this model is deleted in the transaction, ignore it\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif tc.HasRow(existing) {\n\t\t\t\t\t// this model is updated in the transaction and was not\n\t\t\t\t\t// detected as a duplicate, so an index must have been\n\t\t\t\t\t// updated, ignore it\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func CreateChallengeTableIndicesQuery(tableName string) string {\n\tqueryString := fmt.Sprintf(`\n\t\tCREATE INDEX IF NOT EXISTS challenge_addr_idx ON %s (listing_address);\n\t`, tableName)\n\treturn queryString\n}", "func init() {\n\tindexFields := []string{\"name\"}\n\tconfig.CreateHashIndexedCollection(CollectionName, indexFields)\n}" ]
[ "0.6117143", "0.5898386", "0.5898386", "0.58484846", "0.56631005", "0.5581296", "0.54612577", "0.54561615", "0.5394811", "0.5362643", "0.53544223", "0.5325579", "0.5317164", "0.5316965", "0.52906317", "0.5283771", "0.52551454", "0.5242685", "0.52318054", "0.52306277", "0.5219917", "0.5179926", "0.5161371", "0.5141281", "0.5103651", "0.50992584", "0.50731975", "0.5070569", "0.5053443", "0.5035648", "0.50287443", "0.5020654", "0.5019567", "0.5014211", "0.5009387", "0.49805734", "0.4935688", "0.49315983", "0.49295047", "0.49195057", "0.48734358", "0.48729923", "0.4867461", "0.4863407", "0.48456684", "0.4823166", "0.48184854", "0.47982147", "0.47924796", "0.47874635", "0.4777728", "0.47746253", "0.47698337", "0.4759684", "0.47593454", "0.47570026", "0.47330752", "0.47304305", "0.47293752", "0.4704591", "0.4701423", "0.46974614", "0.46942014", "0.46916327", "0.46906787", "0.4689752", "0.46844164", "0.4678099", "0.46780363", "0.46728697", "0.467137", "0.4652862", "0.46360728", "0.46335244", "0.46143633", "0.46021205", "0.46016908", "0.45992756", "0.4597633", "0.4590532", "0.45904812", "0.45829007", "0.45823595", "0.45779148", "0.45698702", "0.4567985", "0.4567622", "0.4564894", "0.45638043", "0.45398745", "0.4525903", "0.4518628", "0.45164332", "0.4510963", "0.45057663", "0.44883874", "0.4488024", "0.44754255", "0.4466653", "0.44665787" ]
0.7163844
0
WithRootTypeParser Can be used to output the root type of a protocol for better debugging
func WithRootTypeParser(rootTypeParser func(utils.ReadBufferByteBased) (interface{}, error)) WithOption { return withRootTypeParser{rootTypeParser: rootTypeParser} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func RootTypeSpec(s TypeSpec) TypeSpec {\n\tif t, ok := s.(*TypedefSpec); ok {\n\t\treturn t.root\n\t}\n\treturn s\n}", "func AuxRootType(ty fgg.Type) fg.TypeBase {\n\tswitch ty_ := ty.(type) {\n\tcase fgg.TNamed:\n\t\treturn fg.Type(AuxBracket(ty_.TName))\n\t/*case fgg.TParam:\n\t\treturn fg.Type(\"Any\")\n\tcase fgg.ChannelType:\n\t\treturn fg.ChannelType{\n\t\t\tElementType: AuxRootType(ty_.ElementType),\n\t\t\tChType: ty_.ChType,\n\t\t}*/\n\tdefault:\n\t\tpanic(\"Only TNamed is used when channel is not implemented\")\n\t}\n}", "func (t *TOMLParser) Type() string {\n\treturn \"TOML\"\n}", "func (p *Parser) parseType(parserType *parser.Type) string {\n\tif parserType.ValueType != nil {\n\t\tswitch parserType.Name {\n\t\tcase \"list\":\n\t\t\treturn fmt.Sprintf(\"[]%s\", p.parseType(parserType.ValueType))\n\t\tcase \"map\":\n\t\t\treturn fmt.Sprintf(\"map[%s]%s\",\n\t\t\t\tp.parseType(parserType.KeyType),\n\t\t\t\tp.parseType(parserType.ValueType))\n\t\tcase \"set\":\n\t\t\treturn fmt.Sprintf(\"map[%s]bool\", p.parseType(parserType.ValueType))\n\t\tdefault:\n\t\t\tpanic(\"unknown type name\")\n\t\t}\n\t} else {\n\t\tname := p.parseName(parserType.Name)\n\t\treturn name\n\t}\n}", "func (cg *GenConfig) fakeRootTypeName(pathStruct bool) string {\n\tgoStructFakeRootName := yang.CamelCase(cg.FakeRootName)\n\tswitch {\n\tcase pathStruct:\n\t\treturn goStructFakeRootName + pathStructSuffix\n\tcase cg.GoImports.SchemaStructPkgPath != \"\":\n\t\treturn schemaStructPkgAlias + \".\" + goStructFakeRootName\n\tdefault:\n\t\treturn goStructFakeRootName\n\t}\n}", "func (o AnalyzerOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Analyzer) pulumi.StringPtrOutput { return v.Type }).(pulumi.StringPtrOutput)\n}", "func (p *printer) tipe(t tipe.Type) {\n\tswitch t := t.(type) {\n\tcase tipe.Basic:\n\t\tp.print(string(t))\n\tcase *tipe.Struct:\n\t\tif len(t.Fields) == 0 {\n\t\t\tp.print(\"struct{}\")\n\t\t\treturn\n\t\t}\n\t\tp.print(\"struct 
{\")\n\t\tp.indent++\n\t\tmaxlen := 0\n\t\tfor _, sf := range t.Fields {\n\t\t\tif len(sf.Name) > maxlen {\n\t\t\t\tmaxlen = len(sf.Name)\n\t\t\t}\n\t\t}\n\t\tfor _, sf := range t.Fields {\n\t\t\tp.newline()\n\t\t\tname := sf.Name\n\t\t\tif name == \"\" {\n\t\t\t\tname = \"*ERROR*No*Name*\"\n\t\t\t}\n\t\t\tif !sf.Embedded {\n\t\t\t\tp.print(name)\n\t\t\t\tfor i := len(name); i <= maxlen; i++ {\n\t\t\t\t\tp.print(\" \")\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.tipe(sf.Type)\n\t\t\tif sf.Tag != \"\" {\n\t\t\t\tp.printf(\" %q\", sf.Tag)\n\t\t\t}\n\t\t}\n\t\tp.indent--\n\t\tp.newline()\n\t\tp.print(\"}\")\n\tcase *tipe.Named:\n\t\tif t.PkgPath != \"\" && t.PkgPath != p.pkg.Path {\n\t\t\tpkg := p.c.Pkg(t.PkgPath)\n\t\t\tp.print(p.imports[pkg.Type])\n\t\t\tp.print(\".\")\n\t\t\tp.print(t.Name)\n\t\t} else if p.underlying && t.Name != \"error\" {\n\t\t\tif t == p.typeCur {\n\t\t\t\tp.print(t.Name)\n\t\t\t} else if pkgpath := p.typePlugins[t]; pkgpath != \"\" {\n\t\t\t\t// This type has methods and previously\n\t\t\t\t// declared in a plugin. 
Use it.\n\t\t\t\tp.typePluginsUsed[t] = true\n\t\t\t\tp.print(path.Base(pkgpath))\n\t\t\t\tp.print(\".\")\n\t\t\t\tp.print(t.Name)\n\t\t\t} else {\n\t\t\t\tp.tipe(tipe.Underlying(t))\n\t\t\t}\n\t\t} else {\n\t\t\tp.print(t.Name)\n\t\t}\n\tcase *tipe.Pointer:\n\t\tp.print(\"*\")\n\t\tp.tipe(t.Elem)\n\tcase *tipe.Unresolved:\n\t\tif t.Package != \"\" {\n\t\t\tp.print(t.Package)\n\t\t\tp.print(\".\")\n\t\t}\n\t\tp.print(t.Name)\n\tcase *tipe.Array:\n\t\t// Do not print the ellipsis as we may be printing\n\t\t// a variable declaration for a global without the\n\t\t// initializer.\n\t\tp.printf(\"[%d]\", t.Len)\n\t\tp.tipe(t.Elem)\n\tcase *tipe.Slice:\n\t\tp.print(\"[]\")\n\t\tp.tipe(t.Elem)\n\tcase *tipe.Interface:\n\t\tif len(t.Methods) == 0 {\n\t\t\tp.print(\"interface{}\")\n\t\t\treturn\n\t\t}\n\t\tp.print(\"interface {\")\n\t\tp.indent++\n\t\tnames := make([]string, 0, len(t.Methods))\n\t\tfor name := range t.Methods {\n\t\t\tnames = append(names, name)\n\t\t}\n\t\tsort.Strings(names)\n\t\tfor _, name := range names {\n\t\t\tp.newline()\n\t\t\tp.print(name)\n\t\t\tp.tipeFuncSig(t.Methods[name])\n\t\t}\n\t\tp.indent--\n\t\tp.newline()\n\t\tp.print(\"}\")\n\tcase *tipe.Map:\n\t\tp.print(\"map[\")\n\t\tp.tipe(t.Key)\n\t\tp.print(\"]\")\n\t\tp.tipe(t.Value)\n\tcase *tipe.Chan:\n\t\tif t.Direction == tipe.ChanRecv {\n\t\t\tp.print(\"<-\")\n\t\t}\n\t\tp.print(\"chan\")\n\t\tif t.Direction == tipe.ChanSend {\n\t\t\tp.print(\"<-\")\n\t\t}\n\t\tp.print(\" \")\n\t\tp.tipe(t.Elem)\n\tcase *tipe.Func:\n\t\tp.print(\"func\")\n\t\tp.tipeFuncSig(t)\n\tcase *tipe.Alias:\n\t\tp.print(t.Name)\n\tcase *tipe.Tuple:\n\t\tp.print(\"(\")\n\t\tfor i, elt := range t.Elems {\n\t\t\tif i > 0 {\n\t\t\t\tp.print(\", \")\n\t\t\t}\n\t\t\tp.tipe(elt)\n\t\t}\n\t\tp.print(\")\")\n\tcase *tipe.Ellipsis:\n\t\tp.print(\"...\")\n\t\tp.tipe(t.Elem)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type: %T\", t))\n\t}\n}", "func (o MfaPingidOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v 
*MfaPingid) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput)\n}", "func (o SourceCodeTokenOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *SourceCodeToken) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput)\n}", "func (o JsonSerializationOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v JsonSerialization) string { return v.Type }).(pulumi.StringOutput)\n}", "func (t *Operator) Root() Type {\n\treturn t\n}", "func (s *BasePlSqlParserListener) EnterType_procedure_spec(ctx *Type_procedure_specContext) {}", "func (NamespaceNode) Type() string { return TypeNamespaceNode }", "func (s *BasePlSqlParserListener) EnterType_spec(ctx *Type_specContext) {}", "func (o LinkOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Link) pulumi.StringPtrOutput { return v.Type }).(pulumi.StringPtrOutput)\n}", "func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node {\n\tn.X = walkExpr(n.X, init)\n\t// Set up interface type addresses for back end.\n\tif !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {\n\t\tn.Itab = reflectdata.ITabAddr(n.Type(), n.X.Type())\n\t}\n\treturn n\n}", "func (o MixinOutput) Root() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Mixin) *string { return v.Root }).(pulumi.StringPtrOutput)\n}", "func (o SchemaOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Schema) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput)\n}", "func (o JsonSerializationResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v JsonSerializationResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func packType(v interface{}) byte {\n\tswitch v.(type) {\n\tcase nil:\n\t\treturn ptNone\n\tcase string:\n\t\treturn ptString\n\tcase int32:\n\t\treturn ptInt\n\tcase float32:\n\t\treturn ptFloat\n\tcase uint32:\n\t\treturn ptPtr\n\tcase []uint16:\n\t\treturn ptWString\n\tcase color.NRGBA:\n\t\treturn ptColor\n\tcase uint64:\n\t\treturn 
ptUint64\n\tdefault:\n\t\tpanic(\"invalid vdf.Node\")\n\t}\n}", "func (o BackendAddressPoolTunnelInterfaceOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BackendAddressPoolTunnelInterface) string { return v.Type }).(pulumi.StringOutput)\n}", "func (o RuleTargetOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RuleTarget) string { return v.Type }).(pulumi.StringOutput)\n}", "func (o DatabaseDumpResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DatabaseDumpResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (g *GDB) getType(symbol string) string {\n\tg.stdin <- \"ptype \" + symbol\n\ttypeInfo := g.stdoutLines()\n\tif len(typeInfo) == 0 {\n\t\treturn \"unknown\"\n\t}\n\tti := typeInfo[0]\n\tti = strings.Replace(ti, \"type = struct \", \"\", 1)\n\tti = strings.Replace(ti, \" {\", \"\", 1)\n\treturn ti\n}", "func (o AnalyzerIdentityOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AnalyzerIdentity) string { return v.Type }).(pulumi.StringOutput)\n}", "func main() {\n\tdict := typedict.New([]interface{}{\n\t\t(*http.Request)(nil),\n\t})\n\n\tptn := regexp.MustCompile(`\\Anet/`)\n\n\t// TypeDict.Structs returns a silce of reflect.Type of struct\n\tstructs := typedict.SerializableTypes(dict.Structs(func(t reflect.Type) bool {\n\t\treturn ptn.MatchString(t.PkgPath())\n\t}))\n\ttypedict.WriteJson(os.Stdout, structs)\n}", "func (o GetTrafficPolicyDocumentRuleOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v GetTrafficPolicyDocumentRule) *string { return v.Type }).(pulumi.StringPtrOutput)\n}", "func (o ListenerOutput) Protocol() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Listener) *string { return v.Protocol }).(pulumi.StringPtrOutput)\n}", "func TestParser_GetKind(t *testing.T) {\n\tp := &Parser{}\n\trequire.Equal(t, model.KindJSON, p.GetKind())\n}", "func (o NetworkProfileOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *NetworkProfile) 
pulumi.StringPtrOutput { return v.Type }).(pulumi.StringPtrOutput)\n}", "func (o OceanLaunchSpecOutput) RootVolumeType() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *OceanLaunchSpec) pulumi.StringOutput { return v.RootVolumeType }).(pulumi.StringOutput)\n}", "func (o AvroSerializationOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AvroSerialization) string { return v.Type }).(pulumi.StringOutput)\n}", "func (o BackendRuleOutput) Protocol() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BackendRule) *string { return v.Protocol }).(pulumi.StringPtrOutput)\n}", "func (p *protocolACKN) Type() string {\n\treturn \"ACKN\"\n}", "func (Output) typ() string { return \"output1\" }", "func NodesFromTypedef(pkg *packages.Package, f *ast.File, typed *ast.GenDecl) ([]models.EncodedNode, []string, []string) {\n\tpf := pkg.Fset.File(f.Pos())\n\n\tkind := KindTypename\n\tnodes := []models.EncodedNode{}\n\tstructs := []string{}\n\tifaces := []string{}\n\n\tfor _, spec := range typed.Specs {\n\t\ttspec, ok := spec.(*ast.TypeSpec)\n\t\tif !ok {\n\t\t\tpanic(fmt.Errorf(\"Unknown type for processing types: %#v\", spec))\n\t\t}\n\t\tdoc := \"\"\n\t\tif tspec.Comment != nil {\n\t\t\tdoc = tspec.Comment.Text()\n\t\t}\n\t\tpublic := true\n\t\tname := tspec.Name.Name\n\t\tif 'a' <= name[0] && name[0] <= 'z' {\n\t\t\tpublic = false\n\t\t}\n\n\t\tuid := fmt.Sprintf(\"%s.%s\", pkg.PkgPath, name)\n\t\tnodes = append(nodes, models.EncodedNode{\n\t\t\tComponent: models.Component{\n\t\t\t\tUID: uid,\n\t\t\t\tDisplayName: fmt.Sprintf(\"%s.%s\", pkg.Name, name),\n\t\t\t\tDescription: doc,\n\t\t\t\tKind: kind,\n\t\t\t\t// HACK one line for definition and one for closing curly brace\n\t\t\t\tLocation: pos2loc(pf.Name(), tspec.Name.NamePos - token.Pos(pf.Base()), uint(pf.Base()), spec, uint(2)),\n\t\t\t},\n\t\t\tPublic: public,\n\t\t})\n\t\tswitch typeTyped := tspec.Type.(type) {\n\t\tcase *ast.StructType:\n\t\t\tstructs = append(structs, uid)\n\t\t\tfor _, field := range 
typeTyped.Fields.List {\n\t\t\t\tfieldDoc := \"\"\n\t\t\t\tif field.Comment != nil {\n\t\t\t\t\tfieldDoc = field.Comment.Text()\n\t\t\t\t}\n\t\t\t\tfor _, fieldName := range field.Names {\n\t\t\t\t\tnodes = append(nodes, models.EncodedNode{\n\t\t\t\t\t\tComponent: models.Component{\n\t\t\t\t\t\t\tUID: fmt.Sprintf(\"%s.%s.%s\", pkg.PkgPath, name, fieldName.Name),\n\t\t\t\t\t\t\tDisplayName: fmt.Sprintf(\"%s.%s.%s\", pkg.Name, name, fieldName.Name),\n\t\t\t\t\t\t\tDescription: fieldDoc,\n\t\t\t\t\t\t\tKind: KindField,\n\t\t\t\t\t\t\t// NOTE for multiple fields on the same line this is ambiguous\n\t\t\t\t\t\t\tLocation: pos2loc(pf.Name(), fieldName.NamePos - token.Pos(pf.Base()), uint(pf.Base()), field, 1),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPublic: public,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\tcase *ast.InterfaceType:\n\t\t\tifaces = append(ifaces, uid)\n\t\t\tfor _, method := range typeTyped.Methods.List {\n\t\t\t\tmethodDoc := \"\"\n\t\t\t\tif method.Comment != nil {\n\t\t\t\t\tmethodDoc = method.Comment.Text()\n\t\t\t\t}\n\t\t\t\tfor _, methodName := range method.Names {\n\t\t\t\t\tnodes = append(nodes, models.EncodedNode{\n\t\t\t\t\t\tComponent: models.Component{\n\t\t\t\t\t\t\tUID: fmt.Sprintf(\"%s.%s.%s\", pkg.PkgPath, name, methodName.Name),\n\t\t\t\t\t\t\tDisplayName: fmt.Sprintf(\"%s.%s.%s\", pkg.Name, name, methodName.Name),\n\t\t\t\t\t\t\tDescription: methodDoc,\n\t\t\t\t\t\t\tKind: KindMethod,\n\t\t\t\t\t\t\tLocation: pos2loc(pf.Name(), methodName.NamePos - token.Pos(pf.Base()), uint(pf.Base()), method, 1),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPublic: public,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nodes, structs, ifaces\n}", "func (o OceanLoadBalancerOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v OceanLoadBalancer) *string { return v.Type }).(pulumi.StringPtrOutput)\n}", "func (cp *cparser1) gettypespec(n *sitter.Node) string {\n\ttystr := \"\"\n\tnc := int(n.ChildCount())\n\tfor i := 0; i < nc; i++ {\n\t\tnx := 
n.Child(i)\n\t\testr := cp.exprtxt(nx)\n\t\tswitch nx.Type() {\n\t\tcase tsStorageClassSpecifier:\n\t\tcase tsTypeQualifier:\n\t\tcase tsPrimitiveType:\n\t\t\ttystr += \" \" + estr\n\t\tcase tsTypeIdentifier:\n\t\t\ttystr += \" \" + estr\n\t\tcase tsSizedTypeSpecifier:\n\t\t\ttystr += \" \" + estr\n\t\tcase tsStructSpecifier:\n\t\t\ttystr += \" \" + estr\n\t\tcase tsUnionSpecifier:\n\t\tcase tsEnumSpecifier:\n\t\t\ttystr += \" \" + estr\n\t\tcase tsPointerDeclarator:\n\t\t\ttystr += tsStar\n\t\t\ttystr += cp.gettypespec(nx)\n\t\t\ttt1 := \"\"\n\t\t\tif nx.NextSibling() != nil {\n\t\t\t\ttt1 = nx.NextSibling().Type()\n\t\t\t}\n\t\t\tlog.Println(nx.NextSibling() != nil, tt1)\n\t\tcase tsFunctionDeclarator: // 还有可能是 function pointer\n\t\t\tlog.Println(nx.Type(), cp.exprtxt(nx))\n\t\tcase tsIdentifier: // var name\n\t\tcase tsSemiColon:\n\t\tcase tsStar:\n\t\t\t// tystr += tsStar\n\t\tcase tsArrayDeclarator:\n\t\t\ttystr += \" \" + tsStar\n\t\tcase tsInitDeclarator:\n\t\t\ttystr += \" \" + cp.gettypespec(nx)\n\t\tcase \"=\":\n\t\tcase \"number_literal\":\n\t\tdefault:\n\t\t\tlog.Panicln(nx.Type(), \"|\", estr, \"|\")\n\t\t}\n\t\t// log.Println(n.Type(), i, nx.Type(), \"//\", len(txt), txt)\n\t}\n\treturn tystr\n}", "func (o AvroSerializationResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AvroSerializationResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func ParseProtocol(r io.Reader) (*Protocol, error) {\n\tconst errPrefix = \"invalid Avro protocol:\"\n\n\t// Parse the JSON-formatted Avro protocol\n\tdec := json.NewDecoder(r)\n\n\tvar rp rawProtocol\n\tif err := dec.Decode(&rp); err != nil {\n\t\treturn nil, errors.New(errPrefix + \" \" + err.Error())\n\t}\n\n\t// Perform basic validations. 
Additional validations will be performed\n\t// during codec construction below.\n\tif !isValidName(rp.Protocol) {\n\t\treturn nil, errors.New(errPrefix + \" missing or invalid protocol attribute\")\n\t}\n\n\tif len(rp.Messages) == 0 {\n\t\treturn nil, errors.New(errPrefix + \" missing or empty messages object\")\n\t}\n\n\tnamedTypes := make(map[string]map[string]interface{}, len(rp.Types))\n\tfor i, typ := range rp.Types {\n\t\tname, ok := typ[\"name\"].(string)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"%s missing name attribute (type %d)\", errPrefix, i+1)\n\t\t}\n\n\t\tif _, ok = typ[\"type\"].(string); !ok {\n\t\t\treturn nil, fmt.Errorf(\"%s missing type attribute (type %d)\", errPrefix, i+1)\n\t\t}\n\n\t\tnamedTypes[name] = typ\n\t\tnamespace, ok := typ[\"namespace\"].(string)\n\t\tif ok {\n\t\t\tnamedTypes[namespace+\".\"+name] = typ\n\t\t}\n\t}\n\n\tfor name, rmsg := range rp.Messages {\n\t\tif !isValidName(name) {\n\t\t\treturn nil, errors.New(errPrefix + \" missing or invalid message name attribute\")\n\t\t}\n\n\t\tif rmsg.OneWay && (rmsg.Response != \"null\" || len(rmsg.Errors) > 0) {\n\t\t\treturn nil, fmt.Errorf(\"%s the one-way message parameter may only be true when the response type is \\\"null\\\" and no errors are listed (%s)\", errPrefix, name)\n\t\t}\n\n\t\tswitch rmsg.Stream {\n\t\tcase \"\":\n\t\tcase RequestStream, ResponseStream, BidirStream:\n\t\t\tif rmsg.OneWay {\n\t\t\t\treturn nil, fmt.Errorf(\"%s the stream parameter may not be specified for one-way messages (%s)\", errPrefix, name)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%s invalid stream parameter (%s)\", errPrefix, name)\n\t\t}\n\t}\n\n\t// Construct the protocol with message-specific codecs\n\tp := &Protocol{\n\t\tNamespace: rp.Namespace,\n\t\tProtocol: rp.Protocol,\n\t\tDoc: rp.Doc,\n\t\tMessages: make(map[string]*Message, len(rp.Messages)),\n\t}\n\n\tfor name, rmsg := range rp.Messages {\n\t\t// Dynamically generate the request schema\n\t\treqSchema, err := 
json.Marshal(map[string]interface{}{\n\t\t\t\"type\": \"record\",\n\t\t\t\"name\": name,\n\t\t\t\"doc\": rmsg.Doc,\n\t\t\t\"fields\": fixupRequestParams(rmsg.Request, namedTypes),\n\t\t})\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\t// Dynamically generate the response schema\n\t\trespSchema, err := json.Marshal(fixupSchema(rmsg.Response, namedTypes))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\t// Dynamically generate the errors schema\n\t\tvar (\n\t\t\terrsSchema []byte\n\t\t\thasExplicitErrors bool\n\t\t)\n\t\tif rmsg.Errors != nil {\n\t\t\tif errsSchema, err = json.Marshal(fixupErrors(rmsg.Errors, namedTypes)); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\thasExplicitErrors = len(rmsg.Errors) != 0\n\t\t}\n\n\t\t// Construct the message and its codecs\n\t\tmsg := &Message{\n\t\t\tDoc: rmsg.Doc,\n\t\t\tRequestSchema: string(reqSchema),\n\t\t\tResponseSchema: string(respSchema),\n\t\t\tErrorsSchema: string(errsSchema),\n\t\t\tHasExplicitErrors: hasExplicitErrors,\n\t\t\tOneWay: rmsg.OneWay,\n\t\t\tStream: rmsg.Stream,\n\t\t}\n\n\t\tif msg.RequestCodec, err = goavro.NewCodec(msg.RequestSchema); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%s invalid \\\"%s\\\" message request: %s\", errPrefix, name, err.Error())\n\t\t}\n\n\t\tif msg.ResponseCodec, err = goavro.NewCodec(msg.ResponseSchema); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%s invalid \\\"%s\\\" message response: %s\", errPrefix, name, err.Error())\n\t\t}\n\n\t\tif errsSchema != nil {\n\t\t\tif msg.ErrorsCodec, err = goavro.NewCodec(msg.ErrorsSchema); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"%s invalid \\\"%s\\\" message errors: %s\", errPrefix, name, err.Error())\n\t\t\t}\n\t\t}\n\n\t\tp.Messages[name] = msg\n\t}\n\n\treturn p, nil\n}", "func (o GatewayOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Gateway) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput)\n}", "func (o MixinResponseOutput) Root() pulumi.StringOutput {\n\treturn o.ApplyT(func(v 
MixinResponse) string { return v.Root }).(pulumi.StringOutput)\n}", "func PrintType(typ types.Type) string {\n\treturn types.TypeString(typ, func(i *types.Package) string {\n\t\treturn i.Name()\n\t})\n}", "func (p *printcb) tagProtocol() {}", "func (o ApplicationSpecRolloutplanRolloutwebhooksOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationSpecRolloutplanRolloutwebhooks) string { return v.Type }).(pulumi.StringOutput)\n}", "func (NamespaceStar) Type() string { return TypeNamespaceStar }", "func extractType(t *doc.Type, fset *token.FileSet) string {\n\tif t == nil || t.Decl == nil || len(t.Decl.Specs) == 0 {\n\t\treturn \"\"\n\t}\n\n\tts := t.Decl.Specs[0].(*ast.TypeSpec)\n\tswitch ts.Type.(type) {\n\tcase *ast.StructType:\n\t\treturn \"struct\"\n\tcase *ast.InterfaceType:\n\t\treturn \"interface\"\n\tdefault:\n\t\treturn extractSource(ts.Type, fset)\n\t}\n}", "func (o AnalyzerIdentityPtrOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *AnalyzerIdentity) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Type\n\t}).(pulumi.StringPtrOutput)\n}", "func (o FlowOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Flow) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput)\n}", "func (o OceanFiltersOutput) RootDeviceTypes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v OceanFilters) []string { return v.RootDeviceTypes }).(pulumi.StringArrayOutput)\n}", "func printType(anything interface{}) {\n\tfmt.Printf(\"Type: %T Value: %v\\n\", anything, anything)\n}", "func (o OceanFiltersPtrOutput) RootDeviceTypes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *OceanFilters) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.RootDeviceTypes\n\t}).(pulumi.StringArrayOutput)\n}", "func (o CustomClrSerializationOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CustomClrSerialization) string { return v.Type }).(pulumi.StringOutput)\n}", "func (p *Pprint) 
pprint(data interface{}) {\n\t// temp := reflect.TypeOf(data).Kind()\n\t// p.test[\"array\"] = reflect.Map\n\t// fmt.Println(p.test)\n\t// if reflect.Array == temp {\n\t// \tfmt.Println(\"scussu\")\n\t// }\n\t// p.test[temp.String()] = 0\n\tp.Typemode = reflect.TypeOf(data).Kind()\n\tif p.Typemode == reflect.Map {\n\t\tp.PrintMaps(data)\n\t} else if p.Typemode == reflect.Slice {\n\t\tp.PrintArrays(data)\n\t} else if p.Typemode == reflect.Array {\n\t\tp.PrintArrays(data)\n\t} else {\n\t\tfmt.Println(data)\n\t}\n}", "func (obj *language) Root() string {\n\treturn obj.root\n}", "func (p *Parser) parseTypeName() ast.Expr {\n\tif p.trace {\n\t\tdefer un(trace(p, \"TypeName\"))\n\t}\n\n\tident := p.parseIdent()\n\t// don't resolve ident yet - it may be a parameter or field name\n\n\t/*if p.tok == token.PERIOD {\n\t\t// ident is a package name\n\t\tp.next()\n\t\tp.resolve(ident)\n\t\tsel := p.parseIdent()\n\t\treturn &ast.SelectorExpr{X: ident, Sel: sel}\n\t}*/\n\n\treturn ident\n}", "func (o PriorityLevelConfigurationPatchOutput) Kind() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *PriorityLevelConfigurationPatch) pulumi.StringPtrOutput { return v.Kind }).(pulumi.StringPtrOutput)\n}", "func dumptype(t *_type) {\n\tif t == nil {\n\t\treturn\n\t}\n\n\t// If we've definitely serialized the type before,\n\t// no need to do it again.\n\tb := &typecache[t.hash&(typeCacheBuckets-1)]\n\tif t == b.t[0] {\n\t\treturn\n\t}\n\tfor i := 1; i < typeCacheAssoc; i++ {\n\t\tif t == b.t[i] {\n\t\t\t// Move-to-front\n\t\t\tfor j := i; j > 0; j-- {\n\t\t\t\tb.t[j] = b.t[j-1]\n\t\t\t}\n\t\t\tb.t[0] = t\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Might not have been dumped yet. 
Dump it and\n\t// remember we did so.\n\tfor j := typeCacheAssoc - 1; j > 0; j-- {\n\t\tb.t[j] = b.t[j-1]\n\t}\n\tb.t[0] = t\n\n\t// dump the type\n\tdumpint(tagType)\n\tdumpint(uint64(uintptr(unsafe.Pointer(t))))\n\tdumpint(uint64(t.size))\n\tif x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == \"\" {\n\t\tdumpstr(t.string())\n\t} else {\n\t\tpkgpathstr := t.nameOff(x.pkgpath).name()\n\t\tpkgpath := stringStructOf(&pkgpathstr)\n\t\tnamestr := t.name()\n\t\tname := stringStructOf(&namestr)\n\t\tdumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))\n\t\tdwrite(pkgpath.str, uintptr(pkgpath.len))\n\t\tdwritebyte('.')\n\t\tdwrite(name.str, uintptr(name.len))\n\t}\n\tdumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)\n}", "func (o IopingSpecVolumeVolumeSourceHostPathOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceHostPath) *string { return v.Type }).(pulumi.StringPtrOutput)\n}", "func (s *BaseGraffleParserListener) EnterGraph_type(ctx *Graph_typeContext) {}", "func (o CustomClrSerializationResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CustomClrSerializationResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (o FioSpecVolumeVolumeSourceHostPathOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceHostPath) *string { return v.Type }).(pulumi.StringPtrOutput)\n}", "func (o ParquetSerializationResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ParquetSerializationResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func GenStructFromAllOfTypes(allOf []TypeDefinition) string {\n\t// Start out with struct {\n\tobjectParts := []string{\"struct {\"}\n\tfor _, td := range allOf {\n\t\tref := td.Schema.RefType\n\t\tif ref != \"\" {\n\t\t\t// We have a referenced type, we will generate an inlined struct\n\t\t\t// member.\n\t\t\t// struct {\n\t\t\t// InlinedMember\n\t\t\t// ...\n\t\t\t// 
}\n\t\t\tobjectParts = append(objectParts,\n\t\t\t\tfmt.Sprintf(\" // Embedded struct due to allOf(%s)\", ref))\n\t\t\tobjectParts = append(objectParts,\n\t\t\t\tfmt.Sprintf(\" %s `yaml:\\\",inline\\\"`\", ref))\n\t\t} else {\n\t\t\t// Inline all the fields from the schema into the output struct,\n\t\t\t// just like in the simple case of generating an object.\n\t\t\tobjectParts = append(objectParts, \" // Embedded fields due to inline allOf schema\")\n\t\t\tobjectParts = append(objectParts, GenFieldsFromProperties(td.Schema.Properties)...)\n\n\t\t\tif td.Schema.HasAdditionalProperties {\n\t\t\t\taddPropsType := td.Schema.AdditionalPropertiesType.GoType\n\t\t\t\tif td.Schema.AdditionalPropertiesType.RefType != \"\" {\n\t\t\t\t\taddPropsType = td.Schema.AdditionalPropertiesType.RefType\n\t\t\t\t}\n\n\t\t\t\tadditionalPropertiesPart := fmt.Sprintf(\"AdditionalProperties map[string]%s `json:\\\"-\\\"`\", addPropsType)\n\t\t\t\tif !StringInArray(additionalPropertiesPart, objectParts) {\n\t\t\t\t\tobjectParts = append(objectParts, additionalPropertiesPart)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tobjectParts = append(objectParts, \"}\")\n\treturn strings.Join(objectParts, \"\\n\")\n}", "func parseTopLevel(fset *token.FileSet, buf *Buffer) (*ast.File, error) {\n\tsrc := []byte(\"package p\\n\" + buf.String())\n\treturn parser.ParseFile(fset, \"<input>\", src, parser.DeclarationErrors|parser.ParseComments)\n}", "func (o BuildStrategySpecBuildStepsSecurityContextSeLinuxOptionsOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsSecurityContextSeLinuxOptions) *string { return v.Type }).(pulumi.StringPtrOutput)\n}", "func (o AppGatewayOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AppGateway) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput)\n}", "func (o LookupListenerResultOutput) Protocol() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupListenerResult) string { return v.Protocol 
}).(pulumi.StringOutput)\n}", "func (o SchemaPackageResponsePtrOutput) SchematizedParsingType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *SchemaPackageResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.SchematizedParsingType\n\t}).(pulumi.StringPtrOutput)\n}", "func (p *Parser) literalType() (*ast.LiteralType, error) {\n\tdefer un(trace(p, \"ParseLiteral\"))\n\n\treturn &ast.LiteralType{\n\t\tToken: p.tok,\n\t}, nil\n}", "func (p *parser) parse() {\n\tp.Root = p.program()\n\tp.expect(TokenEOF)\n\t//if err := t.Root.Check(); err != nil {\n\t//\tt.error(err)\n\t//}\n}", "func (o SecurityPolicyRuleRedirectOptionsResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SecurityPolicyRuleRedirectOptionsResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (p *simpleParser) parseTypeName(r reporter) (name *ast.TypeName) {\n\tname = &ast.TypeName{}\n\n\t// one or more name\n\tif next, ok := p.lookahead(r); ok && next.Type() == token.Literal {\n\t\tname.Name = append(name.Name, next)\n\t\tp.consumeToken()\n\t} else {\n\t\tr.unexpectedToken(token.Literal)\n\t}\n\tfor {\n\t\tif next, ok := p.lookahead(r); ok && next.Type() == token.Literal {\n\t\t\tname.Name = append(name.Name, next)\n\t\t\tp.consumeToken()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif next, ok := p.lookahead(r); ok && next.Type() == token.Delimiter && next.Value() == \"(\" {\n\t\tname.LeftParen = next\n\t\tp.consumeToken()\n\n\t\tname.SignedNumber1 = p.parseSignedNumber(r)\n\n\t} else {\n\t\treturn\n\t}\n\n\tif next, ok := p.lookahead(r); ok && next.Type() == token.Delimiter {\n\t\tswitch next.Value() {\n\t\tcase \",\":\n\t\t\tname.Comma = next\n\t\t\tp.consumeToken()\n\n\t\t\tname.SignedNumber2 = p.parseSignedNumber(r)\n\t\t\tnext, ok = p.lookahead(r)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif next.Type() != token.Delimiter {\n\t\t\t\tr.unexpectedToken(token.Delimiter)\n\t\t\t}\n\t\t\tfallthrough\n\t\tcase 
\")\":\n\t\t\tname.RightParen = next\n\t\t\tp.consumeToken()\n\t\t}\n\t} else {\n\t\treturn\n\t}\n\treturn\n}", "func (o FieldOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Field) *string { return v.Type }).(pulumi.StringPtrOutput)\n}", "func (o RawOutputDatasourceResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RawOutputDatasourceResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (s *BaseConcertoListener) EnterTypeSpec(ctx *TypeSpecContext) {}", "func (o SchemaPackageResponseOutput) SchematizedParsingType() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SchemaPackageResponse) string { return v.SchematizedParsingType }).(pulumi.StringOutput)\n}", "func typeRule(p *Parser) (types.TypeNode, error) {\n\ttpGuard := &types.TypeGuard{Types: make([]types.TypeNode, 0)}\n\tfor {\n\t\ttp, err := subTypeRule(p)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttpGuard.Types = append(tpGuard.Types, tp)\n\t\tif p.Peek().Token != tokens.LOGICAL || p.Peek().Value != \"|\" {\n\t\t\tbreak\n\t\t}\n\t\tp.Next()\n\t}\n\treturn tpGuard, nil\n}", "func WithRootSchema(oapi spec.OpenAPI) Option {\n\treturn func(r *Options) error {\n\t\tr.root = oapi\n\t\treturn nil\n\t}\n}", "func main() {\n\n\tswaggerSpec, err := loads.Embedded(restapi.SwaggerJSON, restapi.FlatSwaggerJSON)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tapi := operations.NewExternalTypesDemoAPI(swaggerSpec)\n\tserver := restapi.NewServer(api)\n\tdefer server.Shutdown()\n\n\tparser := flags.NewParser(server, flags.Default)\n\tparser.ShortDescription = \"external types imports: external anonymous types\"\n\tparser.LongDescription = \"This sample specification exercises external types, with both x-go-type in definitions and inlined.\\n\\nIt demonstrates how to use the x-go-type extension to plug external type definitions in the generated code,\\nfor models (e.g. 
for properties, arrays or maps) or operations.\\n\\nNotice that x-go-type works for schemas and is not supported for simple swagger types,\\nused for response headers and query & path parameters.\\n\"\n\tserver.ConfigureFlags()\n\tfor _, optsGroup := range api.CommandLineOptionsGroups {\n\t\t_, err := parser.AddGroup(optsGroup.ShortDescription, optsGroup.LongDescription, optsGroup.Options)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t}\n\n\tif _, err := parser.Parse(); err != nil {\n\t\tcode := 1\n\t\tif fe, ok := err.(*flags.Error); ok {\n\t\t\tif fe.Type == flags.ErrHelp {\n\t\t\t\tcode = 0\n\t\t\t}\n\t\t}\n\t\tos.Exit(code)\n\t}\n\n\tserver.ConfigureAPI()\n\n\tif err := server.Serve(); err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n}", "func (s *BasecluListener) EnterType_spec(ctx *Type_specContext) {}", "func (o AddressGroupOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AddressGroup) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput)\n}", "func (o ParquetSerializationOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ParquetSerialization) string { return v.Type }).(pulumi.StringOutput)\n}", "func (o LookupPipelineResultOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupPipelineResult) string { return v.Type }).(pulumi.StringOutput)\n}", "func protoToStructure(output interface{}, result *proto.TorrentResponse, err error) error {\n if err != nil {\n return err\n }\n err = json.Unmarshal(result.Result, &output)\n if err != nil {\n return errors.UnmarshalError.ToError(err)\n }\n return nil\n}", "func (o BuildSpecStrategyOutput) Kind() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildSpecStrategy) *string { return v.Kind }).(pulumi.StringPtrOutput)\n}", "func (o NetworkInterfaceResponseOutput) Kind() pulumi.StringOutput {\n\treturn o.ApplyT(func(v NetworkInterfaceResponse) string { return v.Kind }).(pulumi.StringOutput)\n}", "func VisitTypes(root *rbxapijson.Root, visit func(rbxapi.Type)) {\n\tfor 
_, class := range root.GetClasses() {\n\t\tvisit(rbxapijson.Type{Category: \"Class\", Name: class.GetName()})\n\t\tvisit(rbxapijson.Type{Category: \"Class\", Name: class.GetSuperclass()})\n\t\tfor _, member := range class.GetMembers() {\n\t\t\tswitch member.GetMemberType() {\n\t\t\tcase \"Property\":\n\t\t\t\tmember := member.(rbxapi.Property)\n\t\t\t\tvisit(member.GetValueType())\n\t\t\tcase \"Function\":\n\t\t\t\tmember := member.(rbxapi.Function)\n\t\t\t\tfor _, param := range member.GetParameters().GetParameters() {\n\t\t\t\t\tvisit(param.GetType())\n\t\t\t\t}\n\t\t\t\tvisit(member.GetReturnType())\n\t\t\tcase \"Event\":\n\t\t\t\tmember := member.(rbxapi.Event)\n\t\t\t\tfor _, param := range member.GetParameters().GetParameters() {\n\t\t\t\t\tvisit(param.GetType())\n\t\t\t\t}\n\t\t\tcase \"Callback\":\n\t\t\t\tmember := member.(rbxapi.Callback)\n\t\t\t\tfor _, param := range member.GetParameters().GetParameters() {\n\t\t\t\t\tvisit(param.GetType())\n\t\t\t\t}\n\t\t\t\tvisit(member.GetReturnType())\n\t\t\t}\n\t\t}\n\t}\n\tfor _, enum := range root.GetEnums() {\n\t\tvisit(rbxapijson.Type{Category: \"Enum\", Name: enum.GetName()})\n\t}\n}", "func (o FieldResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FieldResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (o ApplicationOutput) PackageType() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Application) pulumi.StringOutput { return v.PackageType }).(pulumi.StringOutput)\n}", "func (o BackendRuleResponseOutput) Protocol() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BackendRuleResponse) string { return v.Protocol }).(pulumi.StringOutput)\n}", "func (o KafkaConnectorOutput) PluginType() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *KafkaConnector) pulumi.StringOutput { return v.PluginType }).(pulumi.StringOutput)\n}", "func (o TargetGroupOutput) Protocol() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TargetGroup) *string { return v.Protocol 
}).(pulumi.StringPtrOutput)\n}", "func (o EntryOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Entry) pulumi.StringPtrOutput { return v.Type }).(pulumi.StringPtrOutput)\n}", "func printTypedNames(w io.Writer, prefix string, ns []TypedEntry) {\n\tif len(ns) == 0 {\n\t\treturn\n\t}\n\ttprev := typeString(ns[0].Types)\n\tsep := prefix\n\tfor _, n := range ns {\n\t\ttcur := typeString(n.Types)\n\t\tif tcur != tprev {\n\t\t\tif tprev == \"\" {\n\t\t\t\t// Should be impossible.\n\t\t\t\tpanic(n.Location.String() + \": untyped declarations in the middle of a typed list\")\n\t\t\t}\n\t\t\tfmt.Fprintf(w, \" - %s\", tprev)\n\t\t\ttprev = tcur\n\t\t\tsep = prefix\n\t\t\tif sep == \"\" {\n\t\t\t\tsep = \" \"\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintf(w, \"%s%s\", sep, n.Str)\n\t\tsep = \" \"\n\t}\n\tif tprev != \"\" {\n\t\tfmt.Fprintf(w, \" - %s\", tprev)\n\t}\n}", "func RegisterProtocol(name string, v interface{}) {\n\troot.Protocols[name] = v\n}", "func (o IoTHubStreamInputDataSourceResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IoTHubStreamInputDataSourceResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func Root(w io.Writer) io.Writer {\n\tswitch x := w.(type) {\n\tcase tree:\n\t\treturn coalesceWriters(x.Root(), w)\n\tcase node:\n\t\treturn coalesceWriters(Root(x.Parent()), w)\n\tcase decorator:\n\t\treturn coalesceWriters(Root(x.Base()), w)\n\tdefault:\n\t\treturn w\n\t}\n}", "func (o UserOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *User) pulumi.StringPtrOutput { return v.Type }).(pulumi.StringPtrOutput)\n}", "func (o MetricTargetPatchOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v MetricTargetPatch) *string { return v.Type }).(pulumi.StringPtrOutput)\n}" ]
[ "0.5335888", "0.5227169", "0.5214134", "0.5140314", "0.50785226", "0.49905667", "0.49589553", "0.49425226", "0.48941708", "0.4878799", "0.48227474", "0.48148534", "0.48069713", "0.4792133", "0.47804672", "0.4775946", "0.47751683", "0.4744906", "0.47153187", "0.47070324", "0.4699059", "0.468579", "0.46549067", "0.4648813", "0.46433282", "0.46407408", "0.46239486", "0.4622659", "0.46215686", "0.46145478", "0.46058053", "0.4604814", "0.46007612", "0.4596337", "0.4594966", "0.4593273", "0.4584484", "0.45834008", "0.45809647", "0.45715964", "0.45698518", "0.4562125", "0.45449638", "0.45429596", "0.45330146", "0.45320687", "0.45302364", "0.45071432", "0.44913507", "0.44834295", "0.4483078", "0.4480314", "0.44752526", "0.4474185", "0.44729814", "0.4466569", "0.4463524", "0.44615403", "0.44592783", "0.44538027", "0.44534832", "0.44447833", "0.44435805", "0.44414526", "0.44374186", "0.4434401", "0.4431552", "0.44233337", "0.44218457", "0.44181803", "0.44179595", "0.44169822", "0.44122034", "0.44121873", "0.44066435", "0.44050795", "0.44029185", "0.44011593", "0.44000328", "0.43996024", "0.43984973", "0.43974954", "0.43952745", "0.43944192", "0.43924385", "0.43902615", "0.43853068", "0.43829665", "0.4381793", "0.43768302", "0.43757695", "0.43718165", "0.4370189", "0.43675047", "0.4366902", "0.4363687", "0.43634814", "0.43631858", "0.43604493", "0.4360181" ]
0.5815207
0
NewMockLoadBalance creates a new mock instance
func NewMockLoadBalance(ctrl *gomock.Controller) *MockLoadBalance { mock := &MockLoadBalance{ctrl: ctrl} mock.recorder = &MockLoadBalanceMockRecorder{mock} return mock }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewLoadBalance(name string) LoadBalance {\n\treturn LBS[name]()\n}", "func New(ringWeight int) LoadBalancer {\n\t// TODO: Implement this!\n\tnewLB := new(loadBalancer)\n\tnewLB.sortedNames = make([]MMENode, 0)\n\tnewLB.weight = ringWeight\n\tnewLB.hashRing = NewRing()\n\tif 7 == 2 {\n\t\tfmt.Println(ringWeight)\n\t}\n\treturn newLB\n}", "func NewMock(t *testing.T) *MockT { return &MockT{t: t} }", "func (_e *MockQueryCoord_Expecter) LoadBalance(ctx interface{}, req interface{}) *MockQueryCoord_LoadBalance_Call {\n\treturn &MockQueryCoord_LoadBalance_Call{Call: _e.mock.On(\"LoadBalance\", ctx, req)}\n}", "func New() (*mock, error) {\n\treturn &mock{\n\t\tConfigService: ConfigService{},\n\t\tContainerService: ContainerService{},\n\t\tDistributionService: DistributionService{},\n\t\tImageService: ImageService{},\n\t\tNetworkService: NetworkService{},\n\t\tNodeService: NodeService{},\n\t\tPluginService: PluginService{},\n\t\tSecretService: SecretService{},\n\t\tServiceService: ServiceService{},\n\t\tSystemService: SystemService{},\n\t\tSwarmService: SwarmService{},\n\t\tVolumeService: VolumeService{},\n\t\tVersion: Version,\n\t}, nil\n}", "func New() *Mock {\n\treturn &Mock{\n\t\tm: mockMap{},\n\t\toldTransport: http.DefaultTransport,\n\t}\n}", "func New(opts ...Option) staking.Contract {\n\tbs := &stakingContractMock{}\n\n\tfor _, o := range opts {\n\t\to(bs)\n\t}\n\n\treturn bs\n}", "func (env *env) newTestHelperCreateLgr(id string, t *testing.T) *testhelper {\n\tgenesisBlk, err := constructTestGenesisBlock(id)\n\tassert.NoError(t, err)\n\tlgr, err := env.ledgerMgr.CreateLedger(id, genesisBlk)\n\tassert.NoError(t, err)\n\tclient, committer, verifier := newClient(lgr, id, t), newCommitter(lgr, t), newVerifier(lgr, t)\n\treturn &testhelper{client, committer, verifier, lgr, id, assert.New(t)}\n}", "func newMockBlockHeaderStore() *mockBlockHeaderStore {\n\treturn &mockBlockHeaderStore{\n\t\theaders: make(map[chainhash.Hash]wire.BlockHeader),\n\t\theights: 
make(map[uint32]wire.BlockHeader),\n\t}\n}", "func NewMock(path string, nodes uint, replicas uint, vbuckets uint, specs ...BucketSpec) (m *Mock, err error) {\n\tvar lsn *net.TCPListener\n\tchAccept := make(chan bool)\n\tm = &Mock{}\n\n\tdefer func() {\n\t\tclose(chAccept)\n\t\tif lsn != nil {\n\t\t\tif err := lsn.Close(); err != nil {\n\t\t\t\tlog.Printf(\"Failed to close listener: %v\", err)\n\t\t\t}\n\t\t}\n\t\texc := recover()\n\n\t\tif exc == nil {\n\t\t\t// No errors, everything is OK\n\t\t\treturn\n\t\t}\n\n\t\t// Close mock on error, destroying resources\n\t\tm.Close()\n\t\tif mExc, ok := exc.(mockError); !ok {\n\t\t\tpanic(mExc)\n\t\t} else {\n\t\t\tm = nil\n\t\t\terr = mExc\n\t\t}\n\t}()\n\n\tif lsn, err = net.ListenTCP(\"tcp\", &net.TCPAddr{Port: 0}); err != nil {\n\t\tthrowMockError(\"Couldn't set up listening socket\", err)\n\t}\n\t_, ctlPort, err := net.SplitHostPort(lsn.Addr().String())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to split host and port: %v\", err)\n\t}\n\tlog.Printf(\"Listening for control connection at %s\\n\", ctlPort)\n\n\tgo func() {\n\t\tvar err error\n\n\t\tdefer func() {\n\t\t\tchAccept <- false\n\t\t}()\n\t\tif m.conn, err = lsn.Accept(); err != nil {\n\t\t\tthrowMockError(\"Couldn't accept incoming control connection from mock\", err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\tif len(specs) == 0 {\n\t\tspecs = []BucketSpec{{Name: \"default\", Type: BCouchbase}}\n\t}\n\n\toptions := []string{\n\t\t\"-jar\", path, \"--harakiri-monitor\", \"localhost:\" + ctlPort, \"--port\", \"0\",\n\t\t\"--replicas\", strconv.Itoa(int(replicas)),\n\t\t\"--vbuckets\", strconv.Itoa(int(vbuckets)),\n\t\t\"--nodes\", strconv.Itoa(int(nodes)),\n\t\t\"--buckets\", m.buildSpecStrings(specs),\n\t}\n\n\tlog.Printf(\"Invoking java %s\", strings.Join(options, \" \"))\n\tm.cmd = exec.Command(\"java\", options...)\n\n\tm.cmd.Stdout = os.Stdout\n\tm.cmd.Stderr = os.Stderr\n\n\tif err = m.cmd.Start(); err != nil {\n\t\tm.cmd = nil\n\t\tthrowMockError(\"Couldn't start 
command\", err)\n\t}\n\n\tselect {\n\tcase <-chAccept:\n\t\tbreak\n\n\tcase <-time.After(mockInitTimeout):\n\t\tthrowMockError(\"Timed out waiting for initialization\", errors.New(\"timeout\"))\n\t}\n\n\tm.rw = bufio.NewReadWriter(bufio.NewReader(m.conn), bufio.NewWriter(m.conn))\n\n\t// Read the port buffer, which is delimited by a NUL byte\n\tif portBytes, err := m.rw.ReadBytes(0); err != nil {\n\t\tthrowMockError(\"Couldn't get port information\", err)\n\t} else {\n\t\tportBytes = portBytes[:len(portBytes)-1]\n\t\tif entryPort, err := strconv.Atoi(string(portBytes)); err != nil {\n\t\t\tthrowMockError(\"Incorrectly formatted port from mock\", err)\n\t\t} else {\n\t\t\tm.EntryPort = uint16(entryPort)\n\t\t}\n\t}\n\n\tlog.Printf(\"Mock HTTP port at %d\\n\", m.EntryPort)\n\treturn\n}", "func NewMock(now time.Time) *Mock {\n\treturn &Mock{\n\t\tnow: now,\n\t\tmockTimers: &timerHeap{},\n\t}\n}", "func NewMock() *Mock {\n\treturn &Mock{VolumesMock: &VolumesServiceMock{}}\n}", "func NewMock() *Mock {\n\treturn &Mock{now: time.Unix(0, 0)}\n}", "func StubNew(cfg CacheConfig) *Cache {\n\tshared := stubnewShared(cfg)\n\treturn &Cache{\n\t\tshared: shared,\n\t\taccounts: &accountService{shared: shared},\n\t\tconfig: &configService{shared: shared},\n\t}\n}", "func (m *MockLoadBalancerServiceIface) NewListLoadBalancersParams() *ListLoadBalancersParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewListLoadBalancersParams\")\n\tret0, _ := ret[0].(*ListLoadBalancersParams)\n\treturn ret0\n}", "func NewMock() *Mock {\n\treturn &Mock{\n\t\tData: MockData{\n\t\t\tUptime: true,\n\t\t\tFile: true,\n\t\t\tTCPResponse: true,\n\t\t\tHTTPStatus: true,\n\t\t},\n\t}\n}", "func newLoadBalancer(ctx context.Context, frontend NetAddr, policy loadBalancerPolicy, backends ...NetAddr) (*LoadBalancer, error) {\n\tif ctx == nil {\n\t\treturn nil, trace.BadParameter(\"missing parameter context\")\n\t}\n\twaitCtx, waitCancel := context.WithCancel(ctx)\n\treturn &LoadBalancer{\n\t\tfrontend: 
frontend,\n\t\tctx: ctx,\n\t\tbackends: backends,\n\t\tpolicy: policy,\n\t\twaitCtx: waitCtx,\n\t\twaitCancel: waitCancel,\n\t\tEntry: log.WithFields(log.Fields{\n\t\t\ttrace.Component: \"loadbalancer\",\n\t\t\ttrace.ComponentFields: log.Fields{\n\t\t\t\t\"listen\": frontend.String(),\n\t\t\t},\n\t\t}),\n\t\tconnections: make(map[NetAddr]map[int64]net.Conn),\n\t}, nil\n}", "func newMockNetworks() (*MockNetwork, *MockNetwork) {\n\tc := mockCon.NewConn()\n\treturn &MockNetwork{c.Client}, &MockNetwork{c.Server}\n}", "func New() *MockLibvirt {\n\tserv, conn := net.Pipe()\n\n\tm := &MockLibvirt{\n\t\tConn: conn,\n\t\tTest: serv,\n\t}\n\n\tgo m.handle(serv)\n\n\treturn m\n}", "func NewRoundRobin[T any](things ...T) (LoadBalance[T], any) {\n\tif len(things) == 0 {\n\t\treturn nil, ErrNoArguments\n\t}\n\treturn &roundRobin[T]{things: things, next: 0}, nil\n}", "func NewMock(response string) *Operator {\n\treturn &Operator{cli: client.NewMock(response)}\n}", "func NewMock() *MockMetrics {\n\treturn &MockMetrics{}\n}", "func NewRandom(res sd.Resolver) Balancer {\n\treturn &randomLB{\n\t\tres: res,\n\t}\n}", "func NewRandom() *randomLoadBalancer {\n\treturn &randomLoadBalancer{\n\t\trand: rand.New(rand.NewSource(0)),\n\t}\n}", "func NewMocklibrarian(t mockConstructorTestingTNewMocklibrarian) *Mocklibrarian {\n\tmock := &Mocklibrarian{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewRandomLoadBalancer() *RandomLoadBalancer {\n\tr := rand.New(rand.NewSource(time.Now().Unix()))\n\tlb := RandomLoadBalancer(*r)\n\treturn &lb\n}", "func newMock(deps mockDependencies, t testing.TB) (Component, error) {\n\tbackupConfig := config.NewConfig(\"\", \"\", strings.NewReplacer())\n\tbackupConfig.CopyConfig(config.Datadog)\n\n\tconfig.Datadog.CopyConfig(config.NewConfig(\"mock\", \"XXXX\", strings.NewReplacer()))\n\n\tconfig.SetFeatures(t, deps.Params.Features...)\n\n\t// call InitConfig to set 
defaults.\n\tconfig.InitConfig(config.Datadog)\n\tc := &cfg{\n\t\tConfig: config.Datadog,\n\t}\n\n\tif !deps.Params.SetupConfig {\n\n\t\tif deps.Params.ConfFilePath != \"\" {\n\t\t\tconfig.Datadog.SetConfigType(\"yaml\")\n\t\t\terr := config.Datadog.ReadConfig(strings.NewReader(deps.Params.ConfFilePath))\n\t\t\tif err != nil {\n\t\t\t\t// The YAML was invalid, fail initialization of the mock config.\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\twarnings, _ := setupConfig(deps)\n\t\tc.warnings = warnings\n\t}\n\n\t// Overrides are explicit and will take precedence over any other\n\t// setting\n\tfor k, v := range deps.Params.Overrides {\n\t\tconfig.Datadog.Set(k, v)\n\t}\n\n\t// swap the existing config back at the end of the test.\n\tt.Cleanup(func() { config.Datadog.CopyConfig(backupConfig) })\n\n\treturn c, nil\n}", "func newTestChain(t testing.TB) *Blockchain {\n\treturn newTestChainWithCustomCfg(t, nil)\n}", "func NewMock() Cache {\n\treturn &mock{}\n}", "func NewPool(t mockConstructorTestingTNewPool) *Pool {\n\tmock := &Pool{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewGetBalanceCallback(t mockConstructorTestingTNewGetBalanceCallback) *GetBalanceCallback {\n\tmock := &GetBalanceCallback{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (m *MockClusterVersionBuilder) New(arg0 client.Client) clusterversion.ClusterVersion {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"New\", arg0)\n\tret0, _ := ret[0].(clusterversion.ClusterVersion)\n\treturn ret0\n}", "func (m *MockLoadBalancerServiceIface) NewCreateLoadBalancerParams(algorithm string, instanceport int, name, networkid, scheme, sourceipaddressnetworkid string, sourceport int) *CreateLoadBalancerParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewCreateLoadBalancerParams\", algorithm, instanceport, name, networkid, scheme, sourceipaddressnetworkid, sourceport)\n\tret0, _ := 
ret[0].(*CreateLoadBalancerParams)\n\treturn ret0\n}", "func (_m *MockQueryCoord) LoadBalance(ctx context.Context, req *querypb.LoadBalanceRequest) (*commonpb.Status, error) {\n\tret := _m.Called(ctx, req)\n\n\tvar r0 *commonpb.Status\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *querypb.LoadBalanceRequest) (*commonpb.Status, error)); ok {\n\t\treturn rf(ctx, req)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, *querypb.LoadBalanceRequest) *commonpb.Status); ok {\n\t\tr0 = rf(ctx, req)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*commonpb.Status)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *querypb.LoadBalanceRequest) error); ok {\n\t\tr1 = rf(ctx, req)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func newTestingWallet(testdir string, cs modules.ConsensusSet, tp modules.TransactionPool) (modules.Wallet, error) {\n\tw, err := modWallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir), modules.DefaultAddressGapLimit, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey := crypto.GenerateSiaKey(crypto.TypeDefaultWallet)\n\tencrypted, err := w.Encrypted()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !encrypted {\n\t\t_, err = w.Encrypt(key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\terr = w.Unlock(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// give it some money\n\tm, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := types.BlockHeight(0); i <= types.MaturityDelay; i++ {\n\t\t_, err := m.AddBlock()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn w, nil\n}", "func NewLB(addr string) *LB {\n\treturn &LB{\n\t\tServer: &http.Server{\n\t\t\tAddr: addr,\n\t\t},\n\t\tlf: lockfree.New(),\n\t}\n}", "func NewLoadBalancer(c config.LoadBalancerConfig) *LoadBalancer {\n\tvar lb LoadBalancer\n\tif c.Hosts != nil && len(c.Hosts) > 0 {\n\t\tlb.hosts = 
make([]string, len(c.Hosts))\n\t\tfor i, server := range c.Hosts {\n\t\t\tlb.hosts[i] = server\n\t\t\tgloballog.WithFields(logrus.Fields{\n\t\t\t\t\"host\": server,\n\t\t\t\t\"index\": i,\n\t\t\t}).Debug(\"adding lb host\")\n\t\t}\n\t} else {\n\t\tlb.hosts = make([]string, 10)\n\t}\n\tlb.mode = c.BalanceMode\n\tlb.hostLock = new(sync.RWMutex)\n\treturn &lb\n}", "func newLoadBalancerController(t *testing.T, cm *fakeClusterManager) *LoadBalancerController {\n\tkubeClient := fake.NewSimpleClientset()\n\tstopCh := make(chan struct{})\n\tctx := context.NewControllerContext(kubeClient, api_v1.NamespaceAll, 1*time.Second, true)\n\tlb, err := NewLoadBalancerController(kubeClient, stopCh, ctx, cm.ClusterManager, true)\n\tif err != nil {\n\t\tt.Fatalf(\"%v\", err)\n\t}\n\tlb.hasSynced = func() bool { return true }\n\treturn lb\n}", "func NewMocklbDescriber(ctrl *gomock.Controller) *MocklbDescriber {\n\tmock := &MocklbDescriber{ctrl: ctrl}\n\tmock.recorder = &MocklbDescriberMockRecorder{mock}\n\treturn mock\n}", "func NewLoadBalancer() *LoadBalancer {\n\tlb := &LoadBalancer{\n\t\tnodes: make(map[string]*weightedNode),\n\t}\n\treturn lb\n}", "func newMaglevLoadBalancer(info types.ClusterInfo, set types.HostSet) types.LoadBalancer {\n\tnames := []string{}\n\tfor _, host := range set.Hosts() {\n\t\tnames = append(names, host.AddressString())\n\t}\n\tmgv := &maglevLoadBalancer{\n\t\thosts: set,\n\t}\n\n\tnameCount := len(names)\n\t// if host count > BigM, maglev table building will cross array boundary\n\t// maglev lb will not work in this scenario\n\tif nameCount >= maglev.BigM {\n\t\tlog.DefaultLogger.Errorf(\"[lb][maglev] host count too large, expect <= %d, get %d\",\n\t\t\tmaglev.BigM, nameCount)\n\t\treturn mgv\n\t}\n\tif nameCount == 0 {\n\t\treturn mgv\n\t}\n\n\tmaglevM := maglev.SmallM\n\t// according to test, 30000 host with testing 1e8 times, hash distribution begins to go wrong,\n\t// max=4855, mean=3333.3333333333335, peak-to-mean=1.4565\n\t// so use BigM when host >= 
30000\n\tlimit := 30000\n\tif nameCount >= limit {\n\t\tlog.DefaultLogger.Infof(\"[lb][maglev] host count %d >= %d, using maglev.BigM\", nameCount, limit)\n\t\tmaglevM = maglev.BigM\n\t}\n\n\tmgv.maglev = maglev.New(names, uint64(maglevM))\n\treturn mgv\n}", "func HandleFullyPopulatedLoadbalancerCreationSuccessfully(t *testing.T, response string) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestJSONRequest(t, r, `{\n\t\t\t\"loadbalancer\": {\n\t\t\t\t\"admin_state_up\": true,\n\t\t\t\t\"flavor_id\": \"bba40eb2-ee8c-11e9-81b4-2a2ae2dbcce4\",\n\t\t\t\t\"listeners\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"default_pool\": {\n\t\t\t\t\t\t\t\"healthmonitor\": {\n\t\t\t\t\t\t\t\t\"delay\": 3,\n\t\t\t\t\t\t\t\t\"expected_codes\": \"200\",\n\t\t\t\t\t\t\t\t\"http_method\": \"GET\",\n\t\t\t\t\t\t\t\t\"max_retries\": 2,\n\t\t\t\t\t\t\t\t\"max_retries_down\": 3,\n\t\t\t\t\t\t\t\t\"name\": \"db\",\n\t\t\t\t\t\t\t\t\"timeout\": 1,\n\t\t\t\t\t\t\t\t\"type\": \"HTTP\",\n\t\t\t\t\t\t\t\t\"url_path\": \"/index.html\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"lb_algorithm\": \"ROUND_ROBIN\",\n\t\t\t\t\t\t\t\"members\": [\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"address\": \"192.0.2.51\",\n\t\t\t\t\t\t\t\t\t\"protocol_port\": 80\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"address\": \"192.0.2.52\",\n\t\t\t\t\t\t\t\t\t\"protocol_port\": 80\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\"name\": \"Example pool\",\n\t\t\t\t\t\t\t\"protocol\": \"HTTP\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"l7policies\": [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"action\": \"REDIRECT_TO_URL\",\n\t\t\t\t\t\t\t\t\"name\": \"redirect-example.com\",\n\t\t\t\t\t\t\t\t\"redirect_url\": \"http://www.example.com\",\n\t\t\t\t\t\t\t\t\"rules\": [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\"compare_type\": \"REGEX\",\n\t\t\t\t\t\t\t\t\t\t\"type\": 
\"PATH\",\n\t\t\t\t\t\t\t\t\t\t\"value\": \"/images*\"\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t],\n\t\t\t\t\t\t\"name\": \"redirect_listener\",\n\t\t\t\t\t\t\"protocol\": \"HTTP\",\n\t\t\t\t\t\t\"protocol_port\": 8080\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\t\"name\": \"db_lb\",\n\t\t\t\t\"provider\": \"octavia\",\n\t\t\t\t\"tags\": [\n\t\t\t\t\t\"test\",\n\t\t\t\t\t\"stage\"\n\t\t\t\t],\n\t\t\t\t\"vip_address\": \"10.30.176.48\",\n\t\t\t\t\"vip_port_id\": \"2bf413c8-41a9-4477-b505-333d5cbe8b55\",\n\t\t\t\t\"vip_subnet_id\": \"9cedb85d-0759-4898-8a4b-fa5a5ea10086\"\n\t\t\t}\n\t\t}`)\n\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tfmt.Fprintf(w, response)\n\t})\n}", "func (bil *baseInstanceList) newMockCloud() cloud.Cloud {\n\tc := cloud.NewMockGCE(nil)\n\n\t// insert hooks to lazy create a instance when needed\n\tc.MockInstances.GetHook = bil.newGAGetHook()\n\tc.MockBetaInstances.GetHook = bil.newBetaGetHook()\n\n\treturn c\n}", "func (env *env) newTestHelperOpenLgr(id string, t *testing.T) *testhelper {\n\tlgr, err := env.ledgerMgr.OpenLedger(id)\n\tassert.NoError(t, err)\n\tclient, committer, verifier := newClient(lgr, id, t), newCommitter(lgr, t), newVerifier(lgr, t)\n\treturn &testhelper{client, committer, verifier, lgr, id, assert.New(t)}\n}", "func (o *FakeObject) New(args ...interface{}) Object { return o.Invoke(args) }", "func New() *cachestub {\n\treturn &cachestub{}\n}", "func mockBlock(height uint32, txs ...*types.Transaction) *types.Block {\n\treturn &types.Block{\n\t\tHeader: types.Header{\n\t\t\tHeight: height,\n\t\t},\n\t\tTransactions: txs,\n\t}\n}", "func newBase() *base {\n\treturn &base{shared.NewUUID(), time.Now().UTC(), time.Now().UTC(), false/*, shared.NewUUID()*/}\n}", "func newStubDriver() *stubDriver {\n\treturn &stubDriver{\n\t\tdedupedBlocks: make(map[string][]byte),\n\t\theaders: make(map[string][]byte),\n\t}\n}", "func MockInitialize() 
{\n\tledgermgmt.InitializeTestEnvWithInitializer(\n\t\t&ledgermgmt.Initializer{\n\t\t\tCustomTxProcessors: ConfigTxProcessors,\n\t\t},\n\t)\n\tchains.list = make(map[string]*chain)\n\tchainInitializer = func(string) { return }\n}", "func NewMockStore(blocksWritten map[ipld.Link][]byte) (ipldbridge.Loader, ipldbridge.Storer) {\n\tvar storeLk sync.RWMutex\n\tstorer := func(lnkCtx ipldbridge.LinkContext) (io.Writer, ipldbridge.StoreCommitter, error) {\n\t\tvar buffer bytes.Buffer\n\t\tcommitter := func(lnk ipld.Link) error {\n\t\t\tstoreLk.Lock()\n\t\t\tblocksWritten[lnk] = buffer.Bytes()\n\t\t\tstoreLk.Unlock()\n\t\t\treturn nil\n\t\t}\n\t\treturn &buffer, committer, nil\n\t}\n\tloader := func(lnk ipld.Link, lnkCtx ipldbridge.LinkContext) (io.Reader, error) {\n\t\tstoreLk.RLock()\n\t\tdata, ok := blocksWritten[lnk]\n\t\tstoreLk.RUnlock()\n\t\tif ok {\n\t\t\treturn bytes.NewReader(data), nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"unable to load block\")\n\t}\n\n\treturn loader, storer\n}", "func (m *MockLoadBalancerServiceIface) NewCreateLBHealthCheckPolicyParams(lbruleid string) *CreateLBHealthCheckPolicyParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewCreateLBHealthCheckPolicyParams\", lbruleid)\n\tret0, _ := ret[0].(*CreateLBHealthCheckPolicyParams)\n\treturn ret0\n}", "func NewLightbulb(info Info) *Lightbulb {\n\tacc := Lightbulb{}\n\tacc.Accessory = New(info, TypeLightbulb)\n\tacc.Lightbulb = service.NewLightbulb()\n\n\tacc.AddService(acc.Lightbulb.Service)\n\n\treturn &acc\n}", "func New(sub map[string]string) broker.Nats {\n\treturn &mockBroker{\n\t\tsubscriptions: sub,\n\t}\n}", "func NewBusinessScenarioPlanner()(*BusinessScenarioPlanner) {\n m := &BusinessScenarioPlanner{\n Entity: *NewEntity(),\n }\n return m\n}", "func NewMockWithLogger(logger Logger) MockClient {\n\ttestCluster := generateTestCluster()\n\ttestHost := generateTestHost(testCluster)\n\ttestStorageDomain := generateTestStorageDomain()\n\tsecondaryStorageDomain := 
generateTestStorageDomain()\n\ttestDatacenter := generateTestDatacenter(testCluster)\n\ttestNetwork := generateTestNetwork(testDatacenter)\n\ttestVNICProfile := generateTestVNICProfile(testNetwork)\n\tblankTemplate := &template{\n\t\tnil,\n\t\tDefaultBlankTemplateID,\n\t\t\"Blank\",\n\t\t\"Blank template\",\n\t\tTemplateStatusOK,\n\t\t&vmCPU{\n\t\t\t&vmCPUTopo{\n\t\t\t\tcores: 1,\n\t\t\t\tthreads: 1,\n\t\t\t\tsockets: 1,\n\t\t\t},\n\t\t},\n\t}\n\n\tclient := getClient(\n\t\tlogger,\n\t\ttestStorageDomain,\n\t\tsecondaryStorageDomain,\n\t\ttestCluster,\n\t\ttestHost,\n\t\tblankTemplate,\n\t\ttestVNICProfile,\n\t\ttestNetwork,\n\t\ttestDatacenter,\n\t)\n\n\ttestCluster.client = client\n\ttestHost.client = client\n\tblankTemplate.client = client\n\ttestStorageDomain.client = client\n\tsecondaryStorageDomain.client = client\n\ttestDatacenter.client = client\n\ttestNetwork.client = client\n\ttestVNICProfile.client = client\n\n\treturn client\n}", "func (m *MockLoadBalancerServiceIface) NewCreateLBStickinessPolicyParams(lbruleid, methodname, name string) *CreateLBStickinessPolicyParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewCreateLBStickinessPolicyParams\", lbruleid, methodname, name)\n\tret0, _ := ret[0].(*CreateLBStickinessPolicyParams)\n\treturn ret0\n}", "func NewRandomLoadBalancer(ctx context.Context, frontend NetAddr, backends ...NetAddr) (*LoadBalancer, error) {\n\treturn newLoadBalancer(ctx, frontend, randomPolicy(), backends...)\n}", "func newObject(db *StateDB, address helper.Address, data Account, onDirty func(addr helper.Address)) *StateObject {\n\tif data.Balance == nil {\n\t\tdata.Balance = new(big.Int)\n\t}\n\tif data.CodeHash == nil {\n\t\tdata.CodeHash = emptyCodeHash\n\t}\n\treturn &StateObject{db: db, address: address, data: data, cachedStorage: make(Storage), dirtyStorage: make(Storage), onDirty: onDirty}\n}", "func (n *mockAgent) load(s persistapi.AgentState) {}", "func Test_NewTxPool(t *testing.T) {\n\tvar slot uint64 = 64\n\tassert := 
assert.New(t)\n\n\tmock_config := mock_txpool_config(slot)\n\ttxpool := NewTxPool(mock_config)\n\tassert.NotNil(txpool)\n\tinstance := txpool.(*TxPool)\n\tassert.Equal(slot, instance.config.GlobalSlots, \"they should be equal\")\n\tassert.NotNil(instance.all)\n\tassert.NotNil(instance.process)\n\tassert.NotNil(instance.txsQueue)\n\n\tmock_config = mock_txpool_config(uint64(0))\n\ttxpool = NewTxPool(mock_config)\n\tinstance = txpool.(*TxPool)\n\tassert.Equal(uint64(4096), instance.config.GlobalSlots, \"they should be equal\")\n\n}", "func NewSibling(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *Sibling {\n\tmock := &Sibling{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func New() *Prober {\n\treturn newForTest(time.Now, newRealTicker)\n}", "func newBackloader(conf *options) (*backloader, error) {\n\t// validate config\n\tif err := conf.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create producer\n\tp, err := bus.NewProducer(&conf.Bus)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &backloader{\n\t\tbusProducer: p,\n\t\tconfig: conf,\n\t}, nil\n}", "func NewLightning(cfg *config.SubTaskConfig, cli *clientv3.Client, workerName string) *LightningLoader {\n\tlightningCfg := makeGlobalConfig(cfg)\n\tcore := lightning.New(lightningCfg)\n\tloader := &LightningLoader{\n\t\tcfg: cfg,\n\t\tcli: cli,\n\t\tcore: core,\n\t\tlightningConfig: lightningCfg,\n\t\tlogger: log.With(zap.String(\"task\", cfg.Name), zap.String(\"unit\", \"lightning-load\")),\n\t\tworkerName: workerName,\n\t}\n\treturn loader\n}", "func MockCreateChain(cid string) error {\n\tvar ledger ledger.PeerLedger\n\tvar err error\n\n\tif ledger = GetLedger(cid); ledger == nil {\n\t\tgb, _ := configtxtest.MakeGenesisBlock(cid)\n\t\tif ledger, err = ledgermgmt.CreateLedger(gb); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tchains.Lock()\n\tdefer chains.Unlock()\n\n\tchains.list[cid] = &chain{\n\t\tcs: 
&chainSupport{\n\t\t\tResources: &mockchannelconfig.Resources{\n\t\t\t\tPolicyManagerVal: &mockpolicies.Manager{\n\t\t\t\t\tPolicy: &mockpolicies.Policy{},\n\t\t\t\t},\n\t\t\t\tConfigtxValidatorVal: &mockconfigtx.Validator{},\n\t\t\t\tApplicationConfigVal: &mockchannelconfig.MockApplication{CapabilitiesRv: &mockchannelconfig.MockApplicationCapabilities{}},\n\t\t\t},\n\n\t\t\tledger: ledger,\n\t\t},\n\t}\n\n\treturn nil\n}", "func New(url, token string, mock bool, l *logrus.Logger) Nest {\n\n\tinitLog(l)\n\n\tlogDebug(funcName(), \"New nest structure\", url)\n\n\t// Read mock file\n\tif mock {\n\t\tlogWarn(funcName(), \"Mock activated !!!\")\n\t\tmockFileByte = readFile(mockFile)\n\t}\n\n\trest = http.New(log)\n\n\treturn &nest{url: url, token: token, mock: mock}\n\n}", "func trackMockBalances(bankKeeper *govtestutil.MockBankKeeper, distributionKeeper *govtestutil.MockDistributionKeeper) {\n\tbalances := make(map[string]sdk.Coins)\n\tbalances[distAcct.String()] = sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(0)))\n\n\t// We don't track module account balances.\n\tbankKeeper.EXPECT().MintCoins(gomock.Any(), mintModuleName, gomock.Any()).AnyTimes()\n\tbankKeeper.EXPECT().BurnCoins(gomock.Any(), types.ModuleName, gomock.Any()).AnyTimes()\n\tbankKeeper.EXPECT().SendCoinsFromModuleToModule(gomock.Any(), mintModuleName, types.ModuleName, gomock.Any()).AnyTimes()\n\n\t// But we do track normal account balances.\n\tbankKeeper.EXPECT().SendCoinsFromAccountToModule(gomock.Any(), gomock.Any(), types.ModuleName, gomock.Any()).DoAndReturn(func(_ sdk.Context, sender sdk.AccAddress, _ string, coins sdk.Coins) error {\n\t\tnewBalance, negative := balances[sender.String()].SafeSub(coins...)\n\t\tif negative {\n\t\t\treturn fmt.Errorf(\"not enough balance\")\n\t\t}\n\t\tbalances[sender.String()] = newBalance\n\t\treturn nil\n\t}).AnyTimes()\n\tbankKeeper.EXPECT().SendCoinsFromModuleToAccount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(_ 
sdk.Context, module string, rcpt sdk.AccAddress, coins sdk.Coins) error {\n\t\tbalances[rcpt.String()] = balances[rcpt.String()].Add(coins...)\n\t\treturn nil\n\t}).AnyTimes()\n\tbankKeeper.EXPECT().GetAllBalances(gomock.Any(), gomock.Any()).DoAndReturn(func(_ sdk.Context, addr sdk.AccAddress) sdk.Coins {\n\t\treturn balances[addr.String()]\n\t}).AnyTimes()\n\tbankKeeper.EXPECT().GetBalance(gomock.Any(), gomock.Any(), sdk.DefaultBondDenom).DoAndReturn(func(_ sdk.Context, addr sdk.AccAddress, _ string) sdk.Coin {\n\t\tbalances := balances[addr.String()]\n\t\tfor _, balance := range balances {\n\t\t\tif balance.Denom == sdk.DefaultBondDenom {\n\t\t\t\treturn balance\n\t\t\t}\n\t\t}\n\t\treturn sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(0))\n\t}).AnyTimes()\n\n\tdistributionKeeper.EXPECT().FundCommunityPool(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(_ sdk.Context, coins sdk.Coins, sender sdk.AccAddress) error {\n\t\t// sender balance\n\t\tnewBalance, negative := balances[sender.String()].SafeSub(coins...)\n\t\tif negative {\n\t\t\treturn fmt.Errorf(\"not enough balance\")\n\t\t}\n\t\tbalances[sender.String()] = newBalance\n\t\t// receiver balance\n\t\tbalances[distAcct.String()] = balances[distAcct.String()].Add(coins...)\n\t\treturn nil\n\t}).AnyTimes()\n}", "func NewAsyncBR(t mockConstructorTestingTNewAsyncBR) *AsyncBR {\n\tmock := &AsyncBR{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newAzureNetworkMapper() *AzureNetworkMapper {\n\treturn &AzureNetworkMapper{}\n}", "func NewTester(r *http.Request, conns int, dur, rate time.Duration, end string) *LoadTester {\n\treturn &LoadTester{\n\t\tendpoint: end,\n\t\trequest: r,\n\t\tclient: &http.Client{},\n\t\tconns: conns,\n\t\tdur: dur,\n\t\trate: rate,\n\t\tstats: &Stats{Endpoint: end},\n\t}\n}", "func (t *Blockchain) New() *Blockchain {\n\tt = new(Blockchain)\n\tt.NewBlock(100, \"1\")\n\treturn t\n}", "func NewMock() Client {\n\treturn 
&mockClient{}\n}", "func NewMockSupport() *MockSupport {\n\treturn &MockSupport{\n\t\tPublisher: NewBlockPublisher(),\n\t}\n}", "func newTruck(mk, mdl string) *truck {\n\treturn &truck{vehicle: vehicle{mk, mdl}}\n}", "func (m *MockLoadBalancerServiceIface) NewListNetscalerLoadBalancersParams() *ListNetscalerLoadBalancersParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewListNetscalerLoadBalancersParams\")\n\tret0, _ := ret[0].(*ListNetscalerLoadBalancersParams)\n\treturn ret0\n}", "func NewLob(baseAPI, apiKey, userAgent string) *lob {\n\treturn &lob{\n\t\tBaseAPI: baseAPI,\n\t\tAPIKey: apiKey,\n\t\tUserAgent: userAgent,\n\t}\n}", "func Mock() Cluster { return mockCluster{} }", "func newBlockchain(opts ...emulator.Option) *emulator.Blockchain {\n\tb, err := emulator.NewBlockchain(\n\t\tappend(\n\t\t\t[]emulator.Option{\n\t\t\t\temulator.WithStorageLimitEnabled(false),\n\t\t\t},\n\t\t\topts...,\n\t\t)...,\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}", "func newBlockchain(opts ...emulator.Option) *emulator.Blockchain {\n\tb, err := emulator.NewBlockchain(\n\t\tappend(\n\t\t\t[]emulator.Option{\n\t\t\t\temulator.WithStorageLimitEnabled(false),\n\t\t\t},\n\t\t\topts...,\n\t\t)...,\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}", "func NewLoadManager(suiteCfg *SuiteConfig, genCfg *GeneratorConfig) *LoadManager {\n\tvar err error\n\tcsvLog := csv.NewWriter(createFileOrAppend(\"result.csv\"))\n\tscalingLog := csv.NewWriter(createFileOrAppend(\"scaling.csv\"))\n\n\tlm := &LoadManager{\n\t\tSuiteConfig: suiteCfg,\n\t\tGeneratorConfig: genCfg,\n\t\tCsvMu: &sync.Mutex{},\n\t\tCSVLogMu: &sync.Mutex{},\n\t\tCSVLog: csvLog,\n\t\tRPSScalingLog: scalingLog,\n\t\tSteps: make([]RunStep, 0),\n\t\tReports: make(map[string]*RunReport),\n\t\tCsvStore: make(map[string]*CSVData),\n\t\tDegradation: false,\n\t}\n\tif lm.ReportDir, err = filepath.Abs(filepath.Join(\"example_loadtest\", \"reports\")); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn lm\n}", 
"func NewMemLoadHandler(res http.ResponseWriter, req *http.Request) {\n\tlog.Println(\"Entered NewMemLoadHandler\")\n\tflusher, ok := res.(http.Flusher)\n\tif !ok {\n\t\thttp.Error(res, \"Server does not support flusher\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tmemUsed := parseOrDefault(req, \"mib\", 1000)\n\n\tfmt.Fprintf(res, \"Starting new mem test with %d MiB memory used ۞\\n\", memUsed)\n\tflusher.Flush()\n\n\tmemory := make([]byte, memUsed*1024*1024)\n\terr := syscall.Mlock(memory)\n\tif err != nil {\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\ttime.Sleep(10 * time.Second)\n\n\tfmt.Fprint(res, \"New memory test finished!! 🏁\\n\")\n\tlog.Println(\"Leaving NewMemLoadHandler\")\n}", "func (p *RoundRobinPool) LoadBalance(w http.ResponseWriter, r *http.Request) {\n\tattempts := getAttemptsFromContext(r)\n\tif attempts > 3 {\n\t\tlog.Printf(\"%s(%s) Max attempts reached, terminating\\n\", r.RemoteAddr, r.URL.Path)\n\t\thttp.Error(w, \"Service not available\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\tpeer := p.Next()\n\tif peer != nil {\n\t\tpeer.ReverseProxy.ServeHTTP(w, r)\n\t\treturn\n\t}\n\thttp.Error(w, \"Service not available\", http.StatusServiceUnavailable)\n}", "func New(config contract.ConfigAccessor) Module {\n\treturn Module{config: config}\n}", "func newMockSubscriber() mockSubscriber {\n\treturn mockSubscriber{}\n}", "func newObject(db *StateDB, address common.Address, data model.StateAccount) *stateObject {\n\tif data.Balance == nil {\n\t\tdata.Balance = new(big.Int)\n\t}\n\tif data.CodeHash == nil {\n\t\tdata.CodeHash = emptyCodeHash\n\t}\n\tif data.Root == (common.Hash{}) {\n\t\tdata.Root = emptyRoot\n\t}\n\treturn &stateObject{\n\t\tdb: db,\n\t\taddress: address,\n\t\taddrHash: crypto.Keccak256Hash(address[:]),\n\t\tdata: data,\n\t\toriginStorage: make(Storage),\n\t\tpendingStorage: make(Storage),\n\t\tdirtyStorage: make(Storage),\n\t}\n}", "func NewRetrieveBalance(\n\tr 
*http.Request,\n) (Endpoint, error) {\n\treturn &RetrieveBalance{}, nil\n\n}", "func NewBridgeAccessor(t mockConstructorTestingTNewBridgeAccessor) *BridgeAccessor {\n\tmock := &BridgeAccessor{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func ExampleNew() {\n\ttoken := \"0xb64ef51c888972c908cfacf59b47c1afbc0ab8ac\"\n\twallet := \"0x9ea0c535b3eb166454c8ccbaba86850c8df3ee57\"\n\texample, _ = New(token, wallet)\n\tfmt.Printf(\"This wallet has %v %v tokens\", example.BalanceString(), example.Name)\n\t// Output: This wallet has 7.282 StorjToken tokens\n}", "func (m *MockLoadBalancerServiceIface) NewCreateLoadBalancerRuleParams(algorithm, name string, privateport, publicport int) *CreateLoadBalancerRuleParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewCreateLoadBalancerRuleParams\", algorithm, name, privateport, publicport)\n\tret0, _ := ret[0].(*CreateLoadBalancerRuleParams)\n\treturn ret0\n}", "func New(capacity int64) chainstore.Store {\n\tmemStore := &memStore{\n\t\tdata: make(map[string][]byte, 1000),\n\t}\n\tstore := lrumgr.New(capacity, memStore)\n\treturn store\n}", "func New(ctx context.Context, backupTables tablesmap.TablesMapInterface, cfg *config.Config) *basebackup {\n\tb := basebackup{\n\t\tctx: ctx,\n\t\tcfg: cfg,\n\t\twg: &sync.WaitGroup{},\n\t\tbackupTables: backupTables,\n\t\tqueue: queue.New(ctx),\n\t}\n\n\treturn &b\n}", "func NewMock() MockClient {\n\treturn NewMockWithLogger(&noopLogger{})\n}", "func New(options ...func(*sqlmock) error) (*sql.DB, Sqlmock, error) {\n\tpool.Lock()\n\tdsn := fmt.Sprintf(\"sqlmock_db_%d\", pool.counter)\n\tpool.counter++\n\n\tsmock := &sqlmock{dsn: dsn, drv: pool, ordered: true}\n\tpool.conns[dsn] = smock\n\tpool.Unlock()\n\n\treturn smock.open(options)\n}", "func NewMock() *Mock {\n\tc := &Mock{\n\t\tFakeIncoming: func() chan []byte {\n\t\t\treturn make(chan []byte, 2)\n\t\t},\n\t\tFakeName: func() string {\n\t\t\treturn 
\"TestClient\"\n\t\t},\n\t\tFakeGame: func() string {\n\t\t\treturn \"test\"\n\t\t},\n\t\tFakeClose: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeStopTimer: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeRoom: func() interfaces.Room {\n\t\t\treturn nil\n\t\t},\n\t\tFakeSetRoom: func(interfaces.Room) {\n\n\t\t},\n\t}\n\n\tc.FakeWritePump = func() {\n\t\tfor range c.Incoming() {\n\t\t\t// Do nothing\n\t\t}\n\t}\n\n\tc.FakeSetName = func(string) interfaces.Client {\n\t\treturn c\n\t}\n\treturn c\n}", "func New(conf common.Bitcoind) (Bitcoind, error) {\n\t// Check if theres a bitcoin conf defined\n\tif conf.Host == \"\" {\n\t\tconf.Host = DefaultHostname\n\t}\n\tif conf.Port == 0 {\n\t\tconf.Port = DefaultPort\n\t}\n\tif conf.User == \"\" {\n\t\tconf.User = DefaultUsername\n\t}\n\tclient := Bitcoind{\n\t\turl: fmt.Sprintf(\"http://%s:%d\", conf.Host, conf.Port),\n\t\tuser: conf.User,\n\t\tpass: conf.Pass,\n\t}\n\tfmt.Printf(\"Creating bitcoin client... %s\\n\", client.url)\n\t_, err := client.BlockCount()\n\tif err != nil {\n\t\treturn Bitcoind{}, fmt.Errorf(\"can't connect to Bitcoind: %w\", err)\n\t}\n\n\treturn client, nil\n}", "func NewLLRB(name string, setts s.Settings) *LLRB {\n\tllrb := &LLRB{name: name, finch: make(chan struct{})}\n\tllrb.logprefix = fmt.Sprintf(\"LLRB [%s]\", name)\n\tllrb.inittxns()\n\n\tsetts = make(s.Settings).Mixin(Defaultsettings(), setts)\n\tllrb.readsettings(setts)\n\tllrb.setts = setts\n\n\tllrb.nodearena = malloc.NewArena(llrb.memcapacity, llrb.allocator)\n\tllrb.valarena = malloc.NewArena(llrb.memcapacity, llrb.allocator)\n\n\t// statistics\n\tllrb.h_upsertdepth = lib.NewhistorgramInt64(10, 100, 10)\n\n\tinfof(\"%v started ...\\n\", llrb.logprefix)\n\tllrb.logarenasettings()\n\treturn llrb\n}", "func (t TestFactoryT) NewMockMemStore() *memStoreImpl {\n\tmetaStore := new(metaMocks.MetaStore)\n\tdiskStore := new(diskMocks.DiskStore)\n\tredoLogManagerMaster, _ := redolog.NewRedoLogManagerMaster(&common.RedoLogConfig{}, diskStore, 
metaStore)\n\tbootstrapToken := new(memComMocks.BootStrapToken)\n\tbootstrapToken.On(\"AcquireToken\", mock.Anything, mock.Anything).Return(true)\n\tbootstrapToken.On(\"ReleaseToken\", mock.Anything, mock.Anything).Return()\n\n\treturn NewMemStore(metaStore, diskStore, NewOptions(bootstrapToken, redoLogManagerMaster)).(*memStoreImpl)\n}", "func (s *deployerSuite) TestNew(c *gc.C) {\n\tdeployer := deployer.NewState(s.stateAPI)\n\tc.Assert(deployer, gc.NotNil)\n}", "func New(token string) *Clubhouse {\n\treturn &Clubhouse{\n\t\tToken: token,\n\t\tClient: &http.Client{},\n\t}\n}" ]
[ "0.66606605", "0.6075373", "0.598331", "0.5763374", "0.5699327", "0.55913365", "0.55694485", "0.556245", "0.5560254", "0.55587983", "0.5541633", "0.5503419", "0.54720664", "0.5430354", "0.5410432", "0.539768", "0.5386954", "0.5376529", "0.5348067", "0.5305424", "0.5278028", "0.52713996", "0.5226863", "0.5224033", "0.52120936", "0.52075875", "0.51883453", "0.5177859", "0.5159927", "0.51574343", "0.5150077", "0.5114677", "0.50905484", "0.50902206", "0.50838375", "0.5082365", "0.5074456", "0.5074212", "0.5055854", "0.5046343", "0.5044209", "0.50434196", "0.5030284", "0.50275207", "0.50215733", "0.5006062", "0.50038224", "0.4994567", "0.4988589", "0.497742", "0.4975709", "0.49734753", "0.49696302", "0.4951393", "0.4950888", "0.4949056", "0.49385667", "0.49304977", "0.49277586", "0.49272013", "0.49139535", "0.49103573", "0.49097794", "0.4898729", "0.48925376", "0.48856723", "0.4885342", "0.48780277", "0.4874944", "0.48742774", "0.4871485", "0.48605582", "0.4855711", "0.48485023", "0.48375642", "0.48302162", "0.48240983", "0.48215446", "0.48151988", "0.48151988", "0.48088554", "0.48081598", "0.48069808", "0.48064858", "0.48025095", "0.48013577", "0.4799388", "0.47961372", "0.47959346", "0.47895953", "0.478083", "0.47804177", "0.47799328", "0.47792956", "0.47780228", "0.47755092", "0.47669998", "0.4766711", "0.4764529", "0.47599077" ]
0.6756558
0
EXPECT returns an object that allows the caller to indicate expected use
func (m *MockLoadBalance) EXPECT() *MockLoadBalanceMockRecorder { return m.recorder }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mmGetObject *mClientMockGetObject) Expect(ctx context.Context, head insolar.Reference) *mClientMockGetObject {\n\tif mmGetObject.mock.funcGetObject != nil {\n\t\tmmGetObject.mock.t.Fatalf(\"ClientMock.GetObject mock is already set by Set\")\n\t}\n\n\tif mmGetObject.defaultExpectation == nil {\n\t\tmmGetObject.defaultExpectation = &ClientMockGetObjectExpectation{}\n\t}\n\n\tmmGetObject.defaultExpectation.params = &ClientMockGetObjectParams{ctx, head}\n\tfor _, e := range mmGetObject.expectations {\n\t\tif minimock.Equal(e.params, mmGetObject.defaultExpectation.params) {\n\t\t\tmmGetObject.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetObject.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetObject\n}", "func (r Requester) Assert(actual, expected interface{}) Requester {\n\t//r.actualResponse = actual\n\t//r.expectedResponse = expected\n\treturn r\n}", "func (r *Request) Expect(t *testing.T) *Response {\n\tr.apiTest.t = t\n\treturn r.apiTest.response\n}", "func (m *MockNotary) Notarize(arg0 string) (map[string]interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Notarize\", arg0)\n\tret0, _ := ret[0].(map[string]interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (tc TestCases) expect() {\n\tfmt.Println(cnt)\n\tcnt++\n\tif !reflect.DeepEqual(tc.resp, tc.respExp) {\n\t\ttc.t.Error(fmt.Sprintf(\"\\nRequested: \", tc.req, \"\\nExpected: \", tc.respExp, \"\\nFound: \", tc.resp))\n\t}\n}", "func (r *Request) Expect(t TestingT) *Response {\n\tr.apiTest.t = t\n\treturn r.apiTest.response\n}", "func Expect(t cbtest.T, actual interface{}, matcher matcher.Matcher, labelAndArgs ...interface{}) {\n\tt.Helper()\n\tres := ExpectE(t, actual, matcher, labelAndArgs...)\n\tif !res {\n\t\tt.FailNow()\n\t}\n}", "func (m *MockisObject_Obj) EXPECT() *MockisObject_ObjMockRecorder {\n\treturn m.recorder\n}", "func Expect(t *testing.T, v, m interface{}) {\n\tvt, vok := v.(Equaler)\n\tmt, mok := m.(Equaler)\n\n\tvar 
state bool\n\tif vok && mok {\n\t\tstate = vt.Equal(mt)\n\t} else {\n\t\tstate = reflect.DeepEqual(v, m)\n\t}\n\n\tif state {\n\t\tflux.FatalFailed(t, \"Value %+v and %+v are not a match\", v, m)\n\t\treturn\n\t}\n\tflux.LogPassed(t, \"Value %+v and %+v are a match\", v, m)\n}", "func (mmState *mClientMockState) Expect() *mClientMockState {\n\tif mmState.mock.funcState != nil {\n\t\tmmState.mock.t.Fatalf(\"ClientMock.State mock is already set by Set\")\n\t}\n\n\tif mmState.defaultExpectation == nil {\n\t\tmmState.defaultExpectation = &ClientMockStateExpectation{}\n\t}\n\n\treturn mmState\n}", "func (mmProvide *mContainerMockProvide) Expect(constructor interface{}) *mContainerMockProvide {\n\tif mmProvide.mock.funcProvide != nil {\n\t\tmmProvide.mock.t.Fatalf(\"ContainerMock.Provide mock is already set by Set\")\n\t}\n\n\tif mmProvide.defaultExpectation == nil {\n\t\tmmProvide.defaultExpectation = &ContainerMockProvideExpectation{}\n\t}\n\n\tmmProvide.defaultExpectation.params = &ContainerMockProvideParams{constructor}\n\tfor _, e := range mmProvide.expectations {\n\t\tif minimock.Equal(e.params, mmProvide.defaultExpectation.params) {\n\t\t\tmmProvide.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmProvide.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmProvide\n}", "func Mock() Env {\n\treturn mock.New()\n}", "func (mmGetCode *mClientMockGetCode) Expect(ctx context.Context, ref insolar.Reference) *mClientMockGetCode {\n\tif mmGetCode.mock.funcGetCode != nil {\n\t\tmmGetCode.mock.t.Fatalf(\"ClientMock.GetCode mock is already set by Set\")\n\t}\n\n\tif mmGetCode.defaultExpectation == nil {\n\t\tmmGetCode.defaultExpectation = &ClientMockGetCodeExpectation{}\n\t}\n\n\tmmGetCode.defaultExpectation.params = &ClientMockGetCodeParams{ctx, ref}\n\tfor _, e := range mmGetCode.expectations {\n\t\tif minimock.Equal(e.params, mmGetCode.defaultExpectation.params) {\n\t\t\tmmGetCode.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", 
*mmGetCode.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetCode\n}", "func expect(t *testing.T, method, url string, testieOptions ...func(*http.Request)) *testie {\n\treq, err := http.NewRequest(method, url, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, opt := range testieOptions {\n\t\topt(req)\n\t}\n\n\treturn testReq(t, req)\n}", "func (_m *MockOStream) EXPECT() *MockOStreamMockRecorder {\n\treturn _m.recorder\n}", "func (mmGetUser *mStorageMockGetUser) Expect(ctx context.Context, userID int64) *mStorageMockGetUser {\n\tif mmGetUser.mock.funcGetUser != nil {\n\t\tmmGetUser.mock.t.Fatalf(\"StorageMock.GetUser mock is already set by Set\")\n\t}\n\n\tif mmGetUser.defaultExpectation == nil {\n\t\tmmGetUser.defaultExpectation = &StorageMockGetUserExpectation{}\n\t}\n\n\tmmGetUser.defaultExpectation.params = &StorageMockGetUserParams{ctx, userID}\n\tfor _, e := range mmGetUser.expectations {\n\t\tif minimock.Equal(e.params, mmGetUser.defaultExpectation.params) {\n\t\t\tmmGetUser.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetUser.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetUser\n}", "func (mmGetObject *mClientMockGetObject) Return(o1 ObjectDescriptor, err error) *ClientMock {\n\tif mmGetObject.mock.funcGetObject != nil {\n\t\tmmGetObject.mock.t.Fatalf(\"ClientMock.GetObject mock is already set by Set\")\n\t}\n\n\tif mmGetObject.defaultExpectation == nil {\n\t\tmmGetObject.defaultExpectation = &ClientMockGetObjectExpectation{mock: mmGetObject.mock}\n\t}\n\tmmGetObject.defaultExpectation.results = &ClientMockGetObjectResults{o1, err}\n\treturn mmGetObject.mock\n}", "func (mmGather *mGathererMockGather) Expect() *mGathererMockGather {\n\tif mmGather.mock.funcGather != nil {\n\t\tmmGather.mock.t.Fatalf(\"GathererMock.Gather mock is already set by Set\")\n\t}\n\n\tif mmGather.defaultExpectation == nil {\n\t\tmmGather.defaultExpectation = &GathererMockGatherExpectation{}\n\t}\n\n\treturn mmGather\n}", "func (m 
*MockParser) EXPECT() *MockParserMockRecorder {\n\treturn m.recorder\n}", "func (m *MockParser) EXPECT() *MockParserMockRecorder {\n\treturn m.recorder\n}", "func (mmWriteTo *mDigestHolderMockWriteTo) Expect(w io.Writer) *mDigestHolderMockWriteTo {\n\tif mmWriteTo.mock.funcWriteTo != nil {\n\t\tmmWriteTo.mock.t.Fatalf(\"DigestHolderMock.WriteTo mock is already set by Set\")\n\t}\n\n\tif mmWriteTo.defaultExpectation == nil {\n\t\tmmWriteTo.defaultExpectation = &DigestHolderMockWriteToExpectation{}\n\t}\n\n\tmmWriteTo.defaultExpectation.params = &DigestHolderMockWriteToParams{w}\n\tfor _, e := range mmWriteTo.expectations {\n\t\tif minimock.Equal(e.params, mmWriteTo.defaultExpectation.params) {\n\t\t\tmmWriteTo.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmWriteTo.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmWriteTo\n}", "func (rb *RequestBuilder) EXPECT() *ResponseAsserter {\n\treq := httptest.NewRequest(rb.method, rb.path, rb.body)\n\tfor k, v := range rb.hdr {\n\t\treq.Header[k] = v\n\t}\n\n\trec := httptest.NewRecorder()\n\trb.cas.h.ServeHTTP(rec, req)\n\n\treturn &ResponseAsserter{\n\t\trec: rec,\n\t\treq: req,\n\t\tb: rb,\n\t\tfail: rb.fail.\n\t\t\tCopy().\n\t\t\tWithRequest(req).\n\t\t\tWithResponse(rec),\n\t}\n}", "func (mmGetState *mGatewayMockGetState) Expect() *mGatewayMockGetState {\n\tif mmGetState.mock.funcGetState != nil {\n\t\tmmGetState.mock.t.Fatalf(\"GatewayMock.GetState mock is already set by Set\")\n\t}\n\n\tif mmGetState.defaultExpectation == nil {\n\t\tmmGetState.defaultExpectation = &GatewayMockGetStateExpectation{}\n\t}\n\n\treturn mmGetState\n}", "func (m *mParcelMockGetSign) Expect() *mParcelMockGetSign {\n\tm.mock.GetSignFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetSignExpectation{}\n\t}\n\n\treturn m\n}", "func (mmCreateTag *mTagCreatorMockCreateTag) Expect(t1 semantic.Tag) *mTagCreatorMockCreateTag {\n\tif mmCreateTag.mock.funcCreateTag != 
nil {\n\t\tmmCreateTag.mock.t.Fatalf(\"TagCreatorMock.CreateTag mock is already set by Set\")\n\t}\n\n\tif mmCreateTag.defaultExpectation == nil {\n\t\tmmCreateTag.defaultExpectation = &TagCreatorMockCreateTagExpectation{}\n\t}\n\n\tmmCreateTag.defaultExpectation.params = &TagCreatorMockCreateTagParams{t1}\n\tfor _, e := range mmCreateTag.expectations {\n\t\tif minimock.Equal(e.params, mmCreateTag.defaultExpectation.params) {\n\t\t\tmmCreateTag.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmCreateTag.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmCreateTag\n}", "func (m *MockActorUsecase) EXPECT() *MockActorUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockGetCaller) Expect() *mParcelMockGetCaller {\n\tm.mock.GetCallerFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetCallerExpectation{}\n\t}\n\n\treturn m\n}", "func mockAlwaysRun() bool { return true }", "func (m *MockArg) EXPECT() *MockArgMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (st *SDKTester) Test(resp interface{}) {\n\tif resp == nil || st.respWant == nil {\n\t\tst.t.Logf(\"response want/got is nil, abort\\n\")\n\t\treturn\n\t}\n\n\trespMap := st.getFieldMap(resp)\n\tfor i, v := range st.respWant {\n\t\tif reflect.DeepEqual(v, 
respMap[i]) {\n\t\t\tcontinue\n\t\t}\n\t\tswitch x := respMap[i].(type) {\n\t\tcase Stringer:\n\t\t\tif !assert.Equal(st.t, v, x.String()) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\tcase map[string]interface{}:\n\t\t\tif value, ok := x[\"Value\"]; ok {\n\t\t\t\tif !assert.Equal(st.t, v, value) {\n\t\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t\t}\n\t\t\t}\n\t\tcase Inter:\n\t\t\tif !assert.Equal(st.t, v, x.Int()) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\tdefault:\n\t\t\tif !assert.Equal(st.t, v, respMap[i]) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *MockCreator) EXPECT() *MockCreatorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockCreator) EXPECT() *MockCreatorMockRecorder {\n\treturn m.recorder\n}", "func TestCallFunc_arguments(t *testing.T) {\n\n}", "func (m *mParcelMockGetSender) Expect() *mParcelMockGetSender {\n\tm.mock.GetSenderFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetSenderExpectation{}\n\t}\n\n\treturn m\n}", "func TestGetNone4A(t *testing.T) {\n}", "func expectEqual(value, expected interface{}) {\n\tif value != expected {\n\t\tfmt.Printf(\"Fehler: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t} else {\n\t\tfmt.Printf(\"OK: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t}\n}", "func (mmHasPendings *mClientMockHasPendings) Expect(ctx context.Context, object insolar.Reference) *mClientMockHasPendings {\n\tif mmHasPendings.mock.funcHasPendings != nil {\n\t\tmmHasPendings.mock.t.Fatalf(\"ClientMock.HasPendings mock is already set by Set\")\n\t}\n\n\tif mmHasPendings.defaultExpectation == nil {\n\t\tmmHasPendings.defaultExpectation = &ClientMockHasPendingsExpectation{}\n\t}\n\n\tmmHasPendings.defaultExpectation.params = &ClientMockHasPendingsParams{ctx, object}\n\tfor _, e := range 
mmHasPendings.expectations {\n\t\tif minimock.Equal(e.params, mmHasPendings.defaultExpectation.params) {\n\t\t\tmmHasPendings.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmHasPendings.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmHasPendings\n}", "func (mmGetPacketSignature *mPacketParserMockGetPacketSignature) Expect() *mPacketParserMockGetPacketSignature {\n\tif mmGetPacketSignature.mock.funcGetPacketSignature != nil {\n\t\tmmGetPacketSignature.mock.t.Fatalf(\"PacketParserMock.GetPacketSignature mock is already set by Set\")\n\t}\n\n\tif mmGetPacketSignature.defaultExpectation == nil {\n\t\tmmGetPacketSignature.defaultExpectation = &PacketParserMockGetPacketSignatureExpectation{}\n\t}\n\n\treturn mmGetPacketSignature\n}", "func Run(t testing.TB, cloud cloud.Client, src string, opts ...RunOption) {\n\n\tif cloud == nil {\n\t\tcloud = mockcloud.Client(nil)\n\t}\n\n\tvm := otto.New()\n\n\tpkg, err := godotto.Apply(context.Background(), vm, cloud)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvm.Set(\"cloud\", pkg)\n\tvm.Set(\"equals\", func(call otto.FunctionCall) otto.Value {\n\t\tvm := call.Otto\n\t\tgot, err := call.Argument(0).Export()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\twant, err := call.Argument(1).Export()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\tok, cause := deepEqual(got, want)\n\t\tif ok {\n\t\t\treturn otto.UndefinedValue()\n\t\t}\n\t\tmsg := \"assertion failed!\\n\" + cause\n\n\t\tif len(call.ArgumentList) > 2 {\n\t\t\tformat, err := call.ArgumentList[2].ToString()\n\t\t\tif err != nil {\n\t\t\t\tottoutil.Throw(vm, err.Error())\n\t\t\t}\n\t\t\tmsg += \"\\n\" + format\n\t\t}\n\t\tottoutil.Throw(vm, msg)\n\t\treturn otto.UndefinedValue()\n\t})\n\tvm.Set(\"assert\", func(call otto.FunctionCall) otto.Value {\n\t\tvm := call.Otto\n\t\tv, err := call.Argument(0).ToBoolean()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\tif v {\n\t\t\treturn 
otto.UndefinedValue()\n\t\t}\n\t\tmsg := \"assertion failed!\"\n\t\tif len(call.ArgumentList) > 1 {\n\t\t\tformat, err := call.ArgumentList[1].ToString()\n\t\t\tif err != nil {\n\t\t\t\tottoutil.Throw(vm, err.Error())\n\t\t\t}\n\t\t\tmsg += \"\\n\" + format\n\t\t}\n\t\tottoutil.Throw(vm, msg)\n\t\treturn otto.UndefinedValue()\n\t})\n\tscript, err := vm.Compile(\"\", src)\n\tif err != nil {\n\t\tt.Fatalf(\"invalid code: %v\", err)\n\t}\n\n\tfor _, opt := range opts {\n\t\tif err := opt(vm); err != nil {\n\t\t\tt.Fatalf(\"can't apply option: %v\", err)\n\t\t}\n\t}\n\n\tif _, err := vm.Run(script); err != nil {\n\t\tif oe, ok := err.(*otto.Error); ok {\n\t\t\tt.Fatal(oe.String())\n\t\t} else {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func TestSetGoodArgs(t *testing.T) {\n\tfmt.Println(\"Entering the test method for SetGoodArgs\")\n\tprovcc := new(SimpleAsset)\n\tstub := shim.NewMockStub(\"ANY_PARAM\", provcc)\n\n\t// Testing the init. It always return true. No parameters in init. \n\t\n\tcheckInit(t, stub, [][]byte{[]byte(\"init\")})\n\n\tres := stub.MockInvoke(\"1\", [][]byte{[]byte(\"set\"), []byte(\"S52fkpF2rCEArSuwqyDA9tVjawUdrkGzbNQLaa7xJfA=\"),\n\t[]byte(\"agentInfo.atype\"),[]byte(\"1.2.3.4\"),\n\t[]byte(\"agentInfo.id\"),[]byte(\"agentidentifier\"),\n\t[]byte(\"agentinfo.name\"),[]byte(\"7.8.9\"),\n\t[]byte(\"agentinfo.idp\"),[]byte(\"urn:tiani-spirit:sts\"),\n\t[]byte(\"locationInfo.id\"),[]byte(\"urn:oid:1.2.3\"),\n\t[]byte(\"locationInfo.name\"),[]byte(\"General Hospital\"),\n\t[]byte(\"locationInfo.locality\"),[]byte(\"Nashville, TN\"),\n\t[]byte(\"locationInfo.docid\"),[]byte(\"1.2.3\"),\n\t[]byte(\"action\"),[]byte(\"ex:CREATE\"),\n\t[]byte(\"date\"),[]byte(\"2018-11-10T12:15:55.028Z\")})\n\n\tif res.Status != shim.OK {\n\t\tfmt.Println(\"Invoke failed\", string(res.Message))\n\t\tt.FailNow()\n\t}\n\t\n}", "func (mmRegisterResult *mClientMockRegisterResult) Expect(ctx context.Context, request insolar.Reference, result RequestResult) 
*mClientMockRegisterResult {\n\tif mmRegisterResult.mock.funcRegisterResult != nil {\n\t\tmmRegisterResult.mock.t.Fatalf(\"ClientMock.RegisterResult mock is already set by Set\")\n\t}\n\n\tif mmRegisterResult.defaultExpectation == nil {\n\t\tmmRegisterResult.defaultExpectation = &ClientMockRegisterResultExpectation{}\n\t}\n\n\tmmRegisterResult.defaultExpectation.params = &ClientMockRegisterResultParams{ctx, request, result}\n\tfor _, e := range mmRegisterResult.expectations {\n\t\tif minimock.Equal(e.params, mmRegisterResult.defaultExpectation.params) {\n\t\t\tmmRegisterResult.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmRegisterResult.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmRegisterResult\n}", "func Mock() Cluster { return mockCluster{} }", "func (m *MockS3API) EXPECT() *MockS3APIMockRecorder {\n\treturn m.recorder\n}", "func mockedGranter(kubeutil *kube.Kube, app *v1.RadixRegistration, namespace string, serviceAccount *corev1.ServiceAccount) error {\n\treturn nil\n}", "func (mmGetPendings *mClientMockGetPendings) Expect(ctx context.Context, objectRef insolar.Reference) *mClientMockGetPendings {\n\tif mmGetPendings.mock.funcGetPendings != nil {\n\t\tmmGetPendings.mock.t.Fatalf(\"ClientMock.GetPendings mock is already set by Set\")\n\t}\n\n\tif mmGetPendings.defaultExpectation == nil {\n\t\tmmGetPendings.defaultExpectation = &ClientMockGetPendingsExpectation{}\n\t}\n\n\tmmGetPendings.defaultExpectation.params = &ClientMockGetPendingsParams{ctx, objectRef}\n\tfor _, e := range mmGetPendings.expectations {\n\t\tif minimock.Equal(e.params, mmGetPendings.defaultExpectation.params) {\n\t\t\tmmGetPendings.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetPendings.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetPendings\n}", "func (m *MockOrg) EXPECT() *MockOrgMockRecorder {\n\treturn m.recorder\n}", "func (mmGetUserLocation *mStorageMockGetUserLocation) Expect(ctx context.Context, userID int64) 
*mStorageMockGetUserLocation {\n\tif mmGetUserLocation.mock.funcGetUserLocation != nil {\n\t\tmmGetUserLocation.mock.t.Fatalf(\"StorageMock.GetUserLocation mock is already set by Set\")\n\t}\n\n\tif mmGetUserLocation.defaultExpectation == nil {\n\t\tmmGetUserLocation.defaultExpectation = &StorageMockGetUserLocationExpectation{}\n\t}\n\n\tmmGetUserLocation.defaultExpectation.params = &StorageMockGetUserLocationParams{ctx, userID}\n\tfor _, e := range mmGetUserLocation.expectations {\n\t\tif minimock.Equal(e.params, mmGetUserLocation.defaultExpectation.params) {\n\t\t\tmmGetUserLocation.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetUserLocation.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetUserLocation\n}", "func (mmCreate *mPaymentRepositoryMockCreate) Expect(ctx context.Context, from int64, to int64, amount int64) *mPaymentRepositoryMockCreate {\n\tif mmCreate.mock.funcCreate != nil {\n\t\tmmCreate.mock.t.Fatalf(\"PaymentRepositoryMock.Create mock is already set by Set\")\n\t}\n\n\tif mmCreate.defaultExpectation == nil {\n\t\tmmCreate.defaultExpectation = &PaymentRepositoryMockCreateExpectation{}\n\t}\n\n\tmmCreate.defaultExpectation.params = &PaymentRepositoryMockCreateParams{ctx, from, to, amount}\n\tfor _, e := range mmCreate.expectations {\n\t\tif minimock.Equal(e.params, mmCreate.defaultExpectation.params) {\n\t\t\tmmCreate.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmCreate.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmCreate\n}", "func (mmAuther *mGatewayMockAuther) Expect() *mGatewayMockAuther {\n\tif mmAuther.mock.funcAuther != nil {\n\t\tmmAuther.mock.t.Fatalf(\"GatewayMock.Auther mock is already set by Set\")\n\t}\n\n\tif mmAuther.defaultExpectation == nil {\n\t\tmmAuther.defaultExpectation = &GatewayMockAutherExpectation{}\n\t}\n\n\treturn mmAuther\n}", "func TestObjectsMeetReq(t *testing.T) {\n\tvar kr verifiable.StorageReader\n\tvar kw verifiable.StorageWriter\n\n\tvar m 
verifiable.MutatorService\n\n\tvar o verifiable.AuthorizationOracle\n\n\tkr = &memory.TransientStorage{}\n\tkw = &memory.TransientStorage{}\n\n\tkr = &bolt.Storage{}\n\tkw = &bolt.Storage{}\n\n\tkr = &badger.Storage{}\n\tkw = &badger.Storage{}\n\n\tm = &instant.Mutator{}\n\tm = (&batch.Mutator{}).MustCreate()\n\n\to = policy.Open\n\to = &policy.Static{}\n\n\tlog.Println(kr, kw, m, o) // \"use\" these so that go compiler will be quiet\n}", "func (mmInvoke *mContainerMockInvoke) Expect(function interface{}) *mContainerMockInvoke {\n\tif mmInvoke.mock.funcInvoke != nil {\n\t\tmmInvoke.mock.t.Fatalf(\"ContainerMock.Invoke mock is already set by Set\")\n\t}\n\n\tif mmInvoke.defaultExpectation == nil {\n\t\tmmInvoke.defaultExpectation = &ContainerMockInvokeExpectation{}\n\t}\n\n\tmmInvoke.defaultExpectation.params = &ContainerMockInvokeParams{function}\n\tfor _, e := range mmInvoke.expectations {\n\t\tif minimock.Equal(e.params, mmInvoke.defaultExpectation.params) {\n\t\t\tmmInvoke.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmInvoke.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmInvoke\n}", "func (mmGetPosition *mStoreMockGetPosition) Expect(account string, contractID string) *mStoreMockGetPosition {\n\tif mmGetPosition.mock.funcGetPosition != nil {\n\t\tmmGetPosition.mock.t.Fatalf(\"StoreMock.GetPosition mock is already set by Set\")\n\t}\n\n\tif mmGetPosition.defaultExpectation == nil {\n\t\tmmGetPosition.defaultExpectation = &StoreMockGetPositionExpectation{}\n\t}\n\n\tmmGetPosition.defaultExpectation.params = &StoreMockGetPositionParams{account, contractID}\n\tfor _, e := range mmGetPosition.expectations {\n\t\tif minimock.Equal(e.params, mmGetPosition.defaultExpectation.params) {\n\t\t\tmmGetPosition.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetPosition.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetPosition\n}", "func (mmGetAbandonedRequest *mClientMockGetAbandonedRequest) Expect(ctx context.Context, 
objectRef insolar.Reference, reqRef insolar.Reference) *mClientMockGetAbandonedRequest {\n\tif mmGetAbandonedRequest.mock.funcGetAbandonedRequest != nil {\n\t\tmmGetAbandonedRequest.mock.t.Fatalf(\"ClientMock.GetAbandonedRequest mock is already set by Set\")\n\t}\n\n\tif mmGetAbandonedRequest.defaultExpectation == nil {\n\t\tmmGetAbandonedRequest.defaultExpectation = &ClientMockGetAbandonedRequestExpectation{}\n\t}\n\n\tmmGetAbandonedRequest.defaultExpectation.params = &ClientMockGetAbandonedRequestParams{ctx, objectRef, reqRef}\n\tfor _, e := range mmGetAbandonedRequest.expectations {\n\t\tif minimock.Equal(e.params, mmGetAbandonedRequest.defaultExpectation.params) {\n\t\t\tmmGetAbandonedRequest.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetAbandonedRequest.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetAbandonedRequest\n}", "func (mmSend *mSenderMockSend) Expect(ctx context.Context, email Email) *mSenderMockSend {\n\tif mmSend.mock.funcSend != nil {\n\t\tmmSend.mock.t.Fatalf(\"SenderMock.Send mock is already set by Set\")\n\t}\n\n\tif mmSend.defaultExpectation == nil {\n\t\tmmSend.defaultExpectation = &SenderMockSendExpectation{}\n\t}\n\n\tmmSend.defaultExpectation.params = &SenderMockSendParams{ctx, email}\n\tfor _, e := range mmSend.expectations {\n\t\tif minimock.Equal(e.params, mmSend.defaultExpectation.params) {\n\t\t\tmmSend.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmSend.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmSend\n}", "func callAndVerify(msg string, client pb.GreeterClient, shouldFail bool) error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\t_, err := client.SayHello(ctx, &pb.HelloRequest{Name: msg})\n\tif want, got := shouldFail == true, err != nil; got != want {\n\t\treturn fmt.Errorf(\"want and got mismatch, want shouldFail=%v, got fail=%v, rpc error: %v\", want, got, err)\n\t}\n\treturn nil\n}", "func (m *Mockrequester) EXPECT() 
*MockrequesterMockRecorder {\n\treturn m.recorder\n}", "func expectEqual(actual interface{}, extra interface{}, explain ...interface{}) {\n\tgomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)\n}", "func (m *MockstackDescriber) EXPECT() *MockstackDescriberMockRecorder {\n\treturn m.recorder\n}", "func (req *outgoingRequest) Assert(t *testing.T, fixture *fixture) {\n\tassert.Equal(t, req.path, fixture.calledPath, \"called path not as expected\")\n\tassert.Equal(t, req.method, fixture.calledMethod, \"called path not as expected\")\n\tassert.Equal(t, req.body, fixture.requestBody, \"call body no as expected\")\n}", "func (mmVerify *mDelegationTokenFactoryMockVerify) Expect(parcel mm_insolar.Parcel) *mDelegationTokenFactoryMockVerify {\n\tif mmVerify.mock.funcVerify != nil {\n\t\tmmVerify.mock.t.Fatalf(\"DelegationTokenFactoryMock.Verify mock is already set by Set\")\n\t}\n\n\tif mmVerify.defaultExpectation == nil {\n\t\tmmVerify.defaultExpectation = &DelegationTokenFactoryMockVerifyExpectation{}\n\t}\n\n\tmmVerify.defaultExpectation.params = &DelegationTokenFactoryMockVerifyParams{parcel}\n\tfor _, e := range mmVerify.expectations {\n\t\tif minimock.Equal(e.params, mmVerify.defaultExpectation.params) {\n\t\t\tmmVerify.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmVerify.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmVerify\n}", "func (mmRead *mDigestHolderMockRead) Expect(p []byte) *mDigestHolderMockRead {\n\tif mmRead.mock.funcRead != nil {\n\t\tmmRead.mock.t.Fatalf(\"DigestHolderMock.Read mock is already set by Set\")\n\t}\n\n\tif mmRead.defaultExpectation == nil {\n\t\tmmRead.defaultExpectation = &DigestHolderMockReadExpectation{}\n\t}\n\n\tmmRead.defaultExpectation.params = &DigestHolderMockReadParams{p}\n\tfor _, e := range mmRead.expectations {\n\t\tif minimock.Equal(e.params, mmRead.defaultExpectation.params) {\n\t\t\tmmRead.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", 
*mmRead.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmRead\n}", "func (mmSend *mClientMockSend) Expect(ctx context.Context, n *Notification) *mClientMockSend {\n\tif mmSend.mock.funcSend != nil {\n\t\tmmSend.mock.t.Fatalf(\"ClientMock.Send mock is already set by Set\")\n\t}\n\n\tif mmSend.defaultExpectation == nil {\n\t\tmmSend.defaultExpectation = &ClientMockSendExpectation{}\n\t}\n\n\tmmSend.defaultExpectation.params = &ClientMockSendParams{ctx, n}\n\tfor _, e := range mmSend.expectations {\n\t\tif minimock.Equal(e.params, mmSend.defaultExpectation.params) {\n\t\t\tmmSend.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmSend.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmSend\n}", "func (mmAsByteString *mDigestHolderMockAsByteString) Expect() *mDigestHolderMockAsByteString {\n\tif mmAsByteString.mock.funcAsByteString != nil {\n\t\tmmAsByteString.mock.t.Fatalf(\"DigestHolderMock.AsByteString mock is already set by Set\")\n\t}\n\n\tif mmAsByteString.defaultExpectation == nil {\n\t\tmmAsByteString.defaultExpectation = &DigestHolderMockAsByteStringExpectation{}\n\t}\n\n\treturn mmAsByteString\n}", "func Expect(msg string) error {\n\tif msg != \"\" {\n\t\treturn errors.New(msg)\n\t} else {\n\t\treturn nil\n\t}\n}", "func (mmEncrypt *mRingMockEncrypt) Expect(t1 secrets.Text) *mRingMockEncrypt {\n\tif mmEncrypt.mock.funcEncrypt != nil {\n\t\tmmEncrypt.mock.t.Fatalf(\"RingMock.Encrypt mock is already set by Set\")\n\t}\n\n\tif mmEncrypt.defaultExpectation == nil {\n\t\tmmEncrypt.defaultExpectation = &RingMockEncryptExpectation{}\n\t}\n\n\tmmEncrypt.defaultExpectation.params = &RingMockEncryptParams{t1}\n\tfor _, e := range mmEncrypt.expectations {\n\t\tif minimock.Equal(e.params, mmEncrypt.defaultExpectation.params) {\n\t\t\tmmEncrypt.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmEncrypt.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmEncrypt\n}", "func (mmBootstrapper *mGatewayMockBootstrapper) Expect() 
*mGatewayMockBootstrapper {\n\tif mmBootstrapper.mock.funcBootstrapper != nil {\n\t\tmmBootstrapper.mock.t.Fatalf(\"GatewayMock.Bootstrapper mock is already set by Set\")\n\t}\n\n\tif mmBootstrapper.defaultExpectation == nil {\n\t\tmmBootstrapper.defaultExpectation = &GatewayMockBootstrapperExpectation{}\n\t}\n\n\treturn mmBootstrapper\n}", "func (m *MockNotary) EXPECT() *MockNotaryMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockSetSender) Expect(p insolar.Reference) *mParcelMockSetSender {\n\tm.mock.SetSenderFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockSetSenderExpectation{}\n\t}\n\tm.mainExpectation.input = &ParcelMockSetSenderInput{p}\n\treturn m\n}", "func (mmGetPacketType *mPacketParserMockGetPacketType) Expect() *mPacketParserMockGetPacketType {\n\tif mmGetPacketType.mock.funcGetPacketType != nil {\n\t\tmmGetPacketType.mock.t.Fatalf(\"PacketParserMock.GetPacketType mock is already set by Set\")\n\t}\n\n\tif mmGetPacketType.defaultExpectation == nil {\n\t\tmmGetPacketType.defaultExpectation = &PacketParserMockGetPacketTypeExpectation{}\n\t}\n\n\treturn mmGetPacketType\n}", "func (mmParsePacketBody *mPacketParserMockParsePacketBody) Expect() *mPacketParserMockParsePacketBody {\n\tif mmParsePacketBody.mock.funcParsePacketBody != nil {\n\t\tmmParsePacketBody.mock.t.Fatalf(\"PacketParserMock.ParsePacketBody mock is already set by Set\")\n\t}\n\n\tif mmParsePacketBody.defaultExpectation == nil {\n\t\tmmParsePacketBody.defaultExpectation = &PacketParserMockParsePacketBodyExpectation{}\n\t}\n\n\treturn mmParsePacketBody\n}", "func (mmAsBytes *mDigestHolderMockAsBytes) Expect() *mDigestHolderMockAsBytes {\n\tif mmAsBytes.mock.funcAsBytes != nil {\n\t\tmmAsBytes.mock.t.Fatalf(\"DigestHolderMock.AsBytes mock is already set by Set\")\n\t}\n\n\tif mmAsBytes.defaultExpectation == nil {\n\t\tmmAsBytes.defaultExpectation = &DigestHolderMockAsBytesExpectation{}\n\t}\n\n\treturn mmAsBytes\n}", 
"func (m *MockArticleLogic) EXPECT() *MockArticleLogicMockRecorder {\n\treturn m.recorder\n}", "func (mmKey *mIteratorMockKey) Expect() *mIteratorMockKey {\n\tif mmKey.mock.funcKey != nil {\n\t\tmmKey.mock.t.Fatalf(\"IteratorMock.Key mock is already set by Set\")\n\t}\n\n\tif mmKey.defaultExpectation == nil {\n\t\tmmKey.defaultExpectation = &IteratorMockKeyExpectation{}\n\t}\n\n\treturn mmKey\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *mOutboundMockCanAccept) Expect(p Inbound) *mOutboundMockCanAccept {\n\tm.mock.CanAcceptFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &OutboundMockCanAcceptExpectation{}\n\t}\n\tm.mainExpectation.input = &OutboundMockCanAcceptInput{p}\n\treturn m\n}", "func (m *MockLoaderFactory) EXPECT() *MockLoaderFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockPKG) EXPECT() *MockPKGMockRecorder {\n\treturn m.recorder\n}", "func (m *MockbucketDescriber) EXPECT() *MockbucketDescriberMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockType) Expect() *mParcelMockType {\n\tm.mock.TypeFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockTypeExpectation{}\n\t}\n\n\treturn m\n}", "func (m *MockKeystore) EXPECT() *MockKeystoreMockRecorder {\n\treturn m.recorder\n}", "func (m *MockKeystore) EXPECT() *MockKeystoreMockRecorder {\n\treturn m.recorder\n}", "func (mmExchange *mMDNSClientMockExchange) Expect(msg *mdns.Msg, address string) *mMDNSClientMockExchange {\n\tif mmExchange.mock.funcExchange != nil {\n\t\tmmExchange.mock.t.Fatalf(\"MDNSClientMock.Exchange mock is already set by Set\")\n\t}\n\n\tif mmExchange.defaultExpectation == nil {\n\t\tmmExchange.defaultExpectation = 
&MDNSClientMockExchangeExpectation{}\n\t}\n\n\tmmExchange.defaultExpectation.params = &MDNSClientMockExchangeParams{msg, address}\n\tfor _, e := range mmExchange.expectations {\n\t\tif minimock.Equal(e.params, mmExchange.defaultExpectation.params) {\n\t\t\tmmExchange.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmExchange.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmExchange\n}", "func (m *MockStream) EXPECT() *MockStreamMockRecorder {\n\treturn m.recorder\n}", "func (c Chkr) Expect(v validator, args ...interface{}) {\n\tif c.runTest(v, args...) {\n\t\tc.Fail()\n\t}\n}", "func (mmClone *mStorageMockClone) Expect(ctx context.Context, from insolar.PulseNumber, to insolar.PulseNumber, keepActual bool) *mStorageMockClone {\n\tif mmClone.mock.funcClone != nil {\n\t\tmmClone.mock.t.Fatalf(\"StorageMock.Clone mock is already set by Set\")\n\t}\n\n\tif mmClone.defaultExpectation == nil {\n\t\tmmClone.defaultExpectation = &StorageMockCloneExpectation{}\n\t}\n\n\tmmClone.defaultExpectation.params = &StorageMockCloneParams{ctx, from, to, keepActual}\n\tfor _, e := range mmClone.expectations {\n\t\tif minimock.Equal(e.params, mmClone.defaultExpectation.params) {\n\t\t\tmmClone.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmClone.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmClone\n}", "func (m *MockCodeGenerator) EXPECT() *MockCodeGeneratorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockNodeAttestor) EXPECT() *MockNodeAttestorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockNodeAttestor) EXPECT() *MockNodeAttestorMockRecorder {\n\treturn m.recorder\n}", "func (_m *MockIStream) EXPECT() *MockIStreamMockRecorder {\n\treturn _m.recorder\n}", "func (m *mOutboundMockGetEndpointType) Expect() *mOutboundMockGetEndpointType {\n\tm.mock.GetEndpointTypeFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &OutboundMockGetEndpointTypeExpectation{}\n\t}\n\n\treturn m\n}", 
"func (m *MockAZInfoProvider) EXPECT() *MockAZInfoProviderMockRecorder {\n\treturn m.recorder\n}" ]
[ "0.58157563", "0.5714918", "0.5672776", "0.5639812", "0.56273276", "0.5573085", "0.5567367", "0.5529613", "0.55066866", "0.5486919", "0.54729885", "0.54647803", "0.5460882", "0.54414886", "0.5440682", "0.5405729", "0.54035264", "0.53890616", "0.53831995", "0.53831995", "0.5369224", "0.53682834", "0.5358863", "0.5340405", "0.5338385", "0.5327707", "0.53230935", "0.53132576", "0.5307127", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.53035146", "0.5295391", "0.5295391", "0.5291368", "0.52822006", "0.52821374", "0.52767164", "0.5273333", "0.5273239", "0.5265769", "0.52593946", "0.52572596", "0.5256972", "0.52545565", "0.5249454", "0.52421427", "0.52410823", "0.5238541", "0.52360845", "0.5235068", "0.5227199", "0.5227038", "0.52227145", "0.52144563", "0.5212412", "0.52120364", "0.5211835", "0.5211705", "0.5208191", "0.5194654", "0.5190334", "0.51877177", "0.5187148", "0.5185659", "0.51827794", "0.51817787", "0.5175451", "0.51730126", "0.5169131", "0.5167294", "0.5162394", "0.51599216", "0.51597583", "0.5159494", "0.51442164", "0.51442164", "0.51442164", "0.5143891", "0.51437116", "0.51395434", "0.51341194", "0.5133995", "0.51337904", "0.51337904", "0.51298875", "0.5129523", "0.5128482", "0.5123544", "0.51224196", "0.51162475", "0.51162475", "0.51148367", "0.51146877", "0.51091874" ]
0.0
-1
DescribeLoadBalancer mocks base method
func (m *MockLoadBalance) DescribeLoadBalancer(region, lbID, name string) (*cloud.LoadBalanceObject, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DescribeLoadBalancer", region, lbID, name) ret0, _ := ret[0].(*cloud.LoadBalanceObject) ret1, _ := ret[1].(error) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) DescribeLoadBalancerWithNs(ns, region, lbID, name string) (*cloud.LoadBalanceObject, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DescribeLoadBalancerWithNs\", ns, region, lbID, name)\n\tret0, _ := ret[0].(*cloud.LoadBalanceObject)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoadBalancerServiceIface) ListLoadBalancers(p *ListLoadBalancersParams) (*ListLoadBalancersResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListLoadBalancers\", p)\n\tret0, _ := ret[0].(*ListLoadBalancersResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (p *MockProvisionerClient) LoadBalancer(string) client.GenericLoadBalancerInterface {\n\treturn &MockLoadBalancerClient{}\n}", "func (p *MockProvisionerClient) LoadBalancer(string) client.GenericLoadBalancerInterface {\n\treturn &MockLoadBalancerClient{}\n}", "func (c *MockLoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string) (*network.LoadBalancer, error) {\n\tfor _, lb := range c.LBs {\n\t\tif *lb.Name == loadBalancerName {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\treturn nil, nil\n}", "func CreateLoadBalancer(ctx context.Context, lbName, pipName string) (lb network.LoadBalancer, err error) {\n\tprobeName := \"probe\"\n\tfrontEndIPConfigName := \"fip\"\n\tbackEndAddressPoolName := \"backEndPool\"\n\tidPrefix := fmt.Sprintf(\"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers\", config.SubscriptionID(), config.GroupName())\n\n\tpip, err := GetPublicIP(ctx, pipName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tlbClient := getLBClient()\n\tfuture, err := lbClient.CreateOrUpdate(ctx,\n\t\tconfig.GroupName(),\n\t\tlbName,\n\t\tnetwork.LoadBalancer{\n\t\t\tLocation: to.StringPtr(config.Location()),\n\t\t\tLoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{\n\t\t\t\tFrontendIPConfigurations: &[]network.FrontendIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: 
&frontEndIPConfigName,\n\t\t\t\t\t\tFrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{\n\t\t\t\t\t\t\tPrivateIPAllocationMethod: network.Dynamic,\n\t\t\t\t\t\t\tPublicIPAddress: &pip,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBackendAddressPools: &[]network.BackendAddressPool{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &backEndAddressPoolName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProbes: &[]network.Probe{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &probeName,\n\t\t\t\t\t\tProbePropertiesFormat: &network.ProbePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.ProbeProtocolHTTP,\n\t\t\t\t\t\t\tPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tIntervalInSeconds: to.Int32Ptr(15),\n\t\t\t\t\t\t\tNumberOfProbes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tRequestPath: to.StringPtr(\"healthprobe.aspx\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLoadBalancingRules: &[]network.LoadBalancingRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"lbRule\"),\n\t\t\t\t\t\tLoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tLoadDistribution: network.Default,\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tBackendAddressPool: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/backendAddressPools/%s\", idPrefix, lbName, backEndAddressPoolName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tProbe: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/probes/%s\", idPrefix, lbName, probeName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tInboundNatRules: 
&[]network.InboundNatRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"natRule1\"),\n\t\t\t\t\t\tInboundNatRulePropertiesFormat: &network.InboundNatRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(21),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(22),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"natRule2\"),\n\t\t\t\t\t\tInboundNatRulePropertiesFormat: &network.InboundNatRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(23),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(22),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn lb, fmt.Errorf(\"cannot create load balancer: %v\", err)\n\t}\n\n\terr = future.WaitForCompletion(ctx, lbClient.Client)\n\tif err != nil {\n\t\treturn lb, fmt.Errorf(\"cannot get load balancer create or update future response: %v\", err)\n\t}\n\n\treturn future.Result(lbClient)\n}", "func (s *Service) CreateOrUpdate(ctx context.Context, spec azure.Spec) error {\n\tinternalLBSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn errors.New(\"invalid internal load balancer specification\")\n\t}\n\tklog.V(2).Infof(\"creating internal load balancer %s\", internalLBSpec.Name)\n\tprobeName := \"tcpHTTPSProbe\"\n\tfrontEndIPConfigName := 
\"controlplane-internal-lbFrontEnd\"\n\tbackEndAddressPoolName := \"controlplane-internal-backEndPool\"\n\tidPrefix := fmt.Sprintf(\"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers\", s.Scope.SubscriptionID, s.Scope.ClusterConfig.ResourceGroup)\n\tlbName := internalLBSpec.Name\n\n\tklog.V(2).Infof(\"getting subnet %s\", internalLBSpec.SubnetName)\n\tsubnetInterface, err := subnets.NewService(s.Scope).Get(ctx, &subnets.Spec{Name: internalLBSpec.SubnetName, VnetName: internalLBSpec.VnetName})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubnet, ok := subnetInterface.(network.Subnet)\n\tif !ok {\n\t\treturn errors.New(\"subnet Get returned invalid interface\")\n\t}\n\tklog.V(2).Infof(\"successfully got subnet %s\", internalLBSpec.SubnetName)\n\n\tfuture, err := s.Client.CreateOrUpdate(ctx,\n\t\ts.Scope.ClusterConfig.ResourceGroup,\n\t\tlbName,\n\t\tnetwork.LoadBalancer{\n\t\t\tSku: &network.LoadBalancerSku{Name: network.LoadBalancerSkuNameStandard},\n\t\t\tLocation: to.StringPtr(s.Scope.ClusterConfig.Location),\n\t\t\tLoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{\n\t\t\t\tFrontendIPConfigurations: &[]network.FrontendIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &frontEndIPConfigName,\n\t\t\t\t\t\tFrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{\n\t\t\t\t\t\t\tPrivateIPAllocationMethod: network.Static,\n\t\t\t\t\t\t\tSubnet: &subnet,\n\t\t\t\t\t\t\tPrivateIPAddress: to.StringPtr(internalLBSpec.IPAddress),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBackendAddressPools: &[]network.BackendAddressPool{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &backEndAddressPoolName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProbes: &[]network.Probe{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &probeName,\n\t\t\t\t\t\tProbePropertiesFormat: &network.ProbePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.ProbeProtocolTCP,\n\t\t\t\t\t\t\tPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIntervalInSeconds: 
to.Int32Ptr(15),\n\t\t\t\t\t\t\tNumberOfProbes: to.Int32Ptr(4),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLoadBalancingRules: &[]network.LoadBalancingRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"LBRuleHTTPS\"),\n\t\t\t\t\t\tLoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tLoadDistribution: network.LoadDistributionDefault,\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tBackendAddressPool: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/backendAddressPools/%s\", idPrefix, lbName, backEndAddressPoolName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tProbe: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/probes/%s\", idPrefix, lbName, probeName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot create load balancer\")\n\t}\n\n\terr = future.WaitForCompletionRef(ctx, s.Client.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot get internal load balancer create or update future response\")\n\t}\n\n\t_, err = future.Result(s.Client)\n\tklog.V(2).Infof(\"successfully created internal load balancer %s\", internalLBSpec.Name)\n\treturn err\n}", "func (_m *ELBv2APIClient) DescribeLoadBalancers(_a0 context.Context, _a1 *elasticloadbalancingv2.DescribeLoadBalancersInput, _a2 ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DescribeLoadBalancersOutput, error) {\n\t_va := make([]interface{}, len(_a2))\n\tfor _i := range _a2 
{\n\t\t_va[_i] = _a2[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _a0, _a1)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 *elasticloadbalancingv2.DescribeLoadBalancersOutput\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DescribeLoadBalancersOutput, error)); ok {\n\t\treturn rf(_a0, _a1, _a2...)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) *elasticloadbalancingv2.DescribeLoadBalancersOutput); ok {\n\t\tr0 = rf(_a0, _a1, _a2...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*elasticloadbalancingv2.DescribeLoadBalancersOutput)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) error); ok {\n\t\tr1 = rf(_a0, _a1, _a2...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockLoadBalancerServiceIface) ListNetscalerLoadBalancers(p *ListNetscalerLoadBalancersParams) (*ListNetscalerLoadBalancersResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListNetscalerLoadBalancers\", p)\n\tret0, _ := ret[0].(*ListNetscalerLoadBalancersResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestNonMatchingLBClass(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tlbClass := \"net.example/some-other-class\"\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: 
slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerClass: &lbClass,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"Unexpected patch to a service\")\n\n\t\treturn true\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n}", "func (el *ELBV2Manager) describeLoadbalancers(marker *string, loadbalancers []*elbv2.LoadBalancer) ([]*elbv2.LoadBalancer, error) {\n\n\tinput := &elbv2.DescribeLoadBalancersInput{\n\t\tMarker: marker,\n\t}\n\n\tresp, err := el.client.DescribeLoadBalancers(input)\n\tif err != nil {\n\t\tlog.WithField(\"error\", err).Error(\"could not describe elb instances\")\n\t\treturn nil, err\n\t}\n\n\tif loadbalancers == nil {\n\t\tloadbalancers = []*elbv2.LoadBalancer{}\n\t}\n\n\tloadbalancers = append(loadbalancers, resp.LoadBalancers...)\n\n\tif resp.NextMarker != nil {\n\t\treturn el.describeLoadbalancers(resp.NextMarker, loadbalancers)\n\t}\n\n\treturn loadbalancers, nil\n}", "func (mr *MockLoadBalanceMockRecorder) DescribeLoadBalancer(region, lbID, name interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeLoadBalancer\", reflect.TypeOf((*MockLoadBalance)(nil).DescribeLoadBalancer), region, lbID, name)\n}", "func HandleFullyPopulatedLoadbalancerCreationSuccessfully(t *testing.T, response string) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestJSONRequest(t, r, `{\n\t\t\t\"loadbalancer\": {\n\t\t\t\t\"admin_state_up\": true,\n\t\t\t\t\"flavor_id\": 
\"bba40eb2-ee8c-11e9-81b4-2a2ae2dbcce4\",\n\t\t\t\t\"listeners\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"default_pool\": {\n\t\t\t\t\t\t\t\"healthmonitor\": {\n\t\t\t\t\t\t\t\t\"delay\": 3,\n\t\t\t\t\t\t\t\t\"expected_codes\": \"200\",\n\t\t\t\t\t\t\t\t\"http_method\": \"GET\",\n\t\t\t\t\t\t\t\t\"max_retries\": 2,\n\t\t\t\t\t\t\t\t\"max_retries_down\": 3,\n\t\t\t\t\t\t\t\t\"name\": \"db\",\n\t\t\t\t\t\t\t\t\"timeout\": 1,\n\t\t\t\t\t\t\t\t\"type\": \"HTTP\",\n\t\t\t\t\t\t\t\t\"url_path\": \"/index.html\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"lb_algorithm\": \"ROUND_ROBIN\",\n\t\t\t\t\t\t\t\"members\": [\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"address\": \"192.0.2.51\",\n\t\t\t\t\t\t\t\t\t\"protocol_port\": 80\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"address\": \"192.0.2.52\",\n\t\t\t\t\t\t\t\t\t\"protocol_port\": 80\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\"name\": \"Example pool\",\n\t\t\t\t\t\t\t\"protocol\": \"HTTP\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"l7policies\": [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"action\": \"REDIRECT_TO_URL\",\n\t\t\t\t\t\t\t\t\"name\": \"redirect-example.com\",\n\t\t\t\t\t\t\t\t\"redirect_url\": \"http://www.example.com\",\n\t\t\t\t\t\t\t\t\"rules\": [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\"compare_type\": \"REGEX\",\n\t\t\t\t\t\t\t\t\t\t\"type\": \"PATH\",\n\t\t\t\t\t\t\t\t\t\t\"value\": \"/images*\"\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t],\n\t\t\t\t\t\t\"name\": \"redirect_listener\",\n\t\t\t\t\t\t\"protocol\": \"HTTP\",\n\t\t\t\t\t\t\"protocol_port\": 8080\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\t\"name\": \"db_lb\",\n\t\t\t\t\"provider\": \"octavia\",\n\t\t\t\t\"tags\": [\n\t\t\t\t\t\"test\",\n\t\t\t\t\t\"stage\"\n\t\t\t\t],\n\t\t\t\t\"vip_address\": \"10.30.176.48\",\n\t\t\t\t\"vip_port_id\": \"2bf413c8-41a9-4477-b505-333d5cbe8b55\",\n\t\t\t\t\"vip_subnet_id\": 
\"9cedb85d-0759-4898-8a4b-fa5a5ea10086\"\n\t\t\t}\n\t\t}`)\n\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tfmt.Fprintf(w, response)\n\t})\n}", "func TestAddPool(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.20.10\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.20.10\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.20.10'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, 
time.Second)\n\n\ttwentyPool := mkPool(poolBUID, \"pool-b\", []string{\"10.0.20.0/24\"})\n\t_, err := fixture.poolClient.Create(context.Background(), twentyPool, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func TestAllocOnInit(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.123\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.124\",\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.124\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := 
fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"No service updates expected\")\n\n\t\treturn false\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tawait.Block()\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.123\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.124\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n}", "func (c *MockAzureCloud) LoadBalancer() azure.LoadBalancersClient {\n\treturn c.LoadBalancersClient\n}", "func (lb *GetLoadbalancerInput) GetAllLoadbalancer() (GetLoadbalancerResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(lb.Cloud.Name)); status != true {\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetNetworks\")\n\t}\n\n\tswitch strings.ToLower(lb.Cloud.Name) {\n\tcase \"aws\":\n\n\t\t// I will establish session so that we can carry out the process in cloud\n\t\tsess := (lb.Cloud.Client).(*session.Session)\n\n\t\t// authorizing further request\n\t\tauthinpt := new(auth.EstablishConnectionInput)\n\t\tauthinpt.Region = lb.Cloud.Region\n\t\tauthinpt.Session = sess\n\t\tauthinpt.Resource = \"elb12\"\n\t\tlbin := new(loadbalance.GetLoadbalancerInput)\n\t\tlbin.GetRaw = lb.Cloud.GetRaw\n\n\t\tswitch strings.ToLower(lb.Type) {\n\t\tcase \"classic\":\n\t\t\tresponse, lberr := lbin.GetAllClassicLb(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\t\tcase \"application\":\n\t\t\tresponse, lberr := lbin.GetAllApplicationLb(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn 
GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\t\tcase \"\":\n\t\t\tresponse, lberr := lbin.GetAllLoadbalancer(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\t\tdefault:\n\t\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(\"The loadbalancer type you entered is unknown to me\")\n\t\t}\n\n\tcase \"azure\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultAzResponse)\n\tcase \"gcp\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultGcpResponse)\n\tcase \"openstack\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultOpResponse)\n\tdefault:\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetLoadbalancers\")\n\t}\n}", "func (m *MockNetworkDescriber) APIServerLBName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLBName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockLoadBalancerServiceIface) CreateLoadBalancer(p *CreateLoadBalancerParams) (*CreateLoadBalancerResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateLoadBalancer\", p)\n\tret0, _ := ret[0].(*CreateLoadBalancerResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoadBalancerServiceIface) GetLoadBalancerByName(name string, opts ...OptionFunc) (*LoadBalancer, int, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{name}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"GetLoadBalancerByName\", varargs...)\n\tret0, _ := ret[0].(*LoadBalancer)\n\tret1, _ := ret[1].(int)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (m *MockNetworkDescriber) OutboundLBName(arg0 string) string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OutboundLBName\", arg0)\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m 
*MockLoadBalancerServiceIface) NewListLoadBalancersParams() *ListLoadBalancersParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewListLoadBalancersParams\")\n\tret0, _ := ret[0].(*ListLoadBalancersParams)\n\treturn ret0\n}", "func (m *MockLoadBalancerServiceIface) UpdateLoadBalancer(p *UpdateLoadBalancerParams) (*UpdateLoadBalancerResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateLoadBalancer\", p)\n\tret0, _ := ret[0].(*UpdateLoadBalancerResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *Service) Get(ctx context.Context, spec azure.Spec) (interface{}, error) {\n\tinternalLBSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn network.LoadBalancer{}, errors.New(\"invalid internal load balancer specification\")\n\t}\n\t//lbName := fmt.Sprintf(\"%s-api-internallb\", s.Scope.Cluster.Name)\n\tlb, err := s.Client.Get(ctx, s.Scope.ClusterConfig.ResourceGroup, internalLBSpec.Name, \"\")\n\tif err != nil && azure.ResourceNotFound(err) {\n\t\treturn nil, errors.Wrapf(err, \"load balancer %s not found\", internalLBSpec.Name)\n\t} else if err != nil {\n\t\treturn lb, err\n\t}\n\treturn lb, nil\n}", "func (lb *GetLoadbalancerInput) GetLoadbalancers() (GetLoadbalancerResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(lb.Cloud.Name)); status != true {\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetNetworks\")\n\t}\n\n\tswitch strings.ToLower(lb.Cloud.Name) {\n\tcase \"aws\":\n\n\t\t// Gets the established session so that it can carry out the process in cloud\n\t\tsess := (lb.Cloud.Client).(*session.Session)\n\n\t\t//authorizing to request further\n\t\tauthinpt := new(auth.EstablishConnectionInput)\n\t\tauthinpt.Region = lb.Cloud.Region\n\t\tauthinpt.Session = sess\n\t\tswitch strings.ToLower(lb.Type) {\n\t\tcase \"classic\":\n\t\t\tauthinpt.Resource = \"elb\"\n\t\tcase \"application\":\n\t\t\tauthinpt.Resource = \"elb2\"\n\t\t}\n\n\t\tlbin := 
new(loadbalance.GetLoadbalancerInput)\n\t\tlbin.GetRaw = lb.Cloud.GetRaw\n\t\tlbin.LbNames = lb.LbNames\n\t\tlbin.LbArns = lb.LbArns\n\t\tlbin.Type = lb.Type\n\t\tresponse, lberr := lbin.Getloadbalancers(*authinpt)\n\t\tif lberr != nil {\n\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t}\n\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\n\tcase \"azure\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultAzResponse)\n\tcase \"gcp\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultGcpResponse)\n\tcase \"openstack\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultOpResponse)\n\tdefault:\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetLoadbalancers\")\n\t}\n}", "func (m *MockNetworkDescriber) APIServerLBPoolName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLBPoolName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func TestRequestIPs(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.20\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.10.20\" {\n\t\t\tt.Error(\"Expected service to 
receive IP '10.0.10.20'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 3 {\n\t\t\tt.Error(\"Expected service to receive exactly three ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tfirst := false\n\t\tsecond := false\n\t\tthird := false\n\n\t\tfor _, ingress := range svc.Status.LoadBalancer.Ingress {\n\t\t\tswitch ingress.IP {\n\t\t\tcase \"10.0.10.21\":\n\t\t\t\tfirst = true\n\t\t\tcase \"10.0.10.22\":\n\t\t\t\tsecond = true\n\t\t\tcase \"10.0.10.23\":\n\t\t\t\tthird = true\n\t\t\tdefault:\n\t\t\t\tt.Error(\"Unexpected ingress IP\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tif !first {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.21'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !second {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.22'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !third {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.23'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tserviceB := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-b\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceBUID,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tciliumSvcLBIPSAnnotation: \"10.0.10.22,10.0.10.23\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: 
slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Create(context.Background(), serviceB, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-c\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Reason != \"already_allocated\" {\n\t\t\tt.Error(\"Expected condition reason to be 'already_allocated'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t// request an already allocated IP\n\tserviceC := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-c\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceCUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err = 
fixture.svcClient.Services(\"default\").Create(context.Background(), serviceC, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (_m *Client) GetLoadBalancers(ctx context.Context, rgname string, logger log.Logger) ([]network.LoadBalancer, error) {\n\tret := _m.Called(ctx, rgname, logger)\n\n\tvar r0 []network.LoadBalancer\n\tif rf, ok := ret.Get(0).(func(context.Context, string, log.Logger) []network.LoadBalancer); ok {\n\t\tr0 = rf(ctx, rgname, logger)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]network.LoadBalancer)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, string, log.Logger) error); ok {\n\t\tr1 = rf(ctx, rgname, logger)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (c *MockLoadBalancersClient) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, error) {\n\tvar l []network.LoadBalancer\n\tfor _, lb := range c.LBs {\n\t\tl = append(l, lb)\n\t}\n\treturn l, nil\n}", "func (m *MockClusterScoper) OutboundLBName(arg0 string) string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OutboundLBName\", arg0)\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error {\n\tctx, cancel := getContextWithCancel()\n\tdefer cancel()\n\n\tlb = cleanupSubnetInFrontendIPConfigurations(&lb)\n\n\trgName := az.getLoadBalancerResourceGroup()\n\trerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, pointer.StringDeref(lb.Name, \"\"), lb, pointer.StringDeref(lb.Etag, \"\"))\n\tklog.V(10).Infof(\"LoadBalancerClient.CreateOrUpdate(%s): end\", *lb.Name)\n\tif rerr == nil {\n\t\t// Invalidate the cache right after updating\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t\treturn nil\n\t}\n\n\tlbJSON, _ := json.Marshal(lb)\n\tklog.Warningf(\"LoadBalancerClient.CreateOrUpdate(%s) failed: %v, LoadBalancer 
request: %s\", pointer.StringDeref(lb.Name, \"\"), rerr.Error(), string(lbJSON))\n\n\t// Invalidate the cache because ETAG precondition mismatch.\n\tif rerr.HTTPStatusCode == http.StatusPreconditionFailed {\n\t\tklog.V(3).Infof(\"LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed\", pointer.StringDeref(lb.Name, \"\"))\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t}\n\n\tretryErrorMessage := rerr.Error().Error()\n\t// Invalidate the cache because another new operation has canceled the current request.\n\tif strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) {\n\t\tklog.V(3).Infof(\"LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation\", pointer.StringDeref(lb.Name, \"\"))\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t}\n\n\t// The LB update may fail because the referenced PIP is not in the Succeeded provisioning state\n\tif strings.Contains(strings.ToLower(retryErrorMessage), strings.ToLower(consts.ReferencedResourceNotProvisionedMessageCode)) {\n\t\tmatches := pipErrorMessageRE.FindStringSubmatch(retryErrorMessage)\n\t\tif len(matches) != 3 {\n\t\t\tklog.Errorf(\"Failed to parse the retry error message %s\", retryErrorMessage)\n\t\t\treturn rerr.Error()\n\t\t}\n\t\tpipRG, pipName := matches[1], matches[2]\n\t\tklog.V(3).Infof(\"The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it\", pipName, pointer.StringDeref(lb.Name, \"\"))\n\t\tpip, _, err := az.getPublicIPAddress(pipRG, pipName, azcache.CacheReadTypeDefault)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to get the public IP %s in resource group %s: %v\", pipName, pipRG, err)\n\t\t\treturn rerr.Error()\n\t\t}\n\t\t// Perform a dummy update to fix the provisioning state\n\t\terr = az.CreateOrUpdatePIP(service, pipRG, pip)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to update the public IP %s in resource group %s: %v\", pipName, pipRG, err)\n\t\t\treturn 
rerr.Error()\n\t\t}\n\t\t// Invalidate the LB cache, return the error, and the controller manager\n\t\t// would retry the LB update in the next reconcile loop\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t}\n\n\treturn rerr.Error()\n}", "func TestRequestIPWithMismatchedLabel(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"blue\",\n\t\t},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"color\": \"red\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.123\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\t\tif svc.Status.Conditions[0].Reason != \"pool_selector_mismatch\" {\n\t\t\tt.Error(\"Expected service to receive 'pool_selector_mismatch' condition\")\n\t\t}\n\n\t\treturn true\n\t}, 1*time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected status update of service\")\n\t}\n}", "func TestAddRange(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: 
\"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.20.10\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tpoolA.Spec.Cidrs = append(poolA.Spec.Cidrs, cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\tCidr: \"10.0.20.0/24\",\n\t})\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.20.10\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.20.10'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (m *MockClusterScoper) APIServerLBName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLBName\")\n\tret0, _ := ret[0].(string)\n\treturn 
ret0\n}", "func NewMocklbDescriber(ctrl *gomock.Controller) *MocklbDescriber {\n\tmock := &MocklbDescriber{ctrl: ctrl}\n\tmock.recorder = &MocklbDescriberMockRecorder{mock}\n\treturn mock\n}", "func TestDisablePool(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, 500*time.Millisecond)\n\n\tpoolA.Spec.Disabled = true\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].externallyDisabled {\n\t\tt.Fatal(\"The range has not been externally disabled\")\n\t}\n\n\tawait = fixture.AwaitService(func(action 
k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\tt.Error(\"Expected service status update to occur on service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tserviceB := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-b\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceBUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Create(context.Background(), serviceB, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolA.Spec.Disabled = false\n\n\t_, err = fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func TestGetAllBackendServer(t *testing.T) {\n\tcloud, _, resp, err := beforeTestBlb()\n\tif err != nil {\n\t\tt.Errorf(\"beforeTestBlb err, err: %v\", err)\n\t}\n\tctx := context.Background()\n\t// bs is nil\n\tlb := &blb.LoadBalancer{\n\t\tBlbId: resp.LoadBalancerId,\n\t}\n\tbs, err := cloud.getAllBackendServer(ctx, 
lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 0 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n\t// add bs\n\tbsAdd := []blb.BackendServer{\n\t\t{\n\t\t\tInstanceId: \"1\",\n\t\t},\n\t\t{\n\t\t\tInstanceId: \"2\",\n\t\t},\n\t}\n\targs := blb.AddBackendServersArgs{\n\t\tLoadBalancerId: lb.BlbId,\n\t\tBackendServerList: bsAdd,\n\t}\n\terr = cloud.clientSet.BLBClient.AddBackendServers(ctx, &args, &bce.SignOption{\n\t\tCustomSignFunc: CCEServiceSign,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"AddBackendServers err, err: %v\", err)\n\t}\n\t// get bs\n\tbs, err = cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 2 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n}", "func TestPoolDelete(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t\tmkPool(poolBUID, \"pool-b\", []string{\"10.0.20.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tvar allocPool string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif 
strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.10\") {\n\t\t\tallocPool = \"pool-a\"\n\t\t} else {\n\t\t\tallocPool = \"pool-b\"\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\t<-initDone\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.10\") {\n\t\t\tif allocPool == \"pool-a\" {\n\t\t\t\tt.Error(\"New IP was allocated from deleted pool\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\tif allocPool == \"pool-b\" {\n\t\t\t\tt.Error(\"New IP was allocated from deleted pool\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\terr := fixture.poolClient.Delete(context.Background(), allocPool, meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func NewLoadBalancerCollector(logger log.Logger, client *hcloud.Client, failures *prometheus.CounterVec, duration *prometheus.HistogramVec, cfg config.Target) *LoadBalancerCollector {\n\tif failures != nil {\n\t\tfailures.WithLabelValues(\"load_balancer\").Add(0)\n\t}\n\n\tlabels := []string{\"id\", \"name\", \"datacenter\"}\n\treturn &LoadBalancerCollector{\n\t\tclient: client,\n\t\tlogger: log.With(logger, \"collector\", \"load-balancer\"),\n\t\tfailures: failures,\n\t\tduration: duration,\n\t\tconfig: cfg,\n\n\t\tCreated: 
prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_created_timestamp\",\n\t\t\t\"Timestamp when the load balancer have been created\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tServices: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_services\",\n\t\t\t\"The number of configured services\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxServices: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_services\",\n\t\t\t\"The maximum number of services that can be configured\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargets: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets\",\n\t\t\t\"The number of targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxTargets: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_targets\",\n\t\t\t\"The maximum number of targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsHealthy: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_healthy\",\n\t\t\t\"The number of healthy targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsUnhealthy: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_unhealthy\",\n\t\t\t\"The number of unhealthy targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsUnknown: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_unknown\",\n\t\t\t\"The number of unknown targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tAssignedCertificates: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_assigned_certificates\",\n\t\t\t\"The number of assigned certificates\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxAssignedCertificates: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_assigned_certificates\",\n\t\t\t\"The maximum number of certificates that can be assigned\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIngoingTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_ingoing_traffic\",\n\t\t\t\"The total amount of ingoing traffic in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tOutgoingTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_outgoing_traffic\",\n\t\t\t\"The total 
amount of outgoing traffic in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIncludedTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_included_traffic\",\n\t\t\t\"The amount of traffic that is included for the load balancer type in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tConnections: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections\",\n\t\t\t\"The number of open connections\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxConnections: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_open_connections\",\n\t\t\t\"The maximum number of open connections\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tConnectionsPerSecond: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_connections_per_second\",\n\t\t\t\"The number of new connections per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tRequestsPerSecond: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_requests_per_second\",\n\t\t\t\"The number of requests per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIncomingBandwidth: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections_bandwidth_in\",\n\t\t\t\"The incoming bandwidth in bytes per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tOutgoingBandwidth: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections_bandwidth_out\",\n\t\t\t\"The outgoing bandwidth in bytes per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t}\n}", "func (bc *Baiducloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer(%v, %v, %v, %v, %v)\",\n\t\tclusterName, service.Namespace, service.Name, bc.Region, service.Spec.LoadBalancerIP, service.Spec.Ports, service.Annotations)\n\t// workaround to support old version, can be removed if not support old version\n\tbc.workAround(service)\n\tresult, err := ExtractServiceAnnotation(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = 
bc.validateService(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// ensure BLB\n\tlb, err := bc.ensureBLB(ctx, clusterName, service, nodes, result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif result.LoadBalancerInternalVpc == \"true\" {\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer: use LoadBalancerInternalVpc, EXTERNAL-IP is %s\", service.Namespace, service.Name, lb.Address)\n\t\treturn &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: lb.Address}}}, nil\n\t}\n\n\t// ensure EIP\n\tpubIP, err := bc.ensureEIP(ctx, clusterName, service, nodes, result, lb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer: EXTERNAL-IP is %s\", service.Namespace, service.Name, pubIP)\n\treturn &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: pubIP}}}, nil\n}", "func (m *MockNetworkDescriber) APIServerLB() *v1beta1.LoadBalancerSpec {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLB\")\n\tret0, _ := ret[0].(*v1beta1.LoadBalancerSpec)\n\treturn ret0\n}", "func (m *MockClusterScoper) APIServerLBPoolName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLBPoolName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func TestServiceDelete(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tvar svcIP string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || 
action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tsvcIP = svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status to be updated\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been allocated\")\n\t}\n\n\terr := fixture.svcClient.Services(\"default\").Delete(context.Background(), \"service-a\", meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been released\")\n\t}\n}", "func TestReallocOnInit(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\t// Initially request only an IPv4\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: 
[]slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"192.168.1.12\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP == \"192.168.1.12\" {\n\t\t\tt.Error(\"Expected ingress IP to not be the initial, bad IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service to be updated\")\n\t}\n}", "func (m *MocklbDescriber) DescribeRule(arg0 context.Context, arg1 string) (elbv2.Rule, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DescribeRule\", arg0, arg1)\n\tret0, _ := ret[0].(elbv2.Rule)\n\tret1, _ := 
ret[1].(error)\n\treturn ret0, ret1\n}", "func desiredLoadBalancerService(ci *operatorv1.IngressController, deploymentRef metav1.OwnerReference, platform *configv1.PlatformStatus) (bool, *corev1.Service, error) {\n\tif ci.Status.EndpointPublishingStrategy.Type != operatorv1.LoadBalancerServiceStrategyType {\n\t\treturn false, nil, nil\n\t}\n\tservice := manifests.LoadBalancerService()\n\n\tname := controller.LoadBalancerServiceName(ci)\n\n\tservice.Namespace = name.Namespace\n\tservice.Name = name.Name\n\n\tif service.Labels == nil {\n\t\tservice.Labels = map[string]string{}\n\t}\n\tservice.Labels[\"router\"] = name.Name\n\tservice.Labels[manifests.OwningIngressControllerLabel] = ci.Name\n\n\tservice.Spec.Selector = controller.IngressControllerDeploymentPodSelector(ci).MatchLabels\n\n\tlb := ci.Status.EndpointPublishingStrategy.LoadBalancer\n\tisInternal := lb != nil && lb.Scope == operatorv1.InternalLoadBalancer\n\n\tif service.Annotations == nil {\n\t\tservice.Annotations = map[string]string{}\n\t}\n\n\tproxyNeeded, err := IsProxyProtocolNeeded(ci, platform)\n\tif err != nil {\n\t\treturn false, nil, fmt.Errorf(\"failed to determine if proxy protocol is proxyNeeded for ingresscontroller %q: %v\", ci.Name, err)\n\t}\n\n\tif platform != nil {\n\t\tif isInternal {\n\t\t\tannotation := InternalLBAnnotations[platform.Type]\n\t\t\tfor name, value := range annotation {\n\t\t\t\tservice.Annotations[name] = value\n\t\t\t}\n\n\t\t\t// Set the GCP Global Access annotation for internal load balancers on GCP only\n\t\t\tif platform.Type == configv1.GCPPlatformType {\n\t\t\t\tif lb != nil && lb.ProviderParameters != nil &&\n\t\t\t\t\tlb.ProviderParameters.Type == operatorv1.GCPLoadBalancerProvider &&\n\t\t\t\t\tlb.ProviderParameters.GCP != nil {\n\t\t\t\t\tglobalAccessEnabled := lb.ProviderParameters.GCP.ClientAccess == operatorv1.GCPGlobalAccess\n\t\t\t\t\tservice.Annotations[GCPGlobalAccessAnnotation] = strconv.FormatBool(globalAccessEnabled)\n\t\t\t\t}\n\t\t\t}\n\t\t} else 
{\n\t\t\tannotation := externalLBAnnotations[platform.Type]\n\t\t\tfor name, value := range annotation {\n\t\t\t\tservice.Annotations[name] = value\n\t\t\t}\n\t\t}\n\t\tswitch platform.Type {\n\t\tcase configv1.AWSPlatformType:\n\t\t\tservice.Annotations[awsLBHealthCheckIntervalAnnotation] = awsLBHealthCheckIntervalDefault\n\t\t\tif proxyNeeded {\n\t\t\t\tservice.Annotations[awsLBProxyProtocolAnnotation] = \"*\"\n\t\t\t}\n\t\t\tif lb != nil && lb.ProviderParameters != nil {\n\t\t\t\tif aws := lb.ProviderParameters.AWS; aws != nil && lb.ProviderParameters.Type == operatorv1.AWSLoadBalancerProvider {\n\t\t\t\t\tswitch aws.Type {\n\t\t\t\t\tcase operatorv1.AWSNetworkLoadBalancer:\n\t\t\t\t\t\tservice.Annotations[AWSLBTypeAnnotation] = AWSNLBAnnotation\n\t\t\t\t\t\t// NLBs require a different health check interval than CLBs.\n\t\t\t\t\t\t// See <https://bugzilla.redhat.com/show_bug.cgi?id=1908758>.\n\t\t\t\t\t\tservice.Annotations[awsLBHealthCheckIntervalAnnotation] = awsLBHealthCheckIntervalNLB\n\t\t\t\t\tcase operatorv1.AWSClassicLoadBalancer:\n\t\t\t\t\t\tif aws.ClassicLoadBalancerParameters != nil {\n\t\t\t\t\t\t\tif v := aws.ClassicLoadBalancerParameters.ConnectionIdleTimeout; v.Duration > 0 {\n\t\t\t\t\t\t\t\tservice.Annotations[awsELBConnectionIdleTimeoutAnnotation] = strconv.FormatUint(uint64(v.Round(time.Second).Seconds()), 10)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif platform.AWS != nil && len(platform.AWS.ResourceTags) > 0 {\n\t\t\t\tvar additionalTags []string\n\t\t\t\tfor _, userTag := range platform.AWS.ResourceTags {\n\t\t\t\t\tif len(userTag.Key) > 0 {\n\t\t\t\t\t\tadditionalTags = append(additionalTags, userTag.Key+\"=\"+userTag.Value)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(additionalTags) > 0 {\n\t\t\t\t\tservice.Annotations[awsLBAdditionalResourceTags] = strings.Join(additionalTags, \",\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Set the load balancer for AWS to be as aggressive as Azure (2 fail @ 5s interval, 2 
healthy)\n\t\t\tservice.Annotations[awsLBHealthCheckTimeoutAnnotation] = awsLBHealthCheckTimeoutDefault\n\t\t\tservice.Annotations[awsLBHealthCheckUnhealthyThresholdAnnotation] = awsLBHealthCheckUnhealthyThresholdDefault\n\t\t\tservice.Annotations[awsLBHealthCheckHealthyThresholdAnnotation] = awsLBHealthCheckHealthyThresholdDefault\n\t\tcase configv1.IBMCloudPlatformType, configv1.PowerVSPlatformType:\n\t\t\t// Set ExternalTrafficPolicy to type Cluster - IBM's LoadBalancer impl is created within the cluster.\n\t\t\t// LB places VIP on one of the worker nodes, using keepalived to maintain the VIP and ensuring redundancy\n\t\t\t// LB relies on iptable rules kube-proxy puts in to send traffic from the VIP node to the cluster\n\t\t\t// If policy is local, traffic is only sent to pods on the local node, as such Cluster enables traffic to flow to all the pods in the cluster\n\t\t\tservice.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeCluster\n\t\t\tif proxyNeeded {\n\t\t\t\tservice.Annotations[iksLBEnableFeaturesAnnotation] = iksLBEnableFeaturesProxyProtocol\n\t\t\t}\n\n\t\tcase configv1.AlibabaCloudPlatformType:\n\t\t\tif !isInternal {\n\t\t\t\tservice.Annotations[alibabaCloudLBAddressTypeAnnotation] = alibabaCloudLBAddressTypeInternet\n\t\t\t}\n\t\t}\n\t\t// Azure load balancers are not customizable and are set to (2 fail @ 5s interval, 2 healthy)\n\t\t// GCP load balancers are not customizable and are set to (3 fail @ 8s interval, 1 healthy)\n\n\t\tif v, err := shouldUseLocalWithFallback(ci, service); err != nil {\n\t\t\treturn true, service, err\n\t\t} else if v {\n\t\t\tservice.Annotations[localWithFallbackAnnotation] = \"\"\n\t\t}\n\t}\n\n\tif ci.Spec.EndpointPublishingStrategy != nil {\n\t\tlb := ci.Spec.EndpointPublishingStrategy.LoadBalancer\n\t\tif lb != nil && len(lb.AllowedSourceRanges) > 0 {\n\t\t\tcidrs := make([]string, len(lb.AllowedSourceRanges))\n\t\t\tfor i, cidr := range lb.AllowedSourceRanges {\n\t\t\t\tcidrs[i] = 
string(cidr)\n\t\t\t}\n\t\t\tservice.Spec.LoadBalancerSourceRanges = cidrs\n\t\t}\n\t}\n\n\tservice.SetOwnerReferences([]metav1.OwnerReference{deploymentRef})\n\treturn true, service, nil\n}", "func (c *MockLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGroupName, loadBalancerName string, parameters network.LoadBalancer) error {\n\tif _, ok := c.LBs[loadBalancerName]; ok {\n\t\treturn nil\n\t}\n\tparameters.Name = &loadBalancerName\n\tc.LBs[loadBalancerName] = parameters\n\treturn nil\n}", "func TestRandomLBWhenNodeFailBalanced(t *testing.T) {\n\tdefer func() {\n\t\t// clear healthStore\n\t\thealthStore = sync.Map{}\n\t}()\n\n\tpool := makePool(4)\n\tvar hosts []types.Host\n\tvar unhealthyIdx = 2\n\tfor i := 0; i < 4; i++ {\n\t\thost := &mockHost{\n\t\t\taddr: pool.Get(),\n\t\t}\n\t\tif i == unhealthyIdx {\n\t\t\thost.SetHealthFlag(api.FAILED_ACTIVE_HC)\n\t\t}\n\t\thosts = append(hosts, host)\n\t}\n\n\ths := &hostSet{}\n\ths.setFinalHost(hosts)\n\tlb := newRandomLoadBalancer(nil, hs)\n\ttotal := 1000000\n\trunCase := func(subTotal int) {\n\t\tresults := map[string]int{}\n\t\tfor i := 0; i < subTotal; i++ {\n\t\t\th := lb.ChooseHost(nil)\n\t\t\tv, ok := results[h.AddressString()]\n\t\t\tif !ok {\n\t\t\t\tv = 0\n\t\t\t}\n\t\t\tresults[h.AddressString()] = v + 1\n\t\t}\n\t\tfor i := 0; i < 4; i++ {\n\t\t\taddr := hosts[i].AddressString()\n\t\t\trate := float64(results[addr]) / float64(subTotal)\n\t\t\texpected := 0.33333\n\t\t\tif i == unhealthyIdx {\n\t\t\t\texpected = 0.000\n\t\t\t}\n\t\t\tif math.Abs(rate-expected) > 0.1 { // no lock, have deviation 10% is acceptable\n\t\t\t\tt.Errorf(\"%s request rate is %f, expected %f\", addr, rate, expected)\n\t\t\t}\n\t\t\tt.Logf(\"%s request rate is %f, request count: %d\", addr, rate, results[addr])\n\t\t}\n\t}\n\t// simple test\n\trunCase(total)\n\t// concurr\n\twg := sync.WaitGroup{}\n\tsubTotal := total / 10\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() 
{\n\t\t\trunCase(subTotal)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}", "func TestRemoveServiceLabel(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"blue\",\n\t\t},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tsvc1 := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"color\": \"blue\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\tfixture.coreCS.Tracker().Add(\n\t\tsvc1,\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive exactly zero ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tsvc1 = svc1.DeepCopy()\n\tsvc1.Labels = 
map[string]string{\n\t\t\"color\": \"green\",\n\t}\n\n\t_, err := fixture.svcClient.Services(svc1.Namespace).Update(context.Background(), svc1, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func createLBServiceWithIngressIP(cs kubernetes.Interface, namespace, name string, protocol v1.IPFamily, annotations, selector map[string]string, port int32, tweak ...func(svc *v1.Service)) *v1.Service {\n\tsvc := &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tSelector: selector,\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t\tPort: port,\n\t\t\t\t},\n\t\t\t},\n\t\t\tType: v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilies: []v1.IPFamily{protocol},\n\t\t},\n\t}\n\n\tfor _, f := range tweak {\n\t\tf(svc)\n\t}\n\n\tsvc, err := cs.CoreV1().Services(namespace).Create(context.TODO(), svc, metav1.CreateOptions{})\n\tframework.ExpectNoError(err, \"failed to create loadbalancer service\")\n\n\tgomega.Eventually(func() error {\n\t\tsvc, err = cs.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\treturn fmt.Errorf(\"expected 1 lb ingress ip, got %v as ips\", svc.Status.LoadBalancer.Ingress)\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress[0].IP) == 0 {\n\t\t\treturn fmt.Errorf(\"expected lb ingress to be set\")\n\t\t}\n\n\t\treturn nil\n\t}, 5*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred(), \"failed to set loadbalancer's ingress ip\")\n\n\treturn svc\n}", "func HandleLoadbalancerGetSuccessfully(t *testing.T) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, 
\"GET\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestHeader(t, r, \"Accept\", \"application/json\")\n\n\t\tfmt.Fprintf(w, SingleLoadbalancerBody)\n\t})\n}", "func createLBServiceWithIngressIP(cs kubernetes.Interface, namespace, name string, protocol v1.IPFamily, selector map[string]string, port int32, tweak ...func(svc *v1.Service)) *v1.Service {\n\tsvc := &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tSelector: selector,\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t\tPort: port,\n\t\t\t\t},\n\t\t\t},\n\t\t\tType: v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilies: []v1.IPFamily{protocol},\n\t\t},\n\t}\n\n\tfor _, f := range tweak {\n\t\tf(svc)\n\t}\n\n\tsvc, err := cs.CoreV1().Services(namespace).Create(context.TODO(), svc, metav1.CreateOptions{})\n\tframework.ExpectNoError(err, \"failed to create loadbalancer service\")\n\n\tgomega.Eventually(func() error {\n\t\tsvc, err = cs.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\treturn fmt.Errorf(\"expected 1 lb ingress ip, got %v as ips\", svc.Status.LoadBalancer.Ingress)\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress[0].IP) == 0 {\n\t\t\treturn fmt.Errorf(\"expected lb ingress to be set\")\n\t\t}\n\n\t\treturn nil\n\t}, 5*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred(), \"failed to set loadbalancer's ingress ip\")\n\n\treturn svc\n}", "func (m *MockLoadBalancerServiceIface) NewListNetscalerLoadBalancersParams() *ListNetscalerLoadBalancersParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewListNetscalerLoadBalancersParams\")\n\tret0, _ := ret[0].(*ListNetscalerLoadBalancersParams)\n\treturn ret0\n}", "func TestChangePoolSelector(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", 
[]string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\"color\": \"red\"},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"color\": \"red\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive exactly zero ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolA.Spec.ServiceSelector.MatchLabels = map[string]string{\"color\": \"green\"}\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() 
{\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (m *MockLoadBalancerServiceIface) ConfigureNetscalerLoadBalancer(p *ConfigureNetscalerLoadBalancerParams) (*NetscalerLoadBalancerResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ConfigureNetscalerLoadBalancer\", p)\n\tret0, _ := ret[0].(*NetscalerLoadBalancerResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (l *SharedLoadBalancer) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tklog.Infof(\"EnsureLoadBalancer: called with service %s/%s, node: %d\",\n\t\tservice.Namespace, service.Name, len(nodes))\n\n\tif err := ensureLoadBalancerValidation(service, nodes); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// get exits or create a new ELB instance\n\tloadbalancer, err := l.getLoadBalancerInstance(ctx, clusterName, service)\n\tspecifiedID := getStringFromSvsAnnotation(service, ElbID, \"\")\n\tif common.IsNotFound(err) && specifiedID != \"\" {\n\t\treturn nil, err\n\t}\n\tif err != nil && common.IsNotFound(err) {\n\t\tsubnetID, e := l.getSubnetID(service, nodes[0])\n\t\tif e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\tloadbalancer, err = l.createLoadbalancer(clusterName, subnetID, service)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// query ELB listeners list\n\tlisteners, err := l.sharedELBClient.ListListeners(&elbmodel.ListListenersRequest{LoadbalancerId: &loadbalancer.Id})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, port := range service.Spec.Ports {\n\t\tlistener := l.filterListenerByPort(listeners, service, port)\n\t\t// add or update listener\n\t\tif listener == nil {\n\t\t\tlistener, err = l.createListener(loadbalancer.Id, service, port)\n\t\t} else {\n\t\t\terr = l.updateListener(listener, service)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlisteners = popListener(listeners, listener.Id)\n\n\t\t// query pool or create 
pool\n\t\tpool, err := l.getPool(loadbalancer.Id, listener.Id)\n\t\tif err != nil && common.IsNotFound(err) {\n\t\t\tpool, err = l.createPool(listener, service)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// add new members and remove the obsolete members.\n\t\tif err = l.addOrRemoveMembers(loadbalancer, service, pool, port, nodes); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// add or remove health monitor\n\t\tif err = l.ensureHealthCheck(loadbalancer.Id, pool, port, service, nodes[0]); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif specifiedID == \"\" {\n\t\t// All remaining listeners are obsolete, delete them\n\t\terr = l.deleteListeners(loadbalancer.Id, listeners)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tingressIP := loadbalancer.VipAddress\n\tpublicIPAddr, err := l.createOrAssociateEIP(loadbalancer, service)\n\tif err == nil {\n\t\tif publicIPAddr != \"\" {\n\t\t\tingressIP = publicIPAddr\n\t\t}\n\n\t\treturn &corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{{IP: ingressIP}},\n\t\t}, nil\n\t}\n\n\t// rollback\n\tklog.Errorf(\"rollback:failed to create the EIP, delete ELB instance created, error: %s\", err)\n\terrs := []error{err}\n\terr = l.EnsureLoadBalancerDeleted(ctx, clusterName, service)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t\tklog.Errorf(\"rollback: error deleting ELB instance: %s\", err)\n\t}\n\treturn nil, errors.NewAggregate(errs)\n}", "func (m *MockLoadBalancerServiceIface) NewCreateLoadBalancerParams(algorithm string, instanceport int, name, networkid, scheme, sourceipaddressnetworkid string, sourceport int) *CreateLoadBalancerParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewCreateLoadBalancerParams\", algorithm, instanceport, name, networkid, scheme, sourceipaddressnetworkid, sourceport)\n\tret0, _ := ret[0].(*CreateLoadBalancerParams)\n\treturn ret0\n}", "func HandleLoadbalancerCreationSuccessfully(t *testing.T, response string) 
{\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestJSONRequest(t, r, `{\n\t\t\t\"loadbalancer\": {\n\t\t\t\t\"name\": \"db_lb\",\n\t\t\t\t\"vip_port_id\": \"2bf413c8-41a9-4477-b505-333d5cbe8b55\",\n\t\t\t\t\"vip_subnet_id\": \"9cedb85d-0759-4898-8a4b-fa5a5ea10086\",\n\t\t\t\t\"vip_address\": \"10.30.176.48\",\n\t\t\t\t\"flavor_id\": \"bba40eb2-ee8c-11e9-81b4-2a2ae2dbcce4\",\n\t\t\t\t\"provider\": \"haproxy\",\n\t\t\t\t\"admin_state_up\": true,\n\t\t\t\t\"tags\": [\"test\", \"stage\"]\n\t\t\t}\n\t\t}`)\n\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tfmt.Fprintf(w, response)\n\t})\n}", "func TestRemoveRequestedIP(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tsvc1 := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"io.cilium/lb-ipam-ips\": \"10.0.10.124,10.0.10.125\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.123\",\n\t\t},\n\t}\n\n\tfixture.coreCS.Tracker().Add(\n\t\tsvc1,\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 3 {\n\t\t\tt.Error(\"Expected service to receive exactly three ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer 
fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 2 {\n\t\t\tt.Error(\"Expected service to receive exactly two ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tsvc1 = svc1.DeepCopy()\n\tsvc1.Annotations = map[string]string{\n\t\t\"io.cilium/lb-ipam-ips\": \"10.0.10.124\",\n\t}\n\n\t_, err := fixture.svcClient.Services(svc1.Namespace).Update(context.Background(), svc1, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.123\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.123' to be allocated\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.124\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.124' to be allocated\")\n\t}\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.125\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.125' to be released\")\n\t}\n}", "func TestAllocHappyPath(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\", \"FF::0/48\"}),\n\t}, true, true, nil)\n\n\t// Initially request only an IPv4\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: 
\"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action.(k8s_testing.PatchAction))\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Unexpected condition type assigned to service\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Unexpected condition status assigned to service\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service to be updated\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tsvc, err := fixture.svcClient.Services(\"default\").Get(context.Background(), \"service-a\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Switch to requesting an IPv6 address\n\tsvc.Spec.IPFamilies = []slim_core_v1.IPFamily{\n\t\tslim_core_v1.IPv6Protocol,\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) 
bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action.(k8s_testing.PatchAction))\n\n\t\t// The second update allocates the new IPv6\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() != nil {\n\t\t\tt.Error(\"Expected service to receive a IPv6 address\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t_, err = fixture.svcClient.Services(\"default\").Update(context.Background(), svc, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update after update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\t// Allow time for additional events to fire\n\ttime.Sleep(100 * time.Millisecond)\n\n\tsvc, err = fixture.svcClient.Services(\"default\").Get(context.Background(), \"service-a\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Switch back to requesting an IPv4 address\n\tsvc.Spec.IPFamilies = []slim_core_v1.IPFamily{\n\t\tslim_core_v1.IPv4Protocol,\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action.(k8s_testing.PatchAction))\n\n\t\t// The second update allocates the new IPv4\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t_, err = 
fixture.svcClient.Services(\"default\").Update(context.Background(), svc, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update after update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n}", "func (s *ClusterScope) LBSpecs() []azure.ResourceSpecGetter {\n\tspecs := []azure.ResourceSpecGetter{\n\t\t&loadbalancers.LBSpec{\n\t\t\t// API Server LB\n\t\t\tName: s.APIServerLB().Name,\n\t\t\tResourceGroup: s.ResourceGroup(),\n\t\t\tSubscriptionID: s.SubscriptionID(),\n\t\t\tClusterName: s.ClusterName(),\n\t\t\tLocation: s.Location(),\n\t\t\tExtendedLocation: s.ExtendedLocation(),\n\t\t\tVNetName: s.Vnet().Name,\n\t\t\tVNetResourceGroup: s.Vnet().ResourceGroup,\n\t\t\tSubnetName: s.ControlPlaneSubnet().Name,\n\t\t\tFrontendIPConfigs: s.APIServerLB().FrontendIPs,\n\t\t\tAPIServerPort: s.APIServerPort(),\n\t\t\tType: s.APIServerLB().Type,\n\t\t\tSKU: s.APIServerLB().SKU,\n\t\t\tRole: infrav1.APIServerRole,\n\t\t\tBackendPoolName: s.APIServerLB().BackendPool.Name,\n\t\t\tIdleTimeoutInMinutes: s.APIServerLB().IdleTimeoutInMinutes,\n\t\t\tAdditionalTags: s.AdditionalTags(),\n\t\t},\n\t}\n\n\t// Node outbound LB\n\tif s.NodeOutboundLB() != nil {\n\t\tspecs = append(specs, &loadbalancers.LBSpec{\n\t\t\tName: s.NodeOutboundLB().Name,\n\t\t\tResourceGroup: s.ResourceGroup(),\n\t\t\tSubscriptionID: s.SubscriptionID(),\n\t\t\tClusterName: s.ClusterName(),\n\t\t\tLocation: s.Location(),\n\t\t\tExtendedLocation: s.ExtendedLocation(),\n\t\t\tVNetName: s.Vnet().Name,\n\t\t\tVNetResourceGroup: s.Vnet().ResourceGroup,\n\t\t\tFrontendIPConfigs: s.NodeOutboundLB().FrontendIPs,\n\t\t\tType: s.NodeOutboundLB().Type,\n\t\t\tSKU: s.NodeOutboundLB().SKU,\n\t\t\tBackendPoolName: s.NodeOutboundLB().BackendPool.Name,\n\t\t\tIdleTimeoutInMinutes: s.NodeOutboundLB().IdleTimeoutInMinutes,\n\t\t\tRole: infrav1.NodeOutboundRole,\n\t\t\tAdditionalTags: 
s.AdditionalTags(),\n\t\t})\n\t}\n\n\t// Control Plane Outbound LB\n\tif s.ControlPlaneOutboundLB() != nil {\n\t\tspecs = append(specs, &loadbalancers.LBSpec{\n\t\t\tName: s.ControlPlaneOutboundLB().Name,\n\t\t\tResourceGroup: s.ResourceGroup(),\n\t\t\tSubscriptionID: s.SubscriptionID(),\n\t\t\tClusterName: s.ClusterName(),\n\t\t\tLocation: s.Location(),\n\t\t\tExtendedLocation: s.ExtendedLocation(),\n\t\t\tVNetName: s.Vnet().Name,\n\t\t\tVNetResourceGroup: s.Vnet().ResourceGroup,\n\t\t\tFrontendIPConfigs: s.ControlPlaneOutboundLB().FrontendIPs,\n\t\t\tType: s.ControlPlaneOutboundLB().Type,\n\t\t\tSKU: s.ControlPlaneOutboundLB().SKU,\n\t\t\tBackendPoolName: s.ControlPlaneOutboundLB().BackendPool.Name,\n\t\t\tIdleTimeoutInMinutes: s.ControlPlaneOutboundLB().IdleTimeoutInMinutes,\n\t\t\tRole: infrav1.ControlPlaneOutboundRole,\n\t\t\tAdditionalTags: s.AdditionalTags(),\n\t\t})\n\t}\n\n\treturn specs\n}", "func CreateLoadBalancer(t *testing.T, client *gophercloud.ServiceClient, subnetID string) (*loadbalancers.LoadBalancer, error) {\n\tlbName := tools.RandomString(\"TESTACCT-\", 8)\n\tlbDescription := tools.RandomString(\"TESTACCT-DESC-\", 8)\n\n\tt.Logf(\"Attempting to create loadbalancer %s on subnet %s\", lbName, subnetID)\n\n\tcreateOpts := loadbalancers.CreateOpts{\n\t\tName: lbName,\n\t\tDescription: lbDescription,\n\t\tVipSubnetID: subnetID,\n\t\tAdminStateUp: gophercloud.Enabled,\n\t}\n\n\tlb, err := loadbalancers.Create(client, createOpts).Extract()\n\tif err != nil {\n\t\treturn lb, err\n\t}\n\n\tt.Logf(\"Successfully created loadbalancer %s on subnet %s\", lbName, subnetID)\n\tt.Logf(\"Waiting for loadbalancer %s to become active\", lbName)\n\n\tif err := WaitForLoadBalancerState(client, lb.ID, \"ACTIVE\"); err != nil {\n\t\treturn lb, err\n\t}\n\n\tt.Logf(\"LoadBalancer %s is active\", lbName)\n\n\tth.AssertEquals(t, lb.Name, lbName)\n\tth.AssertEquals(t, lb.Description, lbDescription)\n\tth.AssertEquals(t, lb.VipSubnetID, subnetID)\n\tth.AssertEquals(t, 
lb.AdminStateUp, true)\n\n\treturn lb, nil\n}", "func (m *MockLoadBalancerServiceIface) GetLoadBalancerID(name string, opts ...OptionFunc) (string, int, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{name}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"GetLoadBalancerID\", varargs...)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(int)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (p *provider) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn p.loadBalancer, true\n}", "func TestPoolSelectorBasic(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tselector := slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"red\",\n\t\t},\n\t}\n\tpoolA.Spec.ServiceSelector = &selector\n\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"red-service\" {\n\t\t\tt.Error(\"Expected update from 'red-service'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be 
svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tmatchingService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"red-service\",\n\t\t\tUID: serviceAUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"color\": \"red\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilyPolicy: &policy,\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Create(context.Background(), matchingService, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"blue-service\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to not receive any ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tnonMatchingService := 
&slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"blue-service\",\n\t\t\tUID: serviceBUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"color\": \"blue\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilyPolicy: &policy,\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Create(context.Background(), nonMatchingService, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func generateLoadBalancers(cr *svcapitypes.Service) []*svcsdk.LoadBalancer {\n\tloadBalancers := []*svcsdk.LoadBalancer{}\n\n\tif cr.Spec.ForProvider.LoadBalancers == nil {\n\t\treturn loadBalancers\n\t}\n\n\tfor _, loadBalancer := range cr.Spec.ForProvider.LoadBalancers {\n\t\tconvertedLB := &svcsdk.LoadBalancer{}\n\t\tconvertedLB.ContainerName = loadBalancer.ContainerName\n\t\tconvertedLB.ContainerPort = loadBalancer.ContainerPort\n\t\tconvertedLB.LoadBalancerName = loadBalancer.LoadBalancerName\n\t\tconvertedLB.TargetGroupArn = loadBalancer.TargetGroupARN\n\n\t\tloadBalancers = append(loadBalancers, convertedLB)\n\t}\n\treturn loadBalancers\n}", "func TestRangesFull(t *testing.T) {\n\tinitDone := make(chan struct{})\n\t// A single /32 can't be used to allocate since we always reserve 2 IPs,\n\t// the network and broadcast address, which in the case of a /32 means it is always full.\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.123/32\", \"FF::123/128\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: 
slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv6Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-a\" {\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\treturn false\n\t\t}\n\n\t\tif svc.Name != \"service-b\" {\n\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 
1 {\n\t\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected two service updates\")\n\t}\n}", "func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tlb, err := l.fetchLoadBalancer(ctx, clusterName, service)\n\tswitch err {\n\tcase nil:\n\t\t// continue\n\tcase LoadBalancerNotFound:\n\t\t// create LoadBalancer\n\t\tlb, err = l.createLoadBalancer(ctx, clusterName, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\t// any kind of Error\n\t\tklog.Errorf(\"error getting loadbalancer for service %s: %v\", service.Name, err)\n\t\treturn nil, err\n\t}\n\n\tif service.Spec.LoadBalancerIP != \"\" && service.Spec.LoadBalancerIP != lb.IP[0].IPAddress {\n\t\terr = l.deleteLoadBalancer(ctx, lb, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlb, err = l.createLoadBalancer(ctx, clusterName, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif lb.Status != scwlb.LbStatusReady {\n\t\treturn nil, LoadBalancerNotReady\n\t}\n\n\terr = l.updateLoadBalancer(ctx, lb, service, nodes)\n\tif err != nil {\n\t\tklog.Errorf(\"error updating loadbalancer for service %s: %v\", 
service.Name, err)\n\t\treturn nil, err\n\t}\n\n\tstatus := &v1.LoadBalancerStatus{}\n\tstatus.Ingress = make([]v1.LoadBalancerIngress, len(lb.IP))\n\tfor idx, ip := range lb.IP {\n\t\tif getUseHostname(service) {\n\t\t\tstatus.Ingress[idx].Hostname = ip.Reverse\n\t\t} else {\n\t\t\tstatus.Ingress[idx].IP = ip.IPAddress\n\t\t}\n\t}\n\n\treturn status, nil\n}", "func (vs *VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn nil, false\n}", "func checkLoadBalancerServiceType(clientset kubernetes.Interface, log logger.Logger) error {\n\tconst (\n\t\ttestServiceName = \"kubernetes-test-service\"\n\t\ttestNamespace = \"default\"\n\t\twaitTimeoutSec = 300\n\t)\n\tlog.Infof(\"Creating test service %s/%s\", testServiceName, testNamespace)\n\tsvc, err := createTestLoadBalancer(testServiceName, testNamespace, clientset)\n\tif err != nil || svc == nil {\n\t\treturn errors.Wrap(err, \"error creating test service\")\n\t}\n\n\t// handle service deletion\n\tdefer func() {\n\t\tclientset.CoreV1().Services(testNamespace).Delete(context.TODO(), testServiceName, v1.DeleteOptions{})\n\t\tlog.Debugf(\"test service %s deleted\", testNamespace, testServiceName)\n\t}()\n\n\tlog.Infof(\"Checking LoadBalancer service type\")\n\tfor i := 1; i < waitTimeoutSec; i += 1 {\n\t\ttime.Sleep(time.Second)\n\t\ts, err := clientset.CoreV1().Services(testNamespace).Get(context.TODO(), testServiceName, v1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error getting test service\")\n\t\t}\n\t\tif loadBalancerProvisioned(s) {\n\t\t\treturn nil\n\t\t}\n\t\tcontinue\n\t}\n\treturn errors.New(\"Service with LoadBalancer didn't get ingress address\")\n}", "func (c *LoadBalancerClient) Details(uniqID string) (*LoadBalancer, error) {\n\tvar result LoadBalancer\n\tparams := LoadBalancerParams{UniqID: uniqID}\n\n\terr := c.Backend.CallIntoInterface(\"v1/Network/LoadBalancer/details\", params, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &result, nil\n}", 
"func (bc *Baiducloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn bc, true\n}", "func ExampleELB_DescribeLoadBalancers_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.DescribeLoadBalancersInput{\n\t\tLoadBalancerNames: []*string{\n\t\t\taws.String(\"my-load-balancer\"),\n\t\t},\n\t}\n\n\tresult, err := svc.DescribeLoadBalancers(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeDependencyThrottleException:\n\t\t\t\tfmt.Println(elb.ErrCodeDependencyThrottleException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func ELBv2LoadBalancerList() (*elbv2.DescribeLoadBalancersOutput, error) {\n\tsvc := elbv2.New(session.New(), &aws.Config{\n\t\tRegion: aws.String(beego.AppConfig.String(\"awsRegion\")),\n\t})\n\n\tparams := &elbv2.DescribeLoadBalancersInput{}\n\n\tresp, err := svc.DescribeLoadBalancers(params)\n\n\tif err != nil {\n\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\tbeego.Error(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())\n\t\t\tif reqErr, ok := err.(awserr.RequestFailure); ok {\n\t\t\t\tbeego.Error(\n\t\t\t\t\treqErr.Code(),\n\t\t\t\t\treqErr.Message(),\n\t\t\t\t\treqErr.StatusCode(),\n\t\t\t\t\treqErr.RequestID(),\n\t\t\t\t)\n\t\t\t}\n\t\t} else {\n\t\t\tbeego.Debug(err.Error())\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (c *LoadBalancerCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Created\n\tch <- c.Services\n\tch <- c.MaxServices\n\tch <- c.Targets\n\tch <- c.MaxTargets\n\tch <- c.TargetsHealthy\n\tch <- c.TargetsUnhealthy\n\tch <- 
c.TargetsUnknown\n\tch <- c.AssignedCertificates\n\tch <- c.MaxAssignedCertificates\n\tch <- c.IngoingTraffic\n\tch <- c.OutgoingTraffic\n\tch <- c.IncludedTraffic\n\tch <- c.Connections\n\tch <- c.MaxConnections\n\tch <- c.ConnectionsPerSecond\n\tch <- c.RequestsPerSecond\n\tch <- c.IncomingBandwidth\n\tch <- c.OutgoingBandwidth\n}", "func (c *NSXClient) CreateLoadBalancer(opts LoadBalancerOptions) (string, bool, error) {\n\tctx := c.api.Context\n\tapi := c.api.ServicesApi\n\trouting := c.api.LogicalRoutingAndServicesApi\n\n\texistingServer, err := c.GetLoadBalancer(opts.Name)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\tif existingServer != nil {\n\t\treturn existingServer.IpAddress, true, nil\n\t}\n\n\tt0, resp, err := routing.ReadLogicalRouter(ctx, opts.Tier0)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"failed to read T0 router %s: %s\", opts.Tier0, errorString(resp, err))\n\t}\n\n\tt0Port, resp, err := routing.CreateLogicalRouterLinkPortOnTier0(ctx, manager.LogicalRouterLinkPortOnTier0{\n\t\tLogicalRouterId: t0.Id,\n\t\tDisplayName: \"lb-\" + opts.Name + \"-T1\",\n\t})\n\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create T0 Local router port %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tt1, resp, err := routing.CreateLogicalRouter(ctx, manager.LogicalRouter{\n\t\tRouterType: \"TIER1\",\n\t\tDisplayName: \"lb-\" + opts.Name,\n\t\tEdgeClusterId: t0.EdgeClusterId,\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create T1 router %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\t_, resp, err = routing.UpdateAdvertisementConfig(ctx, t1.Id, manager.AdvertisementConfig{\n\t\tAdvertiseLbVip: true,\n\t\tAdvertiseLbSnatIp: true,\n\t\tEnabled: true,\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to update advertisement config %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tc.Infof(\"Created T1 router %s/%s\", t1.DisplayName, t1.Id)\n\n\t_, resp, err = 
routing.CreateLogicalRouterLinkPortOnTier1(ctx, manager.LogicalRouterLinkPortOnTier1{\n\t\tLogicalRouterId: t1.Id,\n\t\tDisplayName: t0.DisplayName + \"-uplink\",\n\t\tLinkedLogicalRouterPortId: &common.ResourceReference{\n\t\t\tTargetType: \"LogicalPort\",\n\t\t\tTargetId: t0Port.Id,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"failed to link T1 (%s) to T0 (%s): %s\", t1.Id, t0Port.Id, errorString(resp, err))\n\t}\n\n\tgroup, err := c.CreateOrUpdateNSGroup(opts.Name, \"LogicalPort\", opts.MemberTags)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\tvar monitorID string\n\tif opts.Protocol == TCPProtocol {\n\t\tmonitorID, err = c.GetOrCreateTCPHealthCheck(opts.Ports[0])\n\t\tif err != nil {\n\t\t\treturn \"\", false, fmt.Errorf(\"unable to create tcp loadbalancer monitor: %v\", err)\n\t\t}\n\t} else {\n\t\tmonitorID, err = c.GetOrCreateHTTPHealthCheck(opts.Ports[0])\n\t\tif err != nil {\n\t\t\treturn \"\", false, fmt.Errorf(\"unable to create http loadbalancer monitor: %v\", err)\n\t\t}\n\t}\n\tpool, resp, err := api.CreateLoadBalancerPool(ctx, loadbalancer.LbPool{\n\t\tId: opts.Name,\n\t\tActiveMonitorIds: []string{monitorID},\n\t\tSnatTranslation: &loadbalancer.LbSnatTranslation{\n\t\t\tType_: \"LbSnatAutoMap\",\n\t\t},\n\t\tMemberGroup: &loadbalancer.PoolMemberGroup{\n\t\t\tGroupingObject: &common.ResourceReference{\n\t\t\t\tTargetType: \"NSGroup\",\n\t\t\t\tTargetId: group.Id,\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create load balancer pool %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tip, err := c.AllocateIP(opts.IPPool)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to allocate VIP %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tserver, resp, err := api.CreateLoadBalancerVirtualServer(ctx, loadbalancer.LbVirtualServer{\n\t\tId: opts.Name,\n\t\tEnabled: true,\n\t\tIpAddress: ip,\n\t\tIpProtocol: opts.Protocol,\n\t\tPorts: 
opts.Ports,\n\t\tPoolId: pool.Id,\n\t})\n\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create virtual server %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tlb := loadbalancer.LbService{\n\t\tDisplayName: opts.Name,\n\t\tAttachment: &common.ResourceReference{\n\t\t\tTargetType: \"LogicalRouter\",\n\t\t\tTargetId: t1.Id,\n\t\t},\n\t\tEnabled: true,\n\t\tErrorLogLevel: \"INFO\",\n\t\tSize: \"SMALL\",\n\t\tVirtualServerIds: []string{server.Id},\n\t}\n\n\t_, resp, err = api.CreateLoadBalancerService(c.api.Context, lb)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create load balancer %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tc.Infof(\"Created LoadBalancer service: %s/%s\", server.Id, ip)\n\treturn ip, false, nil\n}", "func (bc *Baiducloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {\n\t// workaround to support old version, can be removed if not support old version\n\tbc.workAround(service)\n\tresult, err := ExtractServiceAnnotation(service)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tif len(result.CceAutoAddLoadBalancerId) == 0 {\n\t\treturn nil, false, nil\n\t}\n\tlb, exists, err := bc.getBCELoadBalancerById(result.CceAutoAddLoadBalancerId)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tif !exists {\n\t\treturn nil, false, nil\n\t}\n\n\tvar ip string\n\tif result.LoadBalancerInternalVpc == \"true\" {\n\t\tip = lb.Address // internal vpc ip\n\t} else {\n\t\tip = lb.PublicIp // EIP\n\t}\n\tglog.V(3).Infof(\"[%v %v] GetLoadBalancer ip: %s\", service.Namespace, service.Name, ip)\n\n\treturn &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: ip}}}, true, nil\n}", "func (r *serviceStatusResolver) LoadBalancer(ctx context.Context) (edgecluster.LoadBalancerStatusResolverContract, error) {\n\tif r.serviceStatus.LoadBalancer == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn 
r.resolverCreator.NewLoadBalancerStatusResolver(ctx, r.serviceStatus.LoadBalancer)\n}", "func (c *cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\tklog.V(4).Infof(\"LoadBalancer called\")\n\treturn c, true\n}", "func (m *MockLoadBalancerServiceIface) AddNetscalerLoadBalancer(p *AddNetscalerLoadBalancerParams) (*AddNetscalerLoadBalancerResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddNetscalerLoadBalancer\", p)\n\tret0, _ := ret[0].(*AddNetscalerLoadBalancerResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func Test_testHealth(t *testing.T) {\n\tinitMetadata() // Used from metadata_test.go\n\n\tport := \"80\"\n\tunhealthyTHDs := []*elbv2.TargetHealthDescription{}\n\thealthyTHDs := []*elbv2.TargetHealthDescription{\n\t\t{\n\t\t\tHealthCheckPort: &port,\n\t\t},\n\t}\n\ttgArn := \"arn:1234\"\n\tcontainerID := \"123123412\"\n\tinvalidContainerID := \"111111\"\n\n\tsetupCache(\"123123412\", \"instance-123\", \"correct-lb-dnsname\", 1234, 9001, tgArn, unhealthyTHDs)\n\n\tt.Run(\"Should return STARTING because of unhealthy targets\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, unhealthyTHDs)\n\t\tvar previousStatus fargo.StatusType\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.STARTING\n\t\twantedNow := fargo.STARTING\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. 
Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of healthy targets 1\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should fail gracefully\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.STARTING\n\t\twantedNow := fargo.UNKNOWN\n\n\t\tchange := determineNewEurekaStatus(invalidContainerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of eureka status\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, unhealthyTHDs)\n\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UP\n\t\twantedReg := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wantedReg {\n\t\t\tt.Errorf(\"Should return %v status for reg status. 
Returned %v\", wantedReg, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of healthy targets 2\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\n\t\tpreviousStatus := fargo.STARTING\n\t\teurekaStatus := fargo.STARTING\n\t\twantedReg := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wantedReg {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wantedReg, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n}", "func init() {\n\tbalancer.Register(orcaLBBuilder{})\n}", "func (c *cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn &loadbalancer{\n\t\tnamespace: c.namespace,\n\t\tkubevirt: c.kubevirt,\n\t}, true\n}", "func (m *MockLoadBalancerServiceIface) DeleteLoadBalancer(p *DeleteLoadBalancerParams) (*DeleteLoadBalancerResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteLoadBalancer\", p)\n\tret0, _ := ret[0].(*DeleteLoadBalancerResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (bc *Baiducloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {\n\t// workaround to support old version, can be removed if not support old version\n\tbc.workAround(service)\n\tresult, err := ExtractServiceAnnotation(service)\n\tif err != nil {\n\t\t// if annotation has error, then creation must be failed. 
So return nil to tell k8s lb has been deleted.\n\t\treturn nil\n\t}\n\tserviceName := getServiceName(service)\n\tif len(result.CceAutoAddLoadBalancerId) == 0 {\n\t\tglog.V(1).Infof(\"[%v %v] EnsureLoadBalancerDeleted: target load balancer not create successful. So, no need to delete BLB and EIP\", serviceName, clusterName)\n\t\treturn nil\n\t}\n\n\tglog.V(2).Infof(\"[%v %v] EnsureLoadBalancerDeleted: START lbId=%q\", serviceName, clusterName, result.CceAutoAddLoadBalancerId)\n\n\t// reconcile logic is capable of fully reconcile, so we can use this to delete\n\tservice.Spec.Ports = []v1.ServicePort{}\n\n\tlb, existsLb, err := bc.getBCELoadBalancerById(result.CceAutoAddLoadBalancerId)\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: getBCELoadBalancer : %s\", serviceName, clusterName, lb.BlbId)\n\tif err != nil {\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted get error: %s\", serviceName, clusterName, err.Error())\n\t\treturn err\n\t}\n\tif !existsLb {\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: target blb not exist\", serviceName, clusterName)\n\t\treturn nil\n\t}\n\n\tif len(result.LoadBalancerExistId) == 0 { //user does not assign the blbid in the annotation\n\t\t// start delete blb and eip, delete blb first\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: Start delete BLB: %s\", serviceName, clusterName, lb.BlbId)\n\t\targs := blb.DeleteLoadBalancerArgs{\n\t\t\tLoadBalancerId: lb.BlbId,\n\t\t}\n\t\terr = bc.clientSet.Blb().DeleteLoadBalancer(&args)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if result.LoadBalancerExistId == \"error_blb_has_been_used\" {\n\t\treturn nil\n\t} else {\n\t\t//get allListeners & delete Listeners\n\t\tallListeners, err := bc.getAllListeners(lb)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(allListeners) > 0 {\n\t\t\terr = bc.deleteListener(lb, allListeners)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t//get allServers & delete 
BackendServers\n\t\tallServers, err := bc.getAllBackendServer(lb)\n\t\tvar removeList []string\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, server := range allServers {\n\t\t\tremoveList = append(removeList, server.InstanceId)\n\t\t}\n\n\t\tif len(removeList) > 0 {\n\t\t\targs := blb.RemoveBackendServersArgs{\n\t\t\t\tLoadBalancerId: lb.BlbId,\n\t\t\t\tBackendServerList: removeList,\n\t\t\t}\n\t\t\terr = bc.clientSet.Blb().RemoveBackendServers(&args)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// annotation \"LoadBalancerInternalVpc\" exists\n\t\tif result.LoadBalancerInternalVpc == \"true\" { //do not assign the eip\n\t\t\tif service.Annotations != nil {\n\t\t\t\tdelete(service.Annotations, ServiceAnnotationCceAutoAddLoadBalancerId)\n\t\t\t}\n\t\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: use LoadBalancerInternalVpc, no EIP to delete\", service.Namespace, service.Name)\n\t\t\t//todo recover eip for blb which has eip in the begin.\n\t\t\tglog.V(2).Infof(\"[%v %v] EnsureLoadBalancerDeleted: delete %v FINISH\", serviceName, clusterName, serviceName)\n\t\t\treturn nil\n\t\t}\n\n\t\t//annotation \"LoadBalancerIP\" exists\n\t\t//unbind eip & blb when user assigned the eip\n\t\tif len(service.Spec.LoadBalancerIP) != 0 { //use user’s eip, do not delete\n\t\t\tunbindArgs := eip.EipArgs{\n\t\t\t\tIp: service.Spec.LoadBalancerIP,\n\t\t\t}\n\t\t\t// just unbind, not delete\n\t\t\terr := bc.clientSet.Eip().UnbindEip(&unbindArgs)\n\t\t\tif err != nil {\n\t\t\t\tglog.V(3).Infof(\"Unbind Eip error : %s\", err.Error())\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\t//get targetEip\n\t\tvar targetEip string\n\t\tif len(service.Status.LoadBalancer.Ingress) != 0 { // P0: use service EXTERNAL_IP\n\t\t\ttargetEip = service.Status.LoadBalancer.Ingress[0].IP\n\t\t}\n\t\tif len(targetEip) == 0 { // P1: use BLB public ip\n\t\t\ttargetEip = lb.PublicIp\n\t\t}\n\t\t//users may unbind eip manually\n\t\tif len(targetEip) == 0 { // 
get none EIP\n\t\t\tglog.V(3).Infof(\"Eip does not exist, Delete completed \")\n\t\t\treturn nil\n\t\t}\n\n\t\t// blb if has eip in the begin\n\t\tif strings.Contains(lb.Desc, \"cce_auto_create_eip\") {\n\t\t\tglog.V(3).Infof(\"EnsureLoadBalancerDeleted: delete eip created by cce: %s\", lb.Desc)\n\t\t\tunbindArgs := eip.EipArgs{\n\t\t\t\tIp: targetEip,\n\t\t\t}\n\t\t\tlb.Desc = strings.TrimPrefix(lb.Desc, \"cce_auto_create_eip\")\n\t\t\tnewLbArg := blb.UpdateLoadBalancerArgs{\n\t\t\t\tLoadBalancerId: lb.BlbId,\n\t\t\t\tDesc: lb.Desc,\n\t\t\t\tName: lb.Name,\n\t\t\t}\n\t\t\terr = bc.clientSet.Blb().UpdateLoadBalancer(&newLbArg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// unbind & delete\n\t\t\terr := bc.clientSet.Eip().UnbindEip(&unbindArgs)\n\t\t\tif err != nil {\n\t\t\t\tglog.V(3).Infof(\"Unbind Eip error : %s\", err.Error())\n\t\t\t\tif strings.Contains(err.Error(), \"EipNotFound\") {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = bc.deleteEIP(targetEip)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif service.Annotations != nil {\n\t\t\tdelete(service.Annotations, ServiceAnnotationCceAutoAddLoadBalancerId)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// delete EIP\n\tif result.LoadBalancerInternalVpc == \"true\" { //do not assign the eip\n\t\tif service.Annotations != nil {\n\t\t\tdelete(service.Annotations, ServiceAnnotationCceAutoAddLoadBalancerId)\n\t\t}\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: use LoadBalancerInternalVpc, no EIP to delete\", service.Namespace, service.Name)\n\t\tglog.V(2).Infof(\"[%v %v] EnsureLoadBalancerDeleted: delete %v FINISH\", serviceName, clusterName, serviceName)\n\t\treturn nil\n\t}\n\tif len(service.Spec.LoadBalancerIP) != 0 { //use user’s eip, do not delete\n\t\tif service.Annotations != nil {\n\t\t\tdelete(service.Annotations, ServiceAnnotationCceAutoAddLoadBalancerId)\n\t\t}\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: LoadBalancerIP is set, not 
delete EIP.\", serviceName, clusterName)\n\t\tglog.V(2).Infof(\"[%v %v] EnsureLoadBalancerDeleted: delete %v FINISH\", serviceName, clusterName, serviceName)\n\t\treturn nil\n\t}\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: Start delete EIP: %s\", serviceName, clusterName, lb.PublicIp)\n\tvar targetEip string\n\tif len(service.Status.LoadBalancer.Ingress) != 0 { // P0: use service EXTERNAL_IP\n\t\ttargetEip = service.Status.LoadBalancer.Ingress[0].IP\n\t}\n\tif len(targetEip) == 0 { // P1: use BLB public ip\n\t\ttargetEip = lb.PublicIp\n\t}\n\tif len(targetEip) == 0 { // get none EIP\n\t\treturn fmt.Errorf(\"EnsureLoadBalancerDeleted failed: can not get a EIP to delete\")\n\t}\n\terr = bc.deleteEIP(targetEip)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif service.Annotations != nil {\n\t\tdelete(service.Annotations, ServiceAnnotationCceAutoAddLoadBalancerId)\n\t}\n\tglog.V(2).Infof(\"[%v %v] EnsureLoadBalancerDeleted: delete %v FINISH\", serviceName, clusterName, serviceName)\n\treturn nil\n}", "func (cloud *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn nil, false\n}", "func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn az, true\n}", "func (m *MockClusterScoper) APIServerLB() *v1beta1.LoadBalancerSpec {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLB\")\n\tret0, _ := ret[0].(*v1beta1.LoadBalancerSpec)\n\treturn ret0\n}", "func (m *MockLoadBalancerServiceIface) GetLoadBalancerByID(id string, opts ...OptionFunc) (*LoadBalancer, int, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{id}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"GetLoadBalancerByID\", varargs...)\n\tret0, _ := ret[0].(*LoadBalancer)\n\tret1, _ := ret[1].(int)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (m *MockLoadBalancerServiceIface) ListLBHealthCheckPolicies(p *ListLBHealthCheckPoliciesParams) (*ListLBHealthCheckPoliciesResponse, error) 
{\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListLBHealthCheckPolicies\", p)\n\tret0, _ := ret[0].(*ListLBHealthCheckPoliciesResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoadBalancerServiceIface) NewConfigureNetscalerLoadBalancerParams(lbdeviceid string) *ConfigureNetscalerLoadBalancerParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewConfigureNetscalerLoadBalancerParams\", lbdeviceid)\n\tret0, _ := ret[0].(*ConfigureNetscalerLoadBalancerParams)\n\treturn ret0\n}", "func TestDeleteAllBackendServers(t *testing.T) {\n\tcloud, _, resp, err := beforeTestBlb()\n\tif err != nil {\n\t\tt.Errorf(\"beforeTestBlb err, err: %v\", err)\n\t}\n\tctx := context.Background()\n\tlb := &blb.LoadBalancer{\n\t\tBlbId: resp.LoadBalancerId,\n\t}\n\t// bs is nil\n\terr = cloud.deleteAllBackendServers(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"deleteAllBackendServers err, err : %v\", err)\n\t}\n\t// add bs\n\tbsAdd := []blb.BackendServer{\n\t\t{\n\t\t\tInstanceId: \"1\",\n\t\t},\n\t\t{\n\t\t\tInstanceId: \"2\",\n\t\t},\n\t}\n\targs := blb.AddBackendServersArgs{\n\t\tLoadBalancerId: lb.BlbId,\n\t\tBackendServerList: bsAdd,\n\t}\n\terr = cloud.clientSet.BLBClient.AddBackendServers(ctx, &args, &bce.SignOption{\n\t\tCustomSignFunc: CCEServiceSign,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"AddBackendServers err, err: %v\", err)\n\t}\n\terr = cloud.deleteAllBackendServers(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"deleteAllBackendServers err, err : %v\", err)\n\t}\n\tbs, err := cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 0 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n}", "func ExampleELB_CreateLoadBalancer_shared04() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: 
aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t\tScheme: aws.String(\"internal\"),\n\t\tSecurityGroups: []*string{\n\t\t\taws.String(\"sg-a61988c3\"),\n\t\t},\n\t\tSubnets: []*string{\n\t\t\taws.String(\"subnet-15aaab61\"),\n\t\t},\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase 
elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (s) TestOutlierDetection(t *testing.T) {\n\tedsLBCh := testutils.NewChannel()\n\txdsC, cleanup := setup(edsLBCh)\n\tdefer cleanup()\n\tbuilder := balancer.Get(Name)\n\tedsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{})\n\tif edsB == nil {\n\t\tt.Fatalf(\"builder.Build(%s) failed and returned nil\", Name)\n\t}\n\tdefer edsB.Close()\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\t// Update Cluster Resolver with Client Conn State with Outlier Detection\n\t// configuration present. This is what will be passed down to this balancer,\n\t// as CDS Balancer gets the Cluster Update and converts the Outlier\n\t// Detection data to an Outlier Detection configuration and sends it to this\n\t// level.\n\tif err := edsB.UpdateClientConnState(balancer.ClientConnState{\n\t\tResolverState: xdsclient.SetClient(resolver.State{}, xdsC),\n\t\tBalancerConfig: newLBConfigWithOneEDSAndOutlierDetection(testEDSServcie, noopODCfg),\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := xdsC.WaitForWatchEDS(ctx); err != nil {\n\t\tt.Fatalf(\"xdsClient.WatchEndpoints failed with error: %v\", err)\n\t}\n\n\t// Invoke EDS Callback - causes child balancer to be built and then\n\t// UpdateClientConnState called on it with Outlier Detection as a direct\n\t// child.\n\txdsC.InvokeWatchEDSCallback(\"\", defaultEndpointsUpdate, nil)\n\tedsLB, err := waitForNewChildLB(ctx, edsLBCh)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlocalityID := xdsinternal.LocalityID{Zone: \"zone\"}\n\t// The priority configuration generated should have 
Outlier Detection as a\n\t// direct child due to Outlier Detection being turned on.\n\tpCfgWant := &priority.LBConfig{\n\t\tChildren: map[string]*priority.Child{\n\t\t\t\"priority-0-0\": {\n\t\t\t\tConfig: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\tName: outlierdetection.Name,\n\t\t\t\t\tConfig: &outlierdetection.LBConfig{\n\t\t\t\t\t\tInterval: 1<<63 - 1,\n\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\t\t\tName: clusterimpl.Name,\n\t\t\t\t\t\t\tConfig: &clusterimpl.LBConfig{\n\t\t\t\t\t\t\t\tCluster: testClusterName,\n\t\t\t\t\t\t\t\tEDSServiceName: \"test-eds-service-name\",\n\t\t\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\t\t\t\t\tName: weightedtarget.Name,\n\t\t\t\t\t\t\t\t\tConfig: &weightedtarget.LBConfig{\n\t\t\t\t\t\t\t\t\t\tTargets: map[string]weightedtarget.Target{\n\t\t\t\t\t\t\t\t\t\t\tassertString(localityID.ToString): {\n\t\t\t\t\t\t\t\t\t\t\t\tWeight: 100,\n\t\t\t\t\t\t\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tIgnoreReresolutionRequests: true,\n\t\t\t},\n\t\t},\n\t\tPriorities: []string{\"priority-0-0\"},\n\t}\n\n\tif err := edsLB.waitForClientConnStateChangeVerifyBalancerConfig(ctx, balancer.ClientConnState{\n\t\tBalancerConfig: pCfgWant,\n\t}); err != nil {\n\t\tt.Fatalf(\"EDS impl got unexpected update: %v\", err)\n\t}\n}", "func TestChangeServiceType(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\t// This existing ClusterIP service should be ignored\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: 
\"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeClusterIP,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"No service updates expected\")\n\n\t\treturn false\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tawait.Block()\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tvar assignedIP string\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tassignedIP = svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tupdatedService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: 
serviceAUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Update(context.Background(), updatedService, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 0 {\n\t\t\tt.Error(\"Expected service to have no conditions\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tupdatedService = &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeNodePort,\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Update(context.Background(), updatedService, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(assignedIP)) {\n\t\tt.Fatal(\"Expected assigned IP to be released\")\n\t}\n}", "func TestRangeDelete(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: 
\"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, 500*time.Millisecond)\n\n\t// Add a new CIDR, this should not have any effect on the existing service.\n\tpoolA.Spec.Cidrs = append(poolA.Spec.Cidrs, cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\tCidr: \"10.0.20.0/24\",\n\t})\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn 
true\n\t\t}\n\n\t\tif !strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.20\") {\n\t\t\tt.Error(\"Expected new ingress to be in the 10.0.20.0/24 range\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t// Remove the existing range, this should trigger the re-allocation of the existing service\n\tpoolA.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: \"10.0.20.0/24\",\n\t\t},\n\t}\n\t_, err = fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func ExampleELB_CreateLoadBalancer_shared03() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tAvailabilityZones: []*string{\n\t\t\taws.String(\"us-west-2a\"),\n\t\t},\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(443),\n\t\t\t\tProtocol: aws.String(\"HTTPS\"),\n\t\t\t\tSSLCertificateId: aws.String(\"arn:aws:iam::123456789012:server-certificate/my-server-cert\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, 
aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func endpointFromLoadBalancer(lb kitloadbalancer.Balancer) kitendpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\tvar ep kitendpoint.Endpoint\n\t\tvar err error\n\t\tfor i := 0; i < LoadBalancerRetryCount; i++ {\n\t\t\tep, err = lb.Endpoint()\n\t\t\tif err != nil && err != kitloadbalancer.ErrNoEndpoints {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn ep(ctx, request)\n\t}\n}", "func NewLoadBalancer(c config.LoadBalancerConfig) *LoadBalancer {\n\tvar lb 
LoadBalancer\n\tif c.Hosts != nil && len(c.Hosts) > 0 {\n\t\tlb.hosts = make([]string, len(c.Hosts))\n\t\tfor i, server := range c.Hosts {\n\t\t\tlb.hosts[i] = server\n\t\t\tgloballog.WithFields(logrus.Fields{\n\t\t\t\t\"host\": server,\n\t\t\t\t\"index\": i,\n\t\t\t}).Debug(\"adding lb host\")\n\t\t}\n\t} else {\n\t\tlb.hosts = make([]string, 10)\n\t}\n\tlb.mode = c.BalanceMode\n\tlb.hostLock = new(sync.RWMutex)\n\treturn &lb\n}", "func TestPoolSelectorNamespace(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tselector := slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"io.kubernetes.service.namespace\": \"tenant-one\",\n\t\t},\n\t}\n\tpoolA.Spec.ServiceSelector = &selector\n\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"red-service\" {\n\t\t\tt.Error(\"Expected update from 'red-service'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != 
slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tmatchingService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"red-service\",\n\t\t\tNamespace: \"tenant-one\",\n\t\t\tUID: serviceAUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilyPolicy: &policy,\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"tenant-one\").Create(context.Background(), matchingService, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"blue-service\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to not receive any ingress IPs\")\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tnonMatchingService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"blue-service\",\n\t\t\tNamespace: \"tenant-two\",\n\t\t\tUID: serviceBUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t// Setting the same label in an 
attempt to escalate privileges doesn't work\n\t\t\t\t\"io.kubernetes.service.namespace\": \"tenant-one\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilyPolicy: &policy,\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"tenant-two\").Create(context.Background(), nonMatchingService, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}" ]
[ "0.64188784", "0.63519067", "0.61989796", "0.61989796", "0.5979418", "0.5972563", "0.59587514", "0.5949928", "0.5905317", "0.5873976", "0.5857136", "0.58154726", "0.5803268", "0.5797885", "0.57977414", "0.579544", "0.57392776", "0.57254845", "0.57204443", "0.570759", "0.5682965", "0.567726", "0.56583595", "0.56367743", "0.5616479", "0.56128913", "0.55761", "0.5571696", "0.55620307", "0.5561613", "0.5535155", "0.550699", "0.550116", "0.54896504", "0.54867953", "0.5453919", "0.54406786", "0.54403454", "0.5435469", "0.5425922", "0.5424571", "0.53993905", "0.53872514", "0.5380945", "0.53644544", "0.5353607", "0.53488743", "0.53316396", "0.532191", "0.5300891", "0.5300626", "0.5297274", "0.5292445", "0.5275935", "0.5259745", "0.5259484", "0.5254508", "0.5254209", "0.52520066", "0.5247954", "0.5239227", "0.5227191", "0.51986605", "0.5178376", "0.51744163", "0.517104", "0.5165667", "0.514221", "0.513412", "0.5128925", "0.512762", "0.51235396", "0.5108379", "0.5088754", "0.5083458", "0.50797385", "0.5077634", "0.50764465", "0.5071112", "0.5057012", "0.5056886", "0.5056292", "0.5044678", "0.5040788", "0.5038789", "0.5035187", "0.5035036", "0.5028746", "0.50160307", "0.50130373", "0.50120866", "0.5007321", "0.50056124", "0.49984005", "0.49816284", "0.4981241", "0.49793783", "0.4957029", "0.49567986", "0.4956669" ]
0.7403344
0
DescribeLoadBalancer indicates an expected call of DescribeLoadBalancer
func (mr *MockLoadBalanceMockRecorder) DescribeLoadBalancer(region, lbID, name interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancer", reflect.TypeOf((*MockLoadBalance)(nil).DescribeLoadBalancer), region, lbID, name) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) DescribeLoadBalancer(region, lbID, name string) (*cloud.LoadBalanceObject, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DescribeLoadBalancer\", region, lbID, name)\n\tret0, _ := ret[0].(*cloud.LoadBalanceObject)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (el *ELBV2Manager) describeLoadbalancers(marker *string, loadbalancers []*elbv2.LoadBalancer) ([]*elbv2.LoadBalancer, error) {\n\n\tinput := &elbv2.DescribeLoadBalancersInput{\n\t\tMarker: marker,\n\t}\n\n\tresp, err := el.client.DescribeLoadBalancers(input)\n\tif err != nil {\n\t\tlog.WithField(\"error\", err).Error(\"could not describe elb instances\")\n\t\treturn nil, err\n\t}\n\n\tif loadbalancers == nil {\n\t\tloadbalancers = []*elbv2.LoadBalancer{}\n\t}\n\n\tloadbalancers = append(loadbalancers, resp.LoadBalancers...)\n\n\tif resp.NextMarker != nil {\n\t\treturn el.describeLoadbalancers(resp.NextMarker, loadbalancers)\n\t}\n\n\treturn loadbalancers, nil\n}", "func (mr *MockLoadBalanceMockRecorder) DescribeLoadBalancerWithNs(ns, region, lbID, name interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeLoadBalancerWithNs\", reflect.TypeOf((*MockLoadBalance)(nil).DescribeLoadBalancerWithNs), ns, region, lbID, name)\n}", "func (lb *GetLoadbalancerInput) GetAllLoadbalancer() (GetLoadbalancerResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(lb.Cloud.Name)); status != true {\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetNetworks\")\n\t}\n\n\tswitch strings.ToLower(lb.Cloud.Name) {\n\tcase \"aws\":\n\n\t\t// I will establish session so that we can carry out the process in cloud\n\t\tsess := (lb.Cloud.Client).(*session.Session)\n\n\t\t// authorizing further request\n\t\tauthinpt := new(auth.EstablishConnectionInput)\n\t\tauthinpt.Region = lb.Cloud.Region\n\t\tauthinpt.Session = sess\n\t\tauthinpt.Resource = \"elb12\"\n\t\tlbin 
:= new(loadbalance.GetLoadbalancerInput)\n\t\tlbin.GetRaw = lb.Cloud.GetRaw\n\n\t\tswitch strings.ToLower(lb.Type) {\n\t\tcase \"classic\":\n\t\t\tresponse, lberr := lbin.GetAllClassicLb(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\t\tcase \"application\":\n\t\t\tresponse, lberr := lbin.GetAllApplicationLb(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\t\tcase \"\":\n\t\t\tresponse, lberr := lbin.GetAllLoadbalancer(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\t\tdefault:\n\t\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(\"The loadbalancer type you entered is unknown to me\")\n\t\t}\n\n\tcase \"azure\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultAzResponse)\n\tcase \"gcp\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultGcpResponse)\n\tcase \"openstack\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultOpResponse)\n\tdefault:\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetLoadbalancers\")\n\t}\n}", "func (lb *GetLoadbalancerInput) GetLoadbalancers() (GetLoadbalancerResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(lb.Cloud.Name)); status != true {\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetNetworks\")\n\t}\n\n\tswitch strings.ToLower(lb.Cloud.Name) {\n\tcase \"aws\":\n\n\t\t// Gets the established session so that it can carry out the process in cloud\n\t\tsess := (lb.Cloud.Client).(*session.Session)\n\n\t\t//authorizing to request further\n\t\tauthinpt := new(auth.EstablishConnectionInput)\n\t\tauthinpt.Region = lb.Cloud.Region\n\t\tauthinpt.Session = 
sess\n\t\tswitch strings.ToLower(lb.Type) {\n\t\tcase \"classic\":\n\t\t\tauthinpt.Resource = \"elb\"\n\t\tcase \"application\":\n\t\t\tauthinpt.Resource = \"elb2\"\n\t\t}\n\n\t\tlbin := new(loadbalance.GetLoadbalancerInput)\n\t\tlbin.GetRaw = lb.Cloud.GetRaw\n\t\tlbin.LbNames = lb.LbNames\n\t\tlbin.LbArns = lb.LbArns\n\t\tlbin.Type = lb.Type\n\t\tresponse, lberr := lbin.Getloadbalancers(*authinpt)\n\t\tif lberr != nil {\n\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t}\n\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\n\tcase \"azure\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultAzResponse)\n\tcase \"gcp\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultGcpResponse)\n\tcase \"openstack\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultOpResponse)\n\tdefault:\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetLoadbalancers\")\n\t}\n}", "func CreateLoadBalancer(ctx context.Context, lbName, pipName string) (lb network.LoadBalancer, err error) {\n\tprobeName := \"probe\"\n\tfrontEndIPConfigName := \"fip\"\n\tbackEndAddressPoolName := \"backEndPool\"\n\tidPrefix := fmt.Sprintf(\"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers\", config.SubscriptionID(), config.GroupName())\n\n\tpip, err := GetPublicIP(ctx, pipName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tlbClient := getLBClient()\n\tfuture, err := lbClient.CreateOrUpdate(ctx,\n\t\tconfig.GroupName(),\n\t\tlbName,\n\t\tnetwork.LoadBalancer{\n\t\t\tLocation: to.StringPtr(config.Location()),\n\t\t\tLoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{\n\t\t\t\tFrontendIPConfigurations: &[]network.FrontendIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &frontEndIPConfigName,\n\t\t\t\t\t\tFrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{\n\t\t\t\t\t\t\tPrivateIPAllocationMethod: network.Dynamic,\n\t\t\t\t\t\t\tPublicIPAddress: 
&pip,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBackendAddressPools: &[]network.BackendAddressPool{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &backEndAddressPoolName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProbes: &[]network.Probe{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &probeName,\n\t\t\t\t\t\tProbePropertiesFormat: &network.ProbePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.ProbeProtocolHTTP,\n\t\t\t\t\t\t\tPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tIntervalInSeconds: to.Int32Ptr(15),\n\t\t\t\t\t\t\tNumberOfProbes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tRequestPath: to.StringPtr(\"healthprobe.aspx\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLoadBalancingRules: &[]network.LoadBalancingRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"lbRule\"),\n\t\t\t\t\t\tLoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tLoadDistribution: network.Default,\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tBackendAddressPool: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/backendAddressPools/%s\", idPrefix, lbName, backEndAddressPoolName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tProbe: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/probes/%s\", idPrefix, lbName, probeName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tInboundNatRules: &[]network.InboundNatRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"natRule1\"),\n\t\t\t\t\t\tInboundNatRulePropertiesFormat: &network.InboundNatRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: 
network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(21),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(22),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"natRule2\"),\n\t\t\t\t\t\tInboundNatRulePropertiesFormat: &network.InboundNatRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(23),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(22),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn lb, fmt.Errorf(\"cannot create load balancer: %v\", err)\n\t}\n\n\terr = future.WaitForCompletion(ctx, lbClient.Client)\n\tif err != nil {\n\t\treturn lb, fmt.Errorf(\"cannot get load balancer create or update future response: %v\", err)\n\t}\n\n\treturn future.Result(lbClient)\n}", "func ExampleELB_DescribeLoadBalancers_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.DescribeLoadBalancersInput{\n\t\tLoadBalancerNames: []*string{\n\t\t\taws.String(\"my-load-balancer\"),\n\t\t},\n\t}\n\n\tresult, err := svc.DescribeLoadBalancers(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tcase 
elb.ErrCodeDependencyThrottleException:\n\t\t\t\tfmt.Println(elb.ErrCodeDependencyThrottleException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (c *MockLoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string) (*network.LoadBalancer, error) {\n\tfor _, lb := range c.LBs {\n\t\tif *lb.Name == loadBalancerName {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\treturn nil, nil\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) ListLoadBalancers(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListLoadBalancers\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).ListLoadBalancers), p)\n}", "func (bc *Baiducloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {\n\t// workaround to support old version, can be removed if not support old version\n\tbc.workAround(service)\n\tresult, err := ExtractServiceAnnotation(service)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tif len(result.CceAutoAddLoadBalancerId) == 0 {\n\t\treturn nil, false, nil\n\t}\n\tlb, exists, err := bc.getBCELoadBalancerById(result.CceAutoAddLoadBalancerId)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tif !exists {\n\t\treturn nil, false, nil\n\t}\n\n\tvar ip string\n\tif result.LoadBalancerInternalVpc == \"true\" {\n\t\tip = lb.Address // internal vpc ip\n\t} else {\n\t\tip = lb.PublicIp // EIP\n\t}\n\tglog.V(3).Infof(\"[%v %v] GetLoadBalancer ip: %s\", service.Namespace, service.Name, ip)\n\n\treturn &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: ip}}}, true, nil\n}", "func (m *MockLoadBalancerServiceIface) ListLoadBalancers(p 
*ListLoadBalancersParams) (*ListLoadBalancersResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListLoadBalancers\", p)\n\tret0, _ := ret[0].(*ListLoadBalancersResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) GetLoadBalancerID(name interface{}, opts ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{name}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetLoadBalancerID\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).GetLoadBalancerID), varargs...)\n}", "func (s *Service) Get(ctx context.Context, spec azure.Spec) (interface{}, error) {\n\tinternalLBSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn network.LoadBalancer{}, errors.New(\"invalid internal load balancer specification\")\n\t}\n\t//lbName := fmt.Sprintf(\"%s-api-internallb\", s.Scope.Cluster.Name)\n\tlb, err := s.Client.Get(ctx, s.Scope.ClusterConfig.ResourceGroup, internalLBSpec.Name, \"\")\n\tif err != nil && azure.ResourceNotFound(err) {\n\t\treturn nil, errors.Wrapf(err, \"load balancer %s not found\", internalLBSpec.Name)\n\t} else if err != nil {\n\t\treturn lb, err\n\t}\n\treturn lb, nil\n}", "func ELBv2LoadBalancerList() (*elbv2.DescribeLoadBalancersOutput, error) {\n\tsvc := elbv2.New(session.New(), &aws.Config{\n\t\tRegion: aws.String(beego.AppConfig.String(\"awsRegion\")),\n\t})\n\n\tparams := &elbv2.DescribeLoadBalancersInput{}\n\n\tresp, err := svc.DescribeLoadBalancers(params)\n\n\tif err != nil {\n\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\tbeego.Error(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())\n\t\t\tif reqErr, ok := err.(awserr.RequestFailure); ok {\n\t\t\t\tbeego.Error(\n\t\t\t\t\treqErr.Code(),\n\t\t\t\t\treqErr.Message(),\n\t\t\t\t\treqErr.StatusCode(),\n\t\t\t\t\treqErr.RequestID(),\n\t\t\t\t)\n\t\t\t}\n\t\t} else {\n\t\t\tbeego.Debug(err.Error())\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (vs 
*VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn nil, false\n}", "func (m *MockLoadBalance) DescribeLoadBalancerWithNs(ns, region, lbID, name string) (*cloud.LoadBalanceObject, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DescribeLoadBalancerWithNs\", ns, region, lbID, name)\n\tret0, _ := ret[0].(*cloud.LoadBalanceObject)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) ListNetscalerLoadBalancers(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListNetscalerLoadBalancers\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).ListNetscalerLoadBalancers), p)\n}", "func (bc *Baiducloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer(%v, %v, %v, %v, %v)\",\n\t\tclusterName, service.Namespace, service.Name, bc.Region, service.Spec.LoadBalancerIP, service.Spec.Ports, service.Annotations)\n\t// workaround to support old version, can be removed if not support old version\n\tbc.workAround(service)\n\tresult, err := ExtractServiceAnnotation(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = bc.validateService(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// ensure BLB\n\tlb, err := bc.ensureBLB(ctx, clusterName, service, nodes, result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif result.LoadBalancerInternalVpc == \"true\" {\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer: use LoadBalancerInternalVpc, EXTERNAL-IP is %s\", service.Namespace, service.Name, lb.Address)\n\t\treturn &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: lb.Address}}}, nil\n\t}\n\n\t// ensure EIP\n\tpubIP, err := bc.ensureEIP(ctx, clusterName, service, nodes, result, lb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer: 
EXTERNAL-IP is %s\", service.Namespace, service.Name, pubIP)\n\treturn &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: pubIP}}}, nil\n}", "func isSDKLoadBalancerRequiresReplacement(sdkLB LoadBalancerWithTags, resLB *elbv2model.LoadBalancer) bool {\n\tif string(resLB.Spec.Type) != awssdk.StringValue(sdkLB.LoadBalancer.Type) {\n\t\treturn true\n\t}\n\tif resLB.Spec.Scheme != nil && string(*resLB.Spec.Scheme) != awssdk.StringValue(sdkLB.LoadBalancer.Scheme) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (_m *ELBv2APIClient) DescribeLoadBalancers(_a0 context.Context, _a1 *elasticloadbalancingv2.DescribeLoadBalancersInput, _a2 ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DescribeLoadBalancersOutput, error) {\n\t_va := make([]interface{}, len(_a2))\n\tfor _i := range _a2 {\n\t\t_va[_i] = _a2[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _a0, _a1)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 *elasticloadbalancingv2.DescribeLoadBalancersOutput\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DescribeLoadBalancersOutput, error)); ok {\n\t\treturn rf(_a0, _a1, _a2...)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) *elasticloadbalancingv2.DescribeLoadBalancersOutput); ok {\n\t\tr0 = rf(_a0, _a1, _a2...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*elasticloadbalancingv2.DescribeLoadBalancersOutput)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) error); ok {\n\t\tr1 = rf(_a0, _a1, _a2...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (cloud *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn nil, 
false\n}", "func (s *Service) CreateOrUpdate(ctx context.Context, spec azure.Spec) error {\n\tinternalLBSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn errors.New(\"invalid internal load balancer specification\")\n\t}\n\tklog.V(2).Infof(\"creating internal load balancer %s\", internalLBSpec.Name)\n\tprobeName := \"tcpHTTPSProbe\"\n\tfrontEndIPConfigName := \"controlplane-internal-lbFrontEnd\"\n\tbackEndAddressPoolName := \"controlplane-internal-backEndPool\"\n\tidPrefix := fmt.Sprintf(\"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers\", s.Scope.SubscriptionID, s.Scope.ClusterConfig.ResourceGroup)\n\tlbName := internalLBSpec.Name\n\n\tklog.V(2).Infof(\"getting subnet %s\", internalLBSpec.SubnetName)\n\tsubnetInterface, err := subnets.NewService(s.Scope).Get(ctx, &subnets.Spec{Name: internalLBSpec.SubnetName, VnetName: internalLBSpec.VnetName})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubnet, ok := subnetInterface.(network.Subnet)\n\tif !ok {\n\t\treturn errors.New(\"subnet Get returned invalid interface\")\n\t}\n\tklog.V(2).Infof(\"successfully got subnet %s\", internalLBSpec.SubnetName)\n\n\tfuture, err := s.Client.CreateOrUpdate(ctx,\n\t\ts.Scope.ClusterConfig.ResourceGroup,\n\t\tlbName,\n\t\tnetwork.LoadBalancer{\n\t\t\tSku: &network.LoadBalancerSku{Name: network.LoadBalancerSkuNameStandard},\n\t\t\tLocation: to.StringPtr(s.Scope.ClusterConfig.Location),\n\t\t\tLoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{\n\t\t\t\tFrontendIPConfigurations: &[]network.FrontendIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &frontEndIPConfigName,\n\t\t\t\t\t\tFrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{\n\t\t\t\t\t\t\tPrivateIPAllocationMethod: network.Static,\n\t\t\t\t\t\t\tSubnet: &subnet,\n\t\t\t\t\t\t\tPrivateIPAddress: to.StringPtr(internalLBSpec.IPAddress),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBackendAddressPools: 
&[]network.BackendAddressPool{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &backEndAddressPoolName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProbes: &[]network.Probe{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &probeName,\n\t\t\t\t\t\tProbePropertiesFormat: &network.ProbePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.ProbeProtocolTCP,\n\t\t\t\t\t\t\tPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIntervalInSeconds: to.Int32Ptr(15),\n\t\t\t\t\t\t\tNumberOfProbes: to.Int32Ptr(4),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLoadBalancingRules: &[]network.LoadBalancingRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"LBRuleHTTPS\"),\n\t\t\t\t\t\tLoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tLoadDistribution: network.LoadDistributionDefault,\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tBackendAddressPool: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/backendAddressPools/%s\", idPrefix, lbName, backEndAddressPoolName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tProbe: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/probes/%s\", idPrefix, lbName, probeName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot create load balancer\")\n\t}\n\n\terr = future.WaitForCompletionRef(ctx, s.Client.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot get internal load balancer create or update future response\")\n\t}\n\n\t_, err = 
future.Result(s.Client)\n\tklog.V(2).Infof(\"successfully created internal load balancer %s\", internalLBSpec.Name)\n\treturn err\n}", "func ExampleELB_DescribeTags_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.DescribeTagsInput{\n\t\tLoadBalancerNames: []*string{\n\t\t\taws.String(\"my-load-balancer\"),\n\t\t},\n\t}\n\n\tresult, err := svc.DescribeTags(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (c *LoadBalancerClient) Details(uniqID string) (*LoadBalancer, error) {\n\tvar result LoadBalancer\n\tparams := LoadBalancerParams{UniqID: uniqID}\n\n\terr := c.Backend.CallIntoInterface(\"v1/Network/LoadBalancer/details\", params, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &result, nil\n}", "func (c *LoadBalancerCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Created\n\tch <- c.Services\n\tch <- c.MaxServices\n\tch <- c.Targets\n\tch <- c.MaxTargets\n\tch <- c.TargetsHealthy\n\tch <- c.TargetsUnhealthy\n\tch <- c.TargetsUnknown\n\tch <- c.AssignedCertificates\n\tch <- c.MaxAssignedCertificates\n\tch <- c.IngoingTraffic\n\tch <- c.OutgoingTraffic\n\tch <- c.IncludedTraffic\n\tch <- c.Connections\n\tch <- c.MaxConnections\n\tch <- c.ConnectionsPerSecond\n\tch <- c.RequestsPerSecond\n\tch <- c.IncomingBandwidth\n\tch <- c.OutgoingBandwidth\n}", "func (c *cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\tklog.V(4).Infof(\"LoadBalancer called\")\n\treturn c, true\n}", "func (bc *Baiducloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn bc, true\n}", 
"func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn az, true\n}", "func HandleLoadbalancerGetSuccessfully(t *testing.T) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"GET\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestHeader(t, r, \"Accept\", \"application/json\")\n\n\t\tfmt.Fprintf(w, SingleLoadbalancerBody)\n\t})\n}", "func (c *Client) WaitUntilLoadBalancerAvailable(ctx context.Context, input *DescribeLoadBalancersInput, opts ...aws.WaiterOption) error {\n\tw := aws.Waiter{\n\t\tName: \"WaitUntilLoadBalancerAvailable\",\n\t\tMaxAttempts: 40,\n\t\tDelay: aws.ConstantWaiterDelay(15 * time.Second),\n\t\tAcceptors: []aws.WaiterAcceptor{\n\t\t\t{\n\t\t\t\tState: aws.SuccessWaiterState,\n\t\t\t\tMatcher: aws.PathAllWaiterMatch, Argument: \"LoadBalancers[].State.Code\",\n\t\t\t\tExpected: \"active\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tState: aws.RetryWaiterState,\n\t\t\t\tMatcher: aws.PathAnyWaiterMatch, Argument: \"LoadBalancers[].State.Code\",\n\t\t\t\tExpected: \"provisioning\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tState: aws.RetryWaiterState,\n\t\t\t\tMatcher: aws.ErrorWaiterMatch,\n\t\t\t\tExpected: \"LoadBalancerNotFound\",\n\t\t\t},\n\t\t},\n\t\tLogger: c.Config.Logger,\n\t\tNewRequest: func(opts []aws.Option) (*aws.Request, error) {\n\t\t\tvar inCpy *DescribeLoadBalancersInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq := c.DescribeLoadBalancersRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req.Request, nil\n\t\t},\n\t}\n\tw.ApplyOptions(opts...)\n\n\treturn w.Wait(ctx)\n}", "func CreateLoadBalancer(t *testing.T, client *gophercloud.ServiceClient, subnetID string) (*loadbalancers.LoadBalancer, error) {\n\tlbName := tools.RandomString(\"TESTACCT-\", 8)\n\tlbDescription := tools.RandomString(\"TESTACCT-DESC-\", 
8)\n\n\tt.Logf(\"Attempting to create loadbalancer %s on subnet %s\", lbName, subnetID)\n\n\tcreateOpts := loadbalancers.CreateOpts{\n\t\tName: lbName,\n\t\tDescription: lbDescription,\n\t\tVipSubnetID: subnetID,\n\t\tAdminStateUp: gophercloud.Enabled,\n\t}\n\n\tlb, err := loadbalancers.Create(client, createOpts).Extract()\n\tif err != nil {\n\t\treturn lb, err\n\t}\n\n\tt.Logf(\"Successfully created loadbalancer %s on subnet %s\", lbName, subnetID)\n\tt.Logf(\"Waiting for loadbalancer %s to become active\", lbName)\n\n\tif err := WaitForLoadBalancerState(client, lb.ID, \"ACTIVE\"); err != nil {\n\t\treturn lb, err\n\t}\n\n\tt.Logf(\"LoadBalancer %s is active\", lbName)\n\n\tth.AssertEquals(t, lb.Name, lbName)\n\tth.AssertEquals(t, lb.Description, lbDescription)\n\tth.AssertEquals(t, lb.VipSubnetID, subnetID)\n\tth.AssertEquals(t, lb.AdminStateUp, true)\n\n\treturn lb, nil\n}", "func (p *MockProvisionerClient) LoadBalancer(string) client.GenericLoadBalancerInterface {\n\treturn &MockLoadBalancerClient{}\n}", "func (p *MockProvisionerClient) LoadBalancer(string) client.GenericLoadBalancerInterface {\n\treturn &MockLoadBalancerClient{}\n}", "func (az AzureClient) GetAllLoadBalancer() (network.LoadBalancerListResultPage, error) {\n\tlbClient := GetLbClient(Client.config)\n\tctx, cancel := context.WithTimeout(context.Background(), 6000*time.Second)\n\tdefer cancel()\n\treturn lbClient.List(ctx, Client.config.ResourceGroup)\n}", "func (c *MockLoadBalancersClient) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, error) {\n\tvar l []network.LoadBalancer\n\tfor _, lb := range c.LBs {\n\t\tl = append(l, lb)\n\t}\n\treturn l, nil\n}", "func (c *Client) WaitUntilLoadBalancerExists(ctx context.Context, input *DescribeLoadBalancersInput, opts ...aws.WaiterOption) error {\n\tw := aws.Waiter{\n\t\tName: \"WaitUntilLoadBalancerExists\",\n\t\tMaxAttempts: 40,\n\t\tDelay: aws.ConstantWaiterDelay(15 * time.Second),\n\t\tAcceptors: 
[]aws.WaiterAcceptor{\n\t\t\t{\n\t\t\t\tState: aws.SuccessWaiterState,\n\t\t\t\tMatcher: aws.StatusWaiterMatch,\n\t\t\t\tExpected: 200,\n\t\t\t},\n\t\t\t{\n\t\t\t\tState: aws.RetryWaiterState,\n\t\t\t\tMatcher: aws.ErrorWaiterMatch,\n\t\t\t\tExpected: \"LoadBalancerNotFound\",\n\t\t\t},\n\t\t},\n\t\tLogger: c.Config.Logger,\n\t\tNewRequest: func(opts []aws.Option) (*aws.Request, error) {\n\t\t\tvar inCpy *DescribeLoadBalancersInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq := c.DescribeLoadBalancersRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req.Request, nil\n\t\t},\n\t}\n\tw.ApplyOptions(opts...)\n\n\treturn w.Wait(ctx)\n}", "func (p *provider) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn p.loadBalancer, true\n}", "func (c *MockAzureCloud) LoadBalancer() azure.LoadBalancersClient {\n\treturn c.LoadBalancersClient\n}", "func (m *MockLoadBalancerServiceIface) ListNetscalerLoadBalancers(p *ListNetscalerLoadBalancersParams) (*ListNetscalerLoadBalancersResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListNetscalerLoadBalancers\", p)\n\tret0, _ := ret[0].(*ListNetscalerLoadBalancersResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestNonMatchingLBClass(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tlbClass := \"net.example/some-other-class\"\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerClass: &lbClass,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool 
{\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"Unexpected patch to a service\")\n\n\t\treturn true\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n}", "func NewLoadBalancerCollector(logger log.Logger, client *hcloud.Client, failures *prometheus.CounterVec, duration *prometheus.HistogramVec, cfg config.Target) *LoadBalancerCollector {\n\tif failures != nil {\n\t\tfailures.WithLabelValues(\"load_balancer\").Add(0)\n\t}\n\n\tlabels := []string{\"id\", \"name\", \"datacenter\"}\n\treturn &LoadBalancerCollector{\n\t\tclient: client,\n\t\tlogger: log.With(logger, \"collector\", \"load-balancer\"),\n\t\tfailures: failures,\n\t\tduration: duration,\n\t\tconfig: cfg,\n\n\t\tCreated: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_created_timestamp\",\n\t\t\t\"Timestamp when the load balancer have been created\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tServices: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_services\",\n\t\t\t\"The number of configured services\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxServices: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_services\",\n\t\t\t\"The maximum number of services that can be configured\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargets: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets\",\n\t\t\t\"The number of targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxTargets: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_targets\",\n\t\t\t\"The maximum number of targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsHealthy: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_healthy\",\n\t\t\t\"The number of healthy targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsUnhealthy: 
prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_unhealthy\",\n\t\t\t\"The number of unhealthy targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsUnknown: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_unknown\",\n\t\t\t\"The number of unknown targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tAssignedCertificates: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_assigned_certificates\",\n\t\t\t\"The number of assigned certificates\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxAssignedCertificates: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_assigned_certificates\",\n\t\t\t\"The maximum number of certificates that can be assigned\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIngoingTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_ingoing_traffic\",\n\t\t\t\"The total amount of ingoing traffic in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tOutgoingTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_outgoing_traffic\",\n\t\t\t\"The total amount of outgoing traffic in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIncludedTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_included_traffic\",\n\t\t\t\"The amount of traffic that is included for the load balancer type in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tConnections: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections\",\n\t\t\t\"The number of open connections\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxConnections: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_open_connections\",\n\t\t\t\"The maximum number of open connections\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tConnectionsPerSecond: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_connections_per_second\",\n\t\t\t\"The number of new connections per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tRequestsPerSecond: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_requests_per_second\",\n\t\t\t\"The number of requests per 
second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIncomingBandwidth: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections_bandwidth_in\",\n\t\t\t\"The incoming bandwidth in bytes per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tOutgoingBandwidth: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections_bandwidth_out\",\n\t\t\t\"The outgoing bandwidth in bytes per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t}\n}", "func (mr *MockClientMockRecorder) DescribeSubnets(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeSubnets\", reflect.TypeOf((*MockClient)(nil).DescribeSubnets), arg0)\n}", "func (mr *MockClientMockRecorder) DescribeSubnets(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeSubnets\", reflect.TypeOf((*MockClient)(nil).DescribeSubnets), arg0)\n}", "func ExampleELB_CreateLoadBalancer_shared04() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t\tScheme: aws.String(\"internal\"),\n\t\tSecurityGroups: []*string{\n\t\t\taws.String(\"sg-a61988c3\"),\n\t\t},\n\t\tSubnets: []*string{\n\t\t\taws.String(\"subnet-15aaab61\"),\n\t\t},\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase 
elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (o LookupRegionNetworkEndpointGroupResultOutput) LoadBalancer() NetworkEndpointGroupLbNetworkEndpointGroupResponseOutput {\n\treturn o.ApplyT(func(v LookupRegionNetworkEndpointGroupResult) NetworkEndpointGroupLbNetworkEndpointGroupResponse {\n\t\treturn v.LoadBalancer\n\t}).(NetworkEndpointGroupLbNetworkEndpointGroupResponseOutput)\n}", "func (l *loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) 
{\n\tlb, err := l.fetchLoadBalancer(ctx, clusterName, service)\n\tif err != nil {\n\t\tif err == LoadBalancerNotFound {\n\t\t\tklog.Infof(\"no load balancer found for service %s\", service.Name)\n\t\t\treturn nil, false, nil\n\t\t}\n\n\t\tklog.Errorf(\"error getting load balancer for service %s: %v\", service.Name, err)\n\t\treturn nil, false, err\n\t}\n\n\tstatus := &v1.LoadBalancerStatus{}\n\tstatus.Ingress = make([]v1.LoadBalancerIngress, len(lb.IP))\n\tfor idx, ip := range lb.IP {\n\t\tif getUseHostname(service) {\n\t\t\tstatus.Ingress[idx].Hostname = ip.Reverse\n\t\t} else {\n\t\t\tstatus.Ingress[idx].IP = ip.IPAddress\n\t\t}\n\t}\n\n\treturn status, true, nil\n}", "func ExampleELB_CreateLoadBalancer_shared03() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tAvailabilityZones: []*string{\n\t\t\taws.String(\"us-west-2a\"),\n\t\t},\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(443),\n\t\t\t\tProtocol: aws.String(\"HTTPS\"),\n\t\t\t\tSSLCertificateId: aws.String(\"arn:aws:iam::123456789012:server-certificate/my-server-cert\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, 
aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func TestRequestIPs(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.20\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := 
fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.10.20\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.20'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 3 {\n\t\t\tt.Error(\"Expected service to receive exactly three ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tfirst := false\n\t\tsecond := false\n\t\tthird := false\n\n\t\tfor _, ingress := range svc.Status.LoadBalancer.Ingress {\n\t\t\tswitch ingress.IP {\n\t\t\tcase \"10.0.10.21\":\n\t\t\t\tfirst = true\n\t\t\tcase \"10.0.10.22\":\n\t\t\t\tsecond = true\n\t\t\tcase \"10.0.10.23\":\n\t\t\t\tthird = true\n\t\t\tdefault:\n\t\t\t\tt.Error(\"Unexpected ingress IP\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tif !first {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.21'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !second {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.22'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !third {\n\t\t\tt.Error(\"Expected service to 
receive IP '10.0.10.23'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tserviceB := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-b\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceBUID,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tciliumSvcLBIPSAnnotation: \"10.0.10.22,10.0.10.23\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Create(context.Background(), serviceB, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-c\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Reason != \"already_allocated\" {\n\t\t\tt.Error(\"Expected condition reason to be 'already_allocated'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, 
time.Second)\n\n\t// request an already allocated IP\n\tserviceC := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-c\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceCUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Create(context.Background(), serviceC, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func checkLoadBalancerServiceType(clientset kubernetes.Interface, log logger.Logger) error {\n\tconst (\n\t\ttestServiceName = \"kubernetes-test-service\"\n\t\ttestNamespace = \"default\"\n\t\twaitTimeoutSec = 300\n\t)\n\tlog.Infof(\"Creating test service %s/%s\", testServiceName, testNamespace)\n\tsvc, err := createTestLoadBalancer(testServiceName, testNamespace, clientset)\n\tif err != nil || svc == nil {\n\t\treturn errors.Wrap(err, \"error creating test service\")\n\t}\n\n\t// handle service deletion\n\tdefer func() {\n\t\tclientset.CoreV1().Services(testNamespace).Delete(context.TODO(), testServiceName, v1.DeleteOptions{})\n\t\tlog.Debugf(\"test service %s deleted\", testNamespace, testServiceName)\n\t}()\n\n\tlog.Infof(\"Checking LoadBalancer service type\")\n\tfor i := 1; i < waitTimeoutSec; i += 1 {\n\t\ttime.Sleep(time.Second)\n\t\ts, err := clientset.CoreV1().Services(testNamespace).Get(context.TODO(), testServiceName, v1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error getting test service\")\n\t\t}\n\t\tif loadBalancerProvisioned(s) {\n\t\t\treturn nil\n\t\t}\n\t\tcontinue\n\t}\n\treturn errors.New(\"Service with LoadBalancer didn't get ingress address\")\n}", "func (_m *Client) GetLoadBalancers(ctx context.Context, rgname string, logger log.Logger) ([]network.LoadBalancer, error) {\n\tret := _m.Called(ctx, rgname, logger)\n\n\tvar 
r0 []network.LoadBalancer\n\tif rf, ok := ret.Get(0).(func(context.Context, string, log.Logger) []network.LoadBalancer); ok {\n\t\tr0 = rf(ctx, rgname, logger)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]network.LoadBalancer)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, string, log.Logger) error); ok {\n\t\tr1 = rf(ctx, rgname, logger)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (s LoadBalancer) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c *Client) ListLoadBalancers() ([]LoadBalancer, *bmc.Error) {\n\tloadBalancers := []LoadBalancer{}\n\tresp, err := c.Request(\"GET\", fmt.Sprintf(\"/loadBalancers\"), nil)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\tbmcError := bmc.Error{Code: string(resp.StatusCode), Message: err.Error()}\n\t\treturn loadBalancers, &bmcError\n\t}\n\tlogrus.Debug(\"StatusCode: \", resp.StatusCode)\n\tif resp.StatusCode != 200 {\n\t\treturn loadBalancers, bmc.NewError(*resp)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tlogrus.Debug(\"Body: \", string(body))\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Could not read JSON response: %s\", err)\n\t}\n\n\tif err = json.Unmarshal(body, &loadBalancers); err != nil {\n\t\tlogrus.Fatalf(\"Unmarshal impossible: %s\", err)\n\t}\n\treturn loadBalancers, nil\n}", "func TestAddPool(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.20.10\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool 
{\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.20.10\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.20.10'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\ttwentyPool := mkPool(poolBUID, \"pool-b\", []string{\"10.0.20.0/24\"})\n\t_, err := fixture.poolClient.Create(context.Background(), twentyPool, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (bc *Baiducloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {\n\t// workaround to support old version, can be removed if not support old version\n\tbc.workAround(service)\n\tresult, err := ExtractServiceAnnotation(service)\n\tif err != nil {\n\t\t// if annotation has error, then creation must be failed. 
So return nil to tell k8s lb has been deleted.\n\t\treturn nil\n\t}\n\tserviceName := getServiceName(service)\n\tif len(result.CceAutoAddLoadBalancerId) == 0 {\n\t\tglog.V(1).Infof(\"[%v %v] EnsureLoadBalancerDeleted: target load balancer not create successful. So, no need to delete BLB and EIP\", serviceName, clusterName)\n\t\treturn nil\n\t}\n\n\tglog.V(2).Infof(\"[%v %v] EnsureLoadBalancerDeleted: START lbId=%q\", serviceName, clusterName, result.CceAutoAddLoadBalancerId)\n\n\t// reconcile logic is capable of fully reconcile, so we can use this to delete\n\tservice.Spec.Ports = []v1.ServicePort{}\n\n\tlb, existsLb, err := bc.getBCELoadBalancerById(result.CceAutoAddLoadBalancerId)\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: getBCELoadBalancer : %s\", serviceName, clusterName, lb.BlbId)\n\tif err != nil {\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted get error: %s\", serviceName, clusterName, err.Error())\n\t\treturn err\n\t}\n\tif !existsLb {\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: target blb not exist\", serviceName, clusterName)\n\t\treturn nil\n\t}\n\n\tif len(result.LoadBalancerExistId) == 0 { //user does not assign the blbid in the annotation\n\t\t// start delete blb and eip, delete blb first\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: Start delete BLB: %s\", serviceName, clusterName, lb.BlbId)\n\t\targs := blb.DeleteLoadBalancerArgs{\n\t\t\tLoadBalancerId: lb.BlbId,\n\t\t}\n\t\terr = bc.clientSet.Blb().DeleteLoadBalancer(&args)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if result.LoadBalancerExistId == \"error_blb_has_been_used\" {\n\t\treturn nil\n\t} else {\n\t\t//get allListeners & delete Listeners\n\t\tallListeners, err := bc.getAllListeners(lb)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(allListeners) > 0 {\n\t\t\terr = bc.deleteListener(lb, allListeners)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t//get allServers & delete 
BackendServers\n\t\tallServers, err := bc.getAllBackendServer(lb)\n\t\tvar removeList []string\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, server := range allServers {\n\t\t\tremoveList = append(removeList, server.InstanceId)\n\t\t}\n\n\t\tif len(removeList) > 0 {\n\t\t\targs := blb.RemoveBackendServersArgs{\n\t\t\t\tLoadBalancerId: lb.BlbId,\n\t\t\t\tBackendServerList: removeList,\n\t\t\t}\n\t\t\terr = bc.clientSet.Blb().RemoveBackendServers(&args)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// annotation \"LoadBalancerInternalVpc\" exists\n\t\tif result.LoadBalancerInternalVpc == \"true\" { //do not assign the eip\n\t\t\tif service.Annotations != nil {\n\t\t\t\tdelete(service.Annotations, ServiceAnnotationCceAutoAddLoadBalancerId)\n\t\t\t}\n\t\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: use LoadBalancerInternalVpc, no EIP to delete\", service.Namespace, service.Name)\n\t\t\t//todo recover eip for blb which has eip in the begin.\n\t\t\tglog.V(2).Infof(\"[%v %v] EnsureLoadBalancerDeleted: delete %v FINISH\", serviceName, clusterName, serviceName)\n\t\t\treturn nil\n\t\t}\n\n\t\t//annotation \"LoadBalancerIP\" exists\n\t\t//unbind eip & blb when user assigned the eip\n\t\tif len(service.Spec.LoadBalancerIP) != 0 { //use user’s eip, do not delete\n\t\t\tunbindArgs := eip.EipArgs{\n\t\t\t\tIp: service.Spec.LoadBalancerIP,\n\t\t\t}\n\t\t\t// just unbind, not delete\n\t\t\terr := bc.clientSet.Eip().UnbindEip(&unbindArgs)\n\t\t\tif err != nil {\n\t\t\t\tglog.V(3).Infof(\"Unbind Eip error : %s\", err.Error())\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\t//get targetEip\n\t\tvar targetEip string\n\t\tif len(service.Status.LoadBalancer.Ingress) != 0 { // P0: use service EXTERNAL_IP\n\t\t\ttargetEip = service.Status.LoadBalancer.Ingress[0].IP\n\t\t}\n\t\tif len(targetEip) == 0 { // P1: use BLB public ip\n\t\t\ttargetEip = lb.PublicIp\n\t\t}\n\t\t//users may unbind eip manually\n\t\tif len(targetEip) == 0 { // 
get none EIP\n\t\t\tglog.V(3).Infof(\"Eip does not exist, Delete completed \")\n\t\t\treturn nil\n\t\t}\n\n\t\t// blb if has eip in the begin\n\t\tif strings.Contains(lb.Desc, \"cce_auto_create_eip\") {\n\t\t\tglog.V(3).Infof(\"EnsureLoadBalancerDeleted: delete eip created by cce: %s\", lb.Desc)\n\t\t\tunbindArgs := eip.EipArgs{\n\t\t\t\tIp: targetEip,\n\t\t\t}\n\t\t\tlb.Desc = strings.TrimPrefix(lb.Desc, \"cce_auto_create_eip\")\n\t\t\tnewLbArg := blb.UpdateLoadBalancerArgs{\n\t\t\t\tLoadBalancerId: lb.BlbId,\n\t\t\t\tDesc: lb.Desc,\n\t\t\t\tName: lb.Name,\n\t\t\t}\n\t\t\terr = bc.clientSet.Blb().UpdateLoadBalancer(&newLbArg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// unbind & delete\n\t\t\terr := bc.clientSet.Eip().UnbindEip(&unbindArgs)\n\t\t\tif err != nil {\n\t\t\t\tglog.V(3).Infof(\"Unbind Eip error : %s\", err.Error())\n\t\t\t\tif strings.Contains(err.Error(), \"EipNotFound\") {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = bc.deleteEIP(targetEip)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif service.Annotations != nil {\n\t\t\tdelete(service.Annotations, ServiceAnnotationCceAutoAddLoadBalancerId)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// delete EIP\n\tif result.LoadBalancerInternalVpc == \"true\" { //do not assign the eip\n\t\tif service.Annotations != nil {\n\t\t\tdelete(service.Annotations, ServiceAnnotationCceAutoAddLoadBalancerId)\n\t\t}\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: use LoadBalancerInternalVpc, no EIP to delete\", service.Namespace, service.Name)\n\t\tglog.V(2).Infof(\"[%v %v] EnsureLoadBalancerDeleted: delete %v FINISH\", serviceName, clusterName, serviceName)\n\t\treturn nil\n\t}\n\tif len(service.Spec.LoadBalancerIP) != 0 { //use user’s eip, do not delete\n\t\tif service.Annotations != nil {\n\t\t\tdelete(service.Annotations, ServiceAnnotationCceAutoAddLoadBalancerId)\n\t\t}\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: LoadBalancerIP is set, not 
delete EIP.\", serviceName, clusterName)\n\t\tglog.V(2).Infof(\"[%v %v] EnsureLoadBalancerDeleted: delete %v FINISH\", serviceName, clusterName, serviceName)\n\t\treturn nil\n\t}\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancerDeleted: Start delete EIP: %s\", serviceName, clusterName, lb.PublicIp)\n\tvar targetEip string\n\tif len(service.Status.LoadBalancer.Ingress) != 0 { // P0: use service EXTERNAL_IP\n\t\ttargetEip = service.Status.LoadBalancer.Ingress[0].IP\n\t}\n\tif len(targetEip) == 0 { // P1: use BLB public ip\n\t\ttargetEip = lb.PublicIp\n\t}\n\tif len(targetEip) == 0 { // get none EIP\n\t\treturn fmt.Errorf(\"EnsureLoadBalancerDeleted failed: can not get a EIP to delete\")\n\t}\n\terr = bc.deleteEIP(targetEip)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif service.Annotations != nil {\n\t\tdelete(service.Annotations, ServiceAnnotationCceAutoAddLoadBalancerId)\n\t}\n\tglog.V(2).Infof(\"[%v %v] EnsureLoadBalancerDeleted: delete %v FINISH\", serviceName, clusterName, serviceName)\n\treturn nil\n}", "func DeleteLoadBalancer(t *testing.T, client *gophercloud.ServiceClient, lbID string) {\n\tt.Logf(\"Attempting to delete loadbalancer %s\", lbID)\n\n\tif err := loadbalancers.Delete(client, lbID).ExtractErr(); err != nil {\n\t\tif _, ok := err.(gophercloud.ErrDefault404); !ok {\n\t\t\tt.Fatalf(\"Unable to delete loadbalancer: %v\", err)\n\t\t}\n\t}\n\n\tt.Logf(\"Waiting for loadbalancer %s to delete\", lbID)\n\n\tif err := WaitForLoadBalancerState(client, lbID, \"DELETED\"); err != nil {\n\t\tt.Fatalf(\"Loadbalancer did not delete in time: %s\", err)\n\t}\n\n\tt.Logf(\"Successfully deleted loadbalancer %s\", lbID)\n}", "func (nat *NATCloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {\n\tstatus = &v1.LoadBalancerStatus{}\n\tnatClient, err := nat.getNATClient()\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\treturn nil, false, nil\n\t\t}\n\n\t\treturn nil, 
false, err\n\t}\n\n\t//get dnat rules binded to the dnat instance\n\tnatGatewayId := service.ObjectMeta.Annotations[AnnotationsNATID]\n\tif natGatewayId == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"The id of natGateway should be set by %v in annotations \", AnnotationsNATID)\n\t}\n\tdnatRuleList, err := listDnatRule(natClient, natGatewayId)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tif len(dnatRuleList.DNATRules) == 0 {\n\t\treturn nil, false, nil\n\t}\n\n\tfor _, externalPort := range service.Spec.Ports {\n\t\t//check if the DNAT rule exists\n\t\tif nat.getDNATRule(dnatRuleList, &externalPort) == nil {\n\t\t\treturn nil, false, nil\n\t\t}\n\t}\n\tstatus.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: service.Spec.LoadBalancerIP})\n\treturn status, true, nil\n}", "func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tlb, err := l.fetchLoadBalancer(ctx, clusterName, service)\n\tswitch err {\n\tcase nil:\n\t\t// continue\n\tcase LoadBalancerNotFound:\n\t\t// create LoadBalancer\n\t\tlb, err = l.createLoadBalancer(ctx, clusterName, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\t// any kind of Error\n\t\tklog.Errorf(\"error getting loadbalancer for service %s: %v\", service.Name, err)\n\t\treturn nil, err\n\t}\n\n\tif service.Spec.LoadBalancerIP != \"\" && service.Spec.LoadBalancerIP != lb.IP[0].IPAddress {\n\t\terr = l.deleteLoadBalancer(ctx, lb, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlb, err = l.createLoadBalancer(ctx, clusterName, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif lb.Status != scwlb.LbStatusReady {\n\t\treturn nil, LoadBalancerNotReady\n\t}\n\n\terr = l.updateLoadBalancer(ctx, lb, service, nodes)\n\tif err != nil {\n\t\tklog.Errorf(\"error updating loadbalancer for service %s: %v\", service.Name, err)\n\t\treturn nil, err\n\t}\n\n\tstatus 
:= &v1.LoadBalancerStatus{}\n\tstatus.Ingress = make([]v1.LoadBalancerIngress, len(lb.IP))\n\tfor idx, ip := range lb.IP {\n\t\tif getUseHostname(service) {\n\t\t\tstatus.Ingress[idx].Hostname = ip.Reverse\n\t\t} else {\n\t\t\tstatus.Ingress[idx].IP = ip.IPAddress\n\t\t}\n\t}\n\n\treturn status, nil\n}", "func NewGetLoadBalancerOK() *GetLoadBalancerOK {\n\treturn &GetLoadBalancerOK{}\n}", "func ExampleELB_CreateLoadBalancer_shared01() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tAvailabilityZones: []*string{\n\t\t\taws.String(\"us-west-2a\"),\n\t\t},\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase 
elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func ExampleELB_CreateLoadBalancer_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t\tSecurityGroups: []*string{\n\t\t\taws.String(\"sg-a61988c3\"),\n\t\t},\n\t\tSubnets: []*string{\n\t\t\taws.String(\"subnet-15aaab61\"),\n\t\t},\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase 
elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func desiredLoadBalancerService(ci *operatorv1.IngressController, deploymentRef metav1.OwnerReference, platform *configv1.PlatformStatus) (bool, *corev1.Service, error) {\n\tif ci.Status.EndpointPublishingStrategy.Type != operatorv1.LoadBalancerServiceStrategyType {\n\t\treturn false, nil, nil\n\t}\n\tservice := manifests.LoadBalancerService()\n\n\tname := controller.LoadBalancerServiceName(ci)\n\n\tservice.Namespace = name.Namespace\n\tservice.Name = name.Name\n\n\tif service.Labels == nil {\n\t\tservice.Labels = map[string]string{}\n\t}\n\tservice.Labels[\"router\"] = 
name.Name\n\tservice.Labels[manifests.OwningIngressControllerLabel] = ci.Name\n\n\tservice.Spec.Selector = controller.IngressControllerDeploymentPodSelector(ci).MatchLabels\n\n\tlb := ci.Status.EndpointPublishingStrategy.LoadBalancer\n\tisInternal := lb != nil && lb.Scope == operatorv1.InternalLoadBalancer\n\n\tif service.Annotations == nil {\n\t\tservice.Annotations = map[string]string{}\n\t}\n\n\tproxyNeeded, err := IsProxyProtocolNeeded(ci, platform)\n\tif err != nil {\n\t\treturn false, nil, fmt.Errorf(\"failed to determine if proxy protocol is proxyNeeded for ingresscontroller %q: %v\", ci.Name, err)\n\t}\n\n\tif platform != nil {\n\t\tif isInternal {\n\t\t\tannotation := InternalLBAnnotations[platform.Type]\n\t\t\tfor name, value := range annotation {\n\t\t\t\tservice.Annotations[name] = value\n\t\t\t}\n\n\t\t\t// Set the GCP Global Access annotation for internal load balancers on GCP only\n\t\t\tif platform.Type == configv1.GCPPlatformType {\n\t\t\t\tif lb != nil && lb.ProviderParameters != nil &&\n\t\t\t\t\tlb.ProviderParameters.Type == operatorv1.GCPLoadBalancerProvider &&\n\t\t\t\t\tlb.ProviderParameters.GCP != nil {\n\t\t\t\t\tglobalAccessEnabled := lb.ProviderParameters.GCP.ClientAccess == operatorv1.GCPGlobalAccess\n\t\t\t\t\tservice.Annotations[GCPGlobalAccessAnnotation] = strconv.FormatBool(globalAccessEnabled)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tannotation := externalLBAnnotations[platform.Type]\n\t\t\tfor name, value := range annotation {\n\t\t\t\tservice.Annotations[name] = value\n\t\t\t}\n\t\t}\n\t\tswitch platform.Type {\n\t\tcase configv1.AWSPlatformType:\n\t\t\tservice.Annotations[awsLBHealthCheckIntervalAnnotation] = awsLBHealthCheckIntervalDefault\n\t\t\tif proxyNeeded {\n\t\t\t\tservice.Annotations[awsLBProxyProtocolAnnotation] = \"*\"\n\t\t\t}\n\t\t\tif lb != nil && lb.ProviderParameters != nil {\n\t\t\t\tif aws := lb.ProviderParameters.AWS; aws != nil && lb.ProviderParameters.Type == operatorv1.AWSLoadBalancerProvider 
{\n\t\t\t\t\tswitch aws.Type {\n\t\t\t\t\tcase operatorv1.AWSNetworkLoadBalancer:\n\t\t\t\t\t\tservice.Annotations[AWSLBTypeAnnotation] = AWSNLBAnnotation\n\t\t\t\t\t\t// NLBs require a different health check interval than CLBs.\n\t\t\t\t\t\t// See <https://bugzilla.redhat.com/show_bug.cgi?id=1908758>.\n\t\t\t\t\t\tservice.Annotations[awsLBHealthCheckIntervalAnnotation] = awsLBHealthCheckIntervalNLB\n\t\t\t\t\tcase operatorv1.AWSClassicLoadBalancer:\n\t\t\t\t\t\tif aws.ClassicLoadBalancerParameters != nil {\n\t\t\t\t\t\t\tif v := aws.ClassicLoadBalancerParameters.ConnectionIdleTimeout; v.Duration > 0 {\n\t\t\t\t\t\t\t\tservice.Annotations[awsELBConnectionIdleTimeoutAnnotation] = strconv.FormatUint(uint64(v.Round(time.Second).Seconds()), 10)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif platform.AWS != nil && len(platform.AWS.ResourceTags) > 0 {\n\t\t\t\tvar additionalTags []string\n\t\t\t\tfor _, userTag := range platform.AWS.ResourceTags {\n\t\t\t\t\tif len(userTag.Key) > 0 {\n\t\t\t\t\t\tadditionalTags = append(additionalTags, userTag.Key+\"=\"+userTag.Value)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(additionalTags) > 0 {\n\t\t\t\t\tservice.Annotations[awsLBAdditionalResourceTags] = strings.Join(additionalTags, \",\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Set the load balancer for AWS to be as aggressive as Azure (2 fail @ 5s interval, 2 healthy)\n\t\t\tservice.Annotations[awsLBHealthCheckTimeoutAnnotation] = awsLBHealthCheckTimeoutDefault\n\t\t\tservice.Annotations[awsLBHealthCheckUnhealthyThresholdAnnotation] = awsLBHealthCheckUnhealthyThresholdDefault\n\t\t\tservice.Annotations[awsLBHealthCheckHealthyThresholdAnnotation] = awsLBHealthCheckHealthyThresholdDefault\n\t\tcase configv1.IBMCloudPlatformType, configv1.PowerVSPlatformType:\n\t\t\t// Set ExternalTrafficPolicy to type Cluster - IBM's LoadBalancer impl is created within the cluster.\n\t\t\t// LB places VIP on one of the worker nodes, using keepalived to maintain the VIP and 
ensuring redundancy\n\t\t\t// LB relies on iptable rules kube-proxy puts in to send traffic from the VIP node to the cluster\n\t\t\t// If policy is local, traffic is only sent to pods on the local node, as such Cluster enables traffic to flow to all the pods in the cluster\n\t\t\tservice.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeCluster\n\t\t\tif proxyNeeded {\n\t\t\t\tservice.Annotations[iksLBEnableFeaturesAnnotation] = iksLBEnableFeaturesProxyProtocol\n\t\t\t}\n\n\t\tcase configv1.AlibabaCloudPlatformType:\n\t\t\tif !isInternal {\n\t\t\t\tservice.Annotations[alibabaCloudLBAddressTypeAnnotation] = alibabaCloudLBAddressTypeInternet\n\t\t\t}\n\t\t}\n\t\t// Azure load balancers are not customizable and are set to (2 fail @ 5s interval, 2 healthy)\n\t\t// GCP load balancers are not customizable and are set to (3 fail @ 8s interval, 1 healthy)\n\n\t\tif v, err := shouldUseLocalWithFallback(ci, service); err != nil {\n\t\t\treturn true, service, err\n\t\t} else if v {\n\t\t\tservice.Annotations[localWithFallbackAnnotation] = \"\"\n\t\t}\n\t}\n\n\tif ci.Spec.EndpointPublishingStrategy != nil {\n\t\tlb := ci.Spec.EndpointPublishingStrategy.LoadBalancer\n\t\tif lb != nil && len(lb.AllowedSourceRanges) > 0 {\n\t\t\tcidrs := make([]string, len(lb.AllowedSourceRanges))\n\t\t\tfor i, cidr := range lb.AllowedSourceRanges {\n\t\t\t\tcidrs[i] = string(cidr)\n\t\t\t}\n\t\t\tservice.Spec.LoadBalancerSourceRanges = cidrs\n\t\t}\n\t}\n\n\tservice.SetOwnerReferences([]metav1.OwnerReference{deploymentRef})\n\treturn true, service, nil\n}", "func (c *cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn &loadbalancer{\n\t\tnamespace: c.namespace,\n\t\tkubevirt: c.kubevirt,\n\t}, true\n}", "func HandleLoadbalancerListSuccessfully(t *testing.T) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"GET\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", 
client.TokenID)\n\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tr.ParseForm()\n\t\tmarker := r.Form.Get(\"marker\")\n\t\tswitch marker {\n\t\tcase \"\":\n\t\t\tfmt.Fprintf(w, LoadbalancersListBody)\n\t\tcase \"45e08a3e-a78f-4b40-a229-1e7e23eee1ab\":\n\t\t\tfmt.Fprintf(w, `{ \"loadbalancers\": [] }`)\n\t\tdefault:\n\t\t\tt.Fatalf(\"/v2.0/lbaas/loadbalancers invoked with unexpected marker=[%s]\", marker)\n\t\t}\n\t})\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) GetLoadBalancerByName(name interface{}, opts ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{name}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetLoadBalancerByName\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).GetLoadBalancerByName), varargs...)\n}", "func (l *LoadBalancer) String() string {\n\treturn l.Name\n}", "func (o GetLoadBalancersBalancerOutput) Tags() pulumi.MapOutput {\n\treturn o.ApplyT(func(v GetLoadBalancersBalancer) map[string]interface{} { return v.Tags }).(pulumi.MapOutput)\n}", "func TestRequestIPWithMismatchedLabel(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"blue\",\n\t\t},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"color\": \"red\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.123\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != 
servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\t\tif svc.Status.Conditions[0].Reason != \"pool_selector_mismatch\" {\n\t\t\tt.Error(\"Expected service to receive 'pool_selector_mismatch' condition\")\n\t\t}\n\n\t\treturn true\n\t}, 1*time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected status update of service\")\n\t}\n}", "func HandleLoadbalancerCreationSuccessfully(t *testing.T, response string) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestJSONRequest(t, r, `{\n\t\t\t\"loadbalancer\": {\n\t\t\t\t\"name\": \"db_lb\",\n\t\t\t\t\"vip_port_id\": \"2bf413c8-41a9-4477-b505-333d5cbe8b55\",\n\t\t\t\t\"vip_subnet_id\": \"9cedb85d-0759-4898-8a4b-fa5a5ea10086\",\n\t\t\t\t\"vip_address\": \"10.30.176.48\",\n\t\t\t\t\"flavor_id\": \"bba40eb2-ee8c-11e9-81b4-2a2ae2dbcce4\",\n\t\t\t\t\"provider\": \"haproxy\",\n\t\t\t\t\"admin_state_up\": true,\n\t\t\t\t\"tags\": [\"test\", \"stage\"]\n\t\t\t}\n\t\t}`)\n\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tfmt.Fprintf(w, response)\n\t})\n}", "func (c *NSXClient) CreateLoadBalancer(opts LoadBalancerOptions) (string, bool, error) {\n\tctx := c.api.Context\n\tapi := c.api.ServicesApi\n\trouting := c.api.LogicalRoutingAndServicesApi\n\n\texistingServer, err := c.GetLoadBalancer(opts.Name)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\tif existingServer != nil {\n\t\treturn existingServer.IpAddress, true, nil\n\t}\n\n\tt0, resp, err := routing.ReadLogicalRouter(ctx, opts.Tier0)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"failed to read T0 router %s: %s\", opts.Tier0, errorString(resp, err))\n\t}\n\n\tt0Port, resp, err := 
routing.CreateLogicalRouterLinkPortOnTier0(ctx, manager.LogicalRouterLinkPortOnTier0{\n\t\tLogicalRouterId: t0.Id,\n\t\tDisplayName: \"lb-\" + opts.Name + \"-T1\",\n\t})\n\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create T0 Local router port %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tt1, resp, err := routing.CreateLogicalRouter(ctx, manager.LogicalRouter{\n\t\tRouterType: \"TIER1\",\n\t\tDisplayName: \"lb-\" + opts.Name,\n\t\tEdgeClusterId: t0.EdgeClusterId,\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create T1 router %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\t_, resp, err = routing.UpdateAdvertisementConfig(ctx, t1.Id, manager.AdvertisementConfig{\n\t\tAdvertiseLbVip: true,\n\t\tAdvertiseLbSnatIp: true,\n\t\tEnabled: true,\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to update advertisement config %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tc.Infof(\"Created T1 router %s/%s\", t1.DisplayName, t1.Id)\n\n\t_, resp, err = routing.CreateLogicalRouterLinkPortOnTier1(ctx, manager.LogicalRouterLinkPortOnTier1{\n\t\tLogicalRouterId: t1.Id,\n\t\tDisplayName: t0.DisplayName + \"-uplink\",\n\t\tLinkedLogicalRouterPortId: &common.ResourceReference{\n\t\t\tTargetType: \"LogicalPort\",\n\t\t\tTargetId: t0Port.Id,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"failed to link T1 (%s) to T0 (%s): %s\", t1.Id, t0Port.Id, errorString(resp, err))\n\t}\n\n\tgroup, err := c.CreateOrUpdateNSGroup(opts.Name, \"LogicalPort\", opts.MemberTags)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\tvar monitorID string\n\tif opts.Protocol == TCPProtocol {\n\t\tmonitorID, err = c.GetOrCreateTCPHealthCheck(opts.Ports[0])\n\t\tif err != nil {\n\t\t\treturn \"\", false, fmt.Errorf(\"unable to create tcp loadbalancer monitor: %v\", err)\n\t\t}\n\t} else {\n\t\tmonitorID, err = c.GetOrCreateHTTPHealthCheck(opts.Ports[0])\n\t\tif err != nil {\n\t\t\treturn 
\"\", false, fmt.Errorf(\"unable to create http loadbalancer monitor: %v\", err)\n\t\t}\n\t}\n\tpool, resp, err := api.CreateLoadBalancerPool(ctx, loadbalancer.LbPool{\n\t\tId: opts.Name,\n\t\tActiveMonitorIds: []string{monitorID},\n\t\tSnatTranslation: &loadbalancer.LbSnatTranslation{\n\t\t\tType_: \"LbSnatAutoMap\",\n\t\t},\n\t\tMemberGroup: &loadbalancer.PoolMemberGroup{\n\t\t\tGroupingObject: &common.ResourceReference{\n\t\t\t\tTargetType: \"NSGroup\",\n\t\t\t\tTargetId: group.Id,\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create load balancer pool %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tip, err := c.AllocateIP(opts.IPPool)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to allocate VIP %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tserver, resp, err := api.CreateLoadBalancerVirtualServer(ctx, loadbalancer.LbVirtualServer{\n\t\tId: opts.Name,\n\t\tEnabled: true,\n\t\tIpAddress: ip,\n\t\tIpProtocol: opts.Protocol,\n\t\tPorts: opts.Ports,\n\t\tPoolId: pool.Id,\n\t})\n\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create virtual server %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tlb := loadbalancer.LbService{\n\t\tDisplayName: opts.Name,\n\t\tAttachment: &common.ResourceReference{\n\t\t\tTargetType: \"LogicalRouter\",\n\t\t\tTargetId: t1.Id,\n\t\t},\n\t\tEnabled: true,\n\t\tErrorLogLevel: \"INFO\",\n\t\tSize: \"SMALL\",\n\t\tVirtualServerIds: []string{server.Id},\n\t}\n\n\t_, resp, err = api.CreateLoadBalancerService(c.api.Context, lb)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create load balancer %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tc.Infof(\"Created LoadBalancer service: %s/%s\", server.Id, ip)\n\treturn ip, false, nil\n}", "func (a *Client) V1GetLoadBalancerPool(params *V1GetLoadBalancerPoolParams) (*V1GetLoadBalancerPoolOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil 
{\n\t\tparams = NewV1GetLoadBalancerPoolParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"V1GetLoadBalancerPool\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/loadbalancers/{name}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V1GetLoadBalancerPoolReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*V1GetLoadBalancerPoolOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*V1GetLoadBalancerPoolDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func ExampleELB_CreateLoadBalancer_shared02() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(443),\n\t\t\t\tProtocol: aws.String(\"HTTPS\"),\n\t\t\t\tSSLCertificateId: aws.String(\"arn:aws:iam::123456789012:server-certificate/my-server-cert\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t\tSecurityGroups: []*string{\n\t\t\taws.String(\"sg-a61988c3\"),\n\t\t},\n\t\tSubnets: []*string{\n\t\t\taws.String(\"subnet-15aaab61\"),\n\t\t},\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase 
elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) DeleteLoadBalancer(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteLoadBalancer\", 
reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).DeleteLoadBalancer), p)\n}", "func HandleFullyPopulatedLoadbalancerCreationSuccessfully(t *testing.T, response string) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestJSONRequest(t, r, `{\n\t\t\t\"loadbalancer\": {\n\t\t\t\t\"admin_state_up\": true,\n\t\t\t\t\"flavor_id\": \"bba40eb2-ee8c-11e9-81b4-2a2ae2dbcce4\",\n\t\t\t\t\"listeners\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"default_pool\": {\n\t\t\t\t\t\t\t\"healthmonitor\": {\n\t\t\t\t\t\t\t\t\"delay\": 3,\n\t\t\t\t\t\t\t\t\"expected_codes\": \"200\",\n\t\t\t\t\t\t\t\t\"http_method\": \"GET\",\n\t\t\t\t\t\t\t\t\"max_retries\": 2,\n\t\t\t\t\t\t\t\t\"max_retries_down\": 3,\n\t\t\t\t\t\t\t\t\"name\": \"db\",\n\t\t\t\t\t\t\t\t\"timeout\": 1,\n\t\t\t\t\t\t\t\t\"type\": \"HTTP\",\n\t\t\t\t\t\t\t\t\"url_path\": \"/index.html\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"lb_algorithm\": \"ROUND_ROBIN\",\n\t\t\t\t\t\t\t\"members\": [\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"address\": \"192.0.2.51\",\n\t\t\t\t\t\t\t\t\t\"protocol_port\": 80\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"address\": \"192.0.2.52\",\n\t\t\t\t\t\t\t\t\t\"protocol_port\": 80\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\"name\": \"Example pool\",\n\t\t\t\t\t\t\t\"protocol\": \"HTTP\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"l7policies\": [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"action\": \"REDIRECT_TO_URL\",\n\t\t\t\t\t\t\t\t\"name\": \"redirect-example.com\",\n\t\t\t\t\t\t\t\t\"redirect_url\": \"http://www.example.com\",\n\t\t\t\t\t\t\t\t\"rules\": [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\"compare_type\": \"REGEX\",\n\t\t\t\t\t\t\t\t\t\t\"type\": \"PATH\",\n\t\t\t\t\t\t\t\t\t\t\"value\": \"/images*\"\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t],\n\t\t\t\t\t\t\"name\": 
\"redirect_listener\",\n\t\t\t\t\t\t\"protocol\": \"HTTP\",\n\t\t\t\t\t\t\"protocol_port\": 8080\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\t\"name\": \"db_lb\",\n\t\t\t\t\"provider\": \"octavia\",\n\t\t\t\t\"tags\": [\n\t\t\t\t\t\"test\",\n\t\t\t\t\t\"stage\"\n\t\t\t\t],\n\t\t\t\t\"vip_address\": \"10.30.176.48\",\n\t\t\t\t\"vip_port_id\": \"2bf413c8-41a9-4477-b505-333d5cbe8b55\",\n\t\t\t\t\"vip_subnet_id\": \"9cedb85d-0759-4898-8a4b-fa5a5ea10086\"\n\t\t\t}\n\t\t}`)\n\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tfmt.Fprintf(w, response)\n\t})\n}", "func (l *SharedLoadBalancer) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tklog.Infof(\"EnsureLoadBalancer: called with service %s/%s, node: %d\",\n\t\tservice.Namespace, service.Name, len(nodes))\n\n\tif err := ensureLoadBalancerValidation(service, nodes); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// get exits or create a new ELB instance\n\tloadbalancer, err := l.getLoadBalancerInstance(ctx, clusterName, service)\n\tspecifiedID := getStringFromSvsAnnotation(service, ElbID, \"\")\n\tif common.IsNotFound(err) && specifiedID != \"\" {\n\t\treturn nil, err\n\t}\n\tif err != nil && common.IsNotFound(err) {\n\t\tsubnetID, e := l.getSubnetID(service, nodes[0])\n\t\tif e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\tloadbalancer, err = l.createLoadbalancer(clusterName, subnetID, service)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// query ELB listeners list\n\tlisteners, err := l.sharedELBClient.ListListeners(&elbmodel.ListListenersRequest{LoadbalancerId: &loadbalancer.Id})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, port := range service.Spec.Ports {\n\t\tlistener := l.filterListenerByPort(listeners, service, port)\n\t\t// add or update listener\n\t\tif listener == nil {\n\t\t\tlistener, err = l.createListener(loadbalancer.Id, service, port)\n\t\t} else 
{\n\t\t\terr = l.updateListener(listener, service)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlisteners = popListener(listeners, listener.Id)\n\n\t\t// query pool or create pool\n\t\tpool, err := l.getPool(loadbalancer.Id, listener.Id)\n\t\tif err != nil && common.IsNotFound(err) {\n\t\t\tpool, err = l.createPool(listener, service)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// add new members and remove the obsolete members.\n\t\tif err = l.addOrRemoveMembers(loadbalancer, service, pool, port, nodes); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// add or remove health monitor\n\t\tif err = l.ensureHealthCheck(loadbalancer.Id, pool, port, service, nodes[0]); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif specifiedID == \"\" {\n\t\t// All remaining listeners are obsolete, delete them\n\t\terr = l.deleteListeners(loadbalancer.Id, listeners)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tingressIP := loadbalancer.VipAddress\n\tpublicIPAddr, err := l.createOrAssociateEIP(loadbalancer, service)\n\tif err == nil {\n\t\tif publicIPAddr != \"\" {\n\t\t\tingressIP = publicIPAddr\n\t\t}\n\n\t\treturn &corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{{IP: ingressIP}},\n\t\t}, nil\n\t}\n\n\t// rollback\n\tklog.Errorf(\"rollback:failed to create the EIP, delete ELB instance created, error: %s\", err)\n\terrs := []error{err}\n\terr = l.EnsureLoadBalancerDeleted(ctx, clusterName, service)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t\tklog.Errorf(\"rollback: error deleting ELB instance: %s\", err)\n\t}\n\treturn nil, errors.NewAggregate(errs)\n}", "func (r *serviceStatusResolver) LoadBalancer(ctx context.Context) (edgecluster.LoadBalancerStatusResolverContract, error) {\n\tif r.serviceStatus.LoadBalancer == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn r.resolverCreator.NewLoadBalancerStatusResolver(ctx, r.serviceStatus.LoadBalancer)\n}", "func (mr 
*MockLoadBalancerServiceIfaceMockRecorder) CreateLoadBalancer(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateLoadBalancer\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).CreateLoadBalancer), p)\n}", "func TestGetAllBackendServer(t *testing.T) {\n\tcloud, _, resp, err := beforeTestBlb()\n\tif err != nil {\n\t\tt.Errorf(\"beforeTestBlb err, err: %v\", err)\n\t}\n\tctx := context.Background()\n\t// bs is nil\n\tlb := &blb.LoadBalancer{\n\t\tBlbId: resp.LoadBalancerId,\n\t}\n\tbs, err := cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 0 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n\t// add bs\n\tbsAdd := []blb.BackendServer{\n\t\t{\n\t\t\tInstanceId: \"1\",\n\t\t},\n\t\t{\n\t\t\tInstanceId: \"2\",\n\t\t},\n\t}\n\targs := blb.AddBackendServersArgs{\n\t\tLoadBalancerId: lb.BlbId,\n\t\tBackendServerList: bsAdd,\n\t}\n\terr = cloud.clientSet.BLBClient.AddBackendServers(ctx, &args, &bce.SignOption{\n\t\tCustomSignFunc: CCEServiceSign,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"AddBackendServers err, err: %v\", err)\n\t}\n\t// get bs\n\tbs, err = cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 2 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n}", "func (c *clbClient) Lbs() []string {\n\treturn c.loadBalancers\n}", "func (s AwsElbv2LoadBalancerDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error {\n\tctx, cancel := getContextWithCancel()\n\tdefer cancel()\n\n\tlb = cleanupSubnetInFrontendIPConfigurations(&lb)\n\n\trgName := az.getLoadBalancerResourceGroup()\n\trerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, pointer.StringDeref(lb.Name, \"\"), lb, 
pointer.StringDeref(lb.Etag, \"\"))\n\tklog.V(10).Infof(\"LoadBalancerClient.CreateOrUpdate(%s): end\", *lb.Name)\n\tif rerr == nil {\n\t\t// Invalidate the cache right after updating\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t\treturn nil\n\t}\n\n\tlbJSON, _ := json.Marshal(lb)\n\tklog.Warningf(\"LoadBalancerClient.CreateOrUpdate(%s) failed: %v, LoadBalancer request: %s\", pointer.StringDeref(lb.Name, \"\"), rerr.Error(), string(lbJSON))\n\n\t// Invalidate the cache because ETAG precondition mismatch.\n\tif rerr.HTTPStatusCode == http.StatusPreconditionFailed {\n\t\tklog.V(3).Infof(\"LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed\", pointer.StringDeref(lb.Name, \"\"))\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t}\n\n\tretryErrorMessage := rerr.Error().Error()\n\t// Invalidate the cache because another new operation has canceled the current request.\n\tif strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) {\n\t\tklog.V(3).Infof(\"LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation\", pointer.StringDeref(lb.Name, \"\"))\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t}\n\n\t// The LB update may fail because the referenced PIP is not in the Succeeded provisioning state\n\tif strings.Contains(strings.ToLower(retryErrorMessage), strings.ToLower(consts.ReferencedResourceNotProvisionedMessageCode)) {\n\t\tmatches := pipErrorMessageRE.FindStringSubmatch(retryErrorMessage)\n\t\tif len(matches) != 3 {\n\t\t\tklog.Errorf(\"Failed to parse the retry error message %s\", retryErrorMessage)\n\t\t\treturn rerr.Error()\n\t\t}\n\t\tpipRG, pipName := matches[1], matches[2]\n\t\tklog.V(3).Infof(\"The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it\", pipName, pointer.StringDeref(lb.Name, \"\"))\n\t\tpip, _, err := az.getPublicIPAddress(pipRG, pipName, azcache.CacheReadTypeDefault)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to 
get the public IP %s in resource group %s: %v\", pipName, pipRG, err)\n\t\t\treturn rerr.Error()\n\t\t}\n\t\t// Perform a dummy update to fix the provisioning state\n\t\terr = az.CreateOrUpdatePIP(service, pipRG, pip)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to update the public IP %s in resource group %s: %v\", pipName, pipRG, err)\n\t\t\treturn rerr.Error()\n\t\t}\n\t\t// Invalidate the LB cache, return the error, and the controller manager\n\t\t// would retry the LB update in the next reconcile loop\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t}\n\n\treturn rerr.Error()\n}", "func (el *ELBV2Manager) Detect(metrics []config.MetricConfig) (interface{}, error) {\n\n\tlog.WithFields(log.Fields{\n\t\t\"region\": el.awsManager.GetRegion(),\n\t\t\"resource\": \"elb_v2\",\n\t}).Info(\"starting to analyze resource\")\n\n\tel.awsManager.GetCollector().CollectStart(el.Name)\n\n\tdetectedELBV2 := []DetectedELBV2{}\n\n\tpricingRegionPrefix, err := el.awsManager.GetPricingClient().GetRegionPrefix(el.awsManager.GetRegion())\n\tif err != nil {\n\t\tlog.WithError(err).WithFields(log.Fields{\n\t\t\t\"region\": el.awsManager.GetRegion(),\n\t\t}).Error(\"Could not get pricing region prefix\")\n\t\tel.awsManager.GetCollector().CollectError(el.Name, err)\n\t\treturn detectedELBV2, err\n\t}\n\n\tinstances, err := el.describeLoadbalancers(nil, nil)\n\tif err != nil {\n\t\tel.awsManager.GetCollector().CollectError(el.Name, err)\n\t\treturn detectedELBV2, err\n\t}\n\n\tnow := time.Now()\n\n\tfor _, instance := range instances {\n\t\tvar cloudWatchNameSpace string\n\t\tvar price float64\n\t\tif loadBalancerConfig, found := loadBalancersConfig[*instance.Type]; found {\n\t\t\tcloudWatchNameSpace = loadBalancerConfig.cloudWatchNamespace\n\n\t\t\tlog.WithField(\"name\", *instance.LoadBalancerName).Debug(\"checking elbV2\")\n\n\t\t\tloadBalancerConfig.pricingfilters = append(\n\t\t\t\tloadBalancerConfig.pricingfilters, &pricing.Filter{\n\t\t\t\t\tType: 
awsClient.String(\"TERM_MATCH\"),\n\t\t\t\t\tField: awsClient.String(\"usagetype\"),\n\t\t\t\t\tValue: awsClient.String(fmt.Sprintf(\"%sLoadBalancerUsage\", pricingRegionPrefix)),\n\t\t\t\t})\n\t\t\tprice, _ = el.awsManager.GetPricingClient().GetPrice(el.getPricingFilterInput(loadBalancerConfig.pricingfilters), \"\", el.awsManager.GetRegion())\n\t\t}\n\t\tfor _, metric := range metrics {\n\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"name\": *instance.LoadBalancerName,\n\t\t\t\t\"metric_name\": metric.Description,\n\t\t\t}).Debug(\"check metric\")\n\n\t\t\tperiod := int64(metric.Period.Seconds())\n\n\t\t\tmetricEndTime := now.Add(time.Duration(-metric.StartTime))\n\n\t\t\tregx, _ := regexp.Compile(\".*loadbalancer/\")\n\n\t\t\telbv2Name := regx.ReplaceAllString(*instance.LoadBalancerArn, \"\")\n\n\t\t\tmetricInput := awsCloudwatch.GetMetricStatisticsInput{\n\t\t\t\tNamespace: &cloudWatchNameSpace,\n\t\t\t\tMetricName: &metric.Description,\n\t\t\t\tPeriod: &period,\n\t\t\t\tStartTime: &metricEndTime,\n\t\t\t\tEndTime: &now,\n\t\t\t\tDimensions: []*awsCloudwatch.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: awsClient.String(\"LoadBalancer\"),\n\t\t\t\t\t\tValue: &elbv2Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tformulaValue, _, err := el.awsManager.GetCloudWatchClient().GetMetric(&metricInput, metric)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).WithFields(log.Fields{\n\t\t\t\t\t\"name\": *instance.LoadBalancerName,\n\t\t\t\t\t\"metric_name\": metric.Description,\n\t\t\t\t}).Error(\"Could not get cloudwatch metric data\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\texpression, err := expression.BoolExpression(formulaValue, metric.Constraint.Value, metric.Constraint.Operator)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif expression {\n\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"metric_name\": metric.Description,\n\t\t\t\t\t\"constraint_operator\": metric.Constraint.Operator,\n\t\t\t\t\t\"constraint_Value\": 
metric.Constraint.Value,\n\t\t\t\t\t\"formula_value\": formulaValue,\n\t\t\t\t\t\"name\": *instance.LoadBalancerName,\n\t\t\t\t\t\"region\": el.awsManager.GetRegion(),\n\t\t\t\t}).Info(\"LoadBalancer detected as unutilized resource\")\n\n\t\t\t\ttags, err := el.client.DescribeTags(&elbv2.DescribeTagsInput{\n\t\t\t\t\tResourceArns: []*string{instance.LoadBalancerArn},\n\t\t\t\t})\n\t\t\t\ttagsData := map[string]string{}\n\t\t\t\tif err == nil {\n\t\t\t\t\tfor _, tags := range tags.TagDescriptions {\n\t\t\t\t\t\tfor _, tag := range tags.Tags {\n\t\t\t\t\t\t\ttagsData[*tag.Key] = *tag.Value\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\telbv2 := DetectedELBV2{\n\t\t\t\t\tRegion: el.awsManager.GetRegion(),\n\t\t\t\t\tMetric: metric.Description,\n\t\t\t\t\tType: *instance.Type,\n\t\t\t\t\tPriceDetectedFields: collector.PriceDetectedFields{\n\t\t\t\t\t\tResourceID: *instance.LoadBalancerName,\n\t\t\t\t\t\tLaunchTime: *instance.CreatedTime,\n\t\t\t\t\t\tPricePerHour: price,\n\t\t\t\t\t\tPricePerMonth: price * collector.TotalMonthHours,\n\t\t\t\t\t\tTag: tagsData,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tel.awsManager.GetCollector().AddResource(collector.EventCollector{\n\t\t\t\t\tResourceName: el.Name,\n\t\t\t\t\tData: elbv2,\n\t\t\t\t})\n\n\t\t\t\tdetectedELBV2 = append(detectedELBV2, elbv2)\n\t\t\t}\n\t\t}\n\t}\n\n\tel.awsManager.GetCollector().CollectFinish(el.Name)\n\n\treturn detectedELBV2, nil\n\n}", "func (c *Client) GetLoadBalancer(loadBalancerID string) (LoadBalancer, *bmc.Error) {\n\tloadBalancer := LoadBalancer{}\n\tloadBalancerID = url.PathEscape(loadBalancerID)\n\tresp, err := c.Request(\"GET\", fmt.Sprintf(\"/loadBalancers/%s\", loadBalancerID), nil)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\tbmcError := bmc.Error{Code: string(resp.StatusCode), Message: err.Error()}\n\t\treturn loadBalancer, &bmcError\n\t}\n\tlogrus.Debug(\"StatusCode: \", resp.StatusCode)\n\tif resp.StatusCode != 200 {\n\t\treturn loadBalancer, bmc.NewError(*resp)\n\t}\n\tdefer 
resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tlogrus.Debug(\"Body: \", string(body))\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Could not read JSON response: %s\", err)\n\t}\n\n\tif err = json.Unmarshal(body, &loadBalancer); err != nil {\n\t\tlogrus.Fatalf(\"Unmarshal impossible: %s\", err)\n\t}\n\treturn loadBalancer, nil\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) UpdateLoadBalancer(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateLoadBalancer\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).UpdateLoadBalancer), p)\n}", "func generateLoadBalancers(cr *svcapitypes.Service) []*svcsdk.LoadBalancer {\n\tloadBalancers := []*svcsdk.LoadBalancer{}\n\n\tif cr.Spec.ForProvider.LoadBalancers == nil {\n\t\treturn loadBalancers\n\t}\n\n\tfor _, loadBalancer := range cr.Spec.ForProvider.LoadBalancers {\n\t\tconvertedLB := &svcsdk.LoadBalancer{}\n\t\tconvertedLB.ContainerName = loadBalancer.ContainerName\n\t\tconvertedLB.ContainerPort = loadBalancer.ContainerPort\n\t\tconvertedLB.LoadBalancerName = loadBalancer.LoadBalancerName\n\t\tconvertedLB.TargetGroupArn = loadBalancer.TargetGroupARN\n\n\t\tloadBalancers = append(loadBalancers, convertedLB)\n\t}\n\treturn loadBalancers\n}", "func (az *Cloud) ListLB(service *v1.Service) ([]network.LoadBalancer, error) {\n\tctx, cancel := getContextWithCancel()\n\tdefer cancel()\n\n\trgName := az.getLoadBalancerResourceGroup()\n\tallLBs, rerr := az.LoadBalancerClient.List(ctx, rgName)\n\tif rerr != nil {\n\t\tif rerr.IsNotFound() {\n\t\t\treturn nil, nil\n\t\t}\n\t\taz.Event(service, v1.EventTypeWarning, \"ListLoadBalancers\", rerr.Error().Error())\n\t\tklog.Errorf(\"LoadBalancerClient.List(%v) failure with err=%v\", rgName, rerr)\n\t\treturn nil, rerr.Error()\n\t}\n\tklog.V(2).Infof(\"LoadBalancerClient.List(%v) success\", rgName)\n\treturn allLBs, nil\n}", "func (mr *MockRDSAPIMockRecorder) DescribeDBSubnetGroups(arg0 interface{}) 
*gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeDBSubnetGroups\", reflect.TypeOf((*MockRDSAPI)(nil).DescribeDBSubnetGroups), arg0)\n}", "func LookupLoadBalancer(ctx *pulumi.Context, args *LookupLoadBalancerArgs, opts ...pulumi.InvokeOption) (*LookupLoadBalancerResult, error) {\n\tvar rv LookupLoadBalancerResult\n\terr := ctx.Invoke(\"aws:elb/getLoadBalancer:getLoadBalancer\", args, &rv, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rv, nil\n}", "func (o *DeregisterVmsInLoadBalancerRequest) GetLoadBalancerNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.LoadBalancerName, true\n}", "func (*NetworkLoadBalancer) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_loadbalancer_v1_network_load_balancer_proto_rawDescGZIP(), []int{0}\n}", "func (a *Client) V1GetLoadBalancerPool(params *V1GetLoadBalancerPoolParams) (*V1GetLoadBalancerPoolOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewV1GetLoadBalancerPoolParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"V1GetLoadBalancerPool\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/loadbalancers/{name}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V1GetLoadBalancerPoolReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V1GetLoadBalancerPoolOK), nil\n\n}", "func (mr *MockClientMockRecorder) RegisterInstancesWithLoadBalancer(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RegisterInstancesWithLoadBalancer\", reflect.TypeOf((*MockClient)(nil).RegisterInstancesWithLoadBalancer), arg0)\n}", "func (mr 
*MockLoadBalancerServiceIfaceMockRecorder) NewListLoadBalancersParams() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"NewListLoadBalancersParams\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).NewListLoadBalancersParams))\n}", "func TestServiceDelete(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tvar svcIP string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tsvcIP = svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status to be updated\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been allocated\")\n\t}\n\n\terr := 
fixture.svcClient.Services(\"default\").Delete(context.Background(), \"service-a\", meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been released\")\n\t}\n}", "func GetLoadBalancerNames(kubectlOptions *KubectlOptions) ([]string, error) {\n\tlogger := logging.GetProjectLogger()\n\tlogger.Infof(\"Getting all LoadBalancer names from services in kubernetes\")\n\n\tclient, err := GetKubernetesClientFromOptions(kubectlOptions)\n\tif err != nil {\n\t\treturn nil, errors.WithStackTrace(err)\n\t}\n\tservices, err := GetAllServices(client)\n\tif err != nil {\n\t\treturn nil, errors.WithStackTrace(err)\n\t}\n\tloadBalancerServices := filterLoadBalancerServices(services)\n\tlogger.Infof(\"Found %d LoadBalancer services of %d services in kubernetes.\", len(loadBalancerServices), len(services))\n\n\tlbNames := []string{}\n\tfor _, service := range loadBalancerServices {\n\t\tlbName, err := GetLoadBalancerNameFromService(service)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t}\n\t\tlbNames = append(lbNames, lbName)\n\t}\n\tlogger.Infof(\"Successfully extracted loadbalancer names\")\n\treturn lbNames, nil\n}", "func ExampleELB_DescribeLoadBalancerAttributes_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.DescribeLoadBalancerAttributesInput{\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t}\n\n\tresult, err := svc.DescribeLoadBalancerAttributes(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeLoadBalancerAttributeNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeLoadBalancerAttributeNotFoundException, 
aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func LoadBalancerName() reference.ExtractValueFn {\n\treturn func(mg resource.Managed) string {\n\t\tlb, ok := mg.(*elbv2.LoadBalancer)\n\t\tif !ok {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn aws.StringValue(lb.Status.AtProvider.LoadBalancerName)\n\t}\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) AddNetscalerLoadBalancer(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddNetscalerLoadBalancer\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).AddNetscalerLoadBalancer), p)\n}", "func TestAllocOnInit(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.123\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: 
\"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.124\",\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.124\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"No service updates expected\")\n\n\t\treturn false\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tawait.Block()\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.123\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.124\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n}", "func TestRemoveServiceLabel(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"blue\",\n\t\t},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tsvc1 := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"color\": \"blue\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: 
slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\tfixture.coreCS.Tracker().Add(\n\t\tsvc1,\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive exactly zero ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tsvc1 = svc1.DeepCopy()\n\tsvc1.Labels = map[string]string{\n\t\t\"color\": \"green\",\n\t}\n\n\t_, err := fixture.svcClient.Services(svc1.Namespace).Update(context.Background(), svc1, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) GetLoadBalancerByID(id interface{}, opts ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{id}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetLoadBalancerByID\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).GetLoadBalancerByID), varargs...)\n}" ]
[ "0.64861685", "0.60638064", "0.5967723", "0.5940613", "0.5812245", "0.5787266", "0.5732442", "0.57097083", "0.5666738", "0.55321044", "0.5529869", "0.5459238", "0.5447435", "0.54297996", "0.5411221", "0.54095614", "0.53486633", "0.53483623", "0.53436834", "0.5331982", "0.5303855", "0.5297424", "0.52837366", "0.5272651", "0.52671415", "0.5264427", "0.5260396", "0.52591276", "0.5245598", "0.5226472", "0.5216353", "0.52157104", "0.52157104", "0.52110213", "0.5208826", "0.5208453", "0.5203137", "0.5180481", "0.5175972", "0.51636004", "0.5153596", "0.5114124", "0.5114124", "0.5093626", "0.5081256", "0.50704587", "0.5035119", "0.5026658", "0.50237066", "0.50226706", "0.50211984", "0.49946997", "0.49913487", "0.49906895", "0.49831623", "0.4969033", "0.49547142", "0.49513978", "0.49498785", "0.49311888", "0.49286196", "0.4925993", "0.49144033", "0.49142134", "0.49137163", "0.49074498", "0.48986828", "0.48969603", "0.48786643", "0.48779225", "0.48691306", "0.48672333", "0.4867181", "0.48626772", "0.4862282", "0.485644", "0.48445508", "0.48270923", "0.48170424", "0.48152846", "0.48017728", "0.48016652", "0.47968036", "0.47880915", "0.4777423", "0.47712746", "0.47702014", "0.47654614", "0.4762887", "0.47555864", "0.47554433", "0.46982327", "0.46929008", "0.46912825", "0.46850502", "0.46837714", "0.46831417", "0.46737212", "0.46639124", "0.46608514" ]
0.72991157
0
DescribeLoadBalancerWithNs mocks base method
func (m *MockLoadBalance) DescribeLoadBalancerWithNs(ns, region, lbID, name string) (*cloud.LoadBalanceObject, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DescribeLoadBalancerWithNs", ns, region, lbID, name) ret0, _ := ret[0].(*cloud.LoadBalanceObject) ret1, _ := ret[1].(error) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) DescribeLoadBalancer(region, lbID, name string) (*cloud.LoadBalanceObject, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DescribeLoadBalancer\", region, lbID, name)\n\tret0, _ := ret[0].(*cloud.LoadBalanceObject)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoadBalancerServiceIface) ListNetscalerLoadBalancers(p *ListNetscalerLoadBalancersParams) (*ListNetscalerLoadBalancersResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListNetscalerLoadBalancers\", p)\n\tret0, _ := ret[0].(*ListNetscalerLoadBalancersResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoadBalancerServiceIface) ListLoadBalancers(p *ListLoadBalancersParams) (*ListLoadBalancersResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListLoadBalancers\", p)\n\tret0, _ := ret[0].(*ListLoadBalancersResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestNonMatchingLBClass(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tlbClass := \"net.example/some-other-class\"\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerClass: &lbClass,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"Unexpected patch to a service\")\n\n\t\treturn true\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif !await.Block() 
{\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n}", "func TestAllocOnInit(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.123\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.124\",\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.124\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"No service updates expected\")\n\n\t\treturn false\n\t}, 
100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tawait.Block()\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.123\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.124\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n}", "func (c *MockLoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string) (*network.LoadBalancer, error) {\n\tfor _, lb := range c.LBs {\n\t\tif *lb.Name == loadBalancerName {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\treturn nil, nil\n}", "func (lb *GetLoadbalancerInput) GetAllLoadbalancer() (GetLoadbalancerResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(lb.Cloud.Name)); status != true {\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetNetworks\")\n\t}\n\n\tswitch strings.ToLower(lb.Cloud.Name) {\n\tcase \"aws\":\n\n\t\t// I will establish session so that we can carry out the process in cloud\n\t\tsess := (lb.Cloud.Client).(*session.Session)\n\n\t\t// authorizing further request\n\t\tauthinpt := new(auth.EstablishConnectionInput)\n\t\tauthinpt.Region = lb.Cloud.Region\n\t\tauthinpt.Session = sess\n\t\tauthinpt.Resource = \"elb12\"\n\t\tlbin := new(loadbalance.GetLoadbalancerInput)\n\t\tlbin.GetRaw = lb.Cloud.GetRaw\n\n\t\tswitch strings.ToLower(lb.Type) {\n\t\tcase \"classic\":\n\t\t\tresponse, lberr := lbin.GetAllClassicLb(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\t\tcase \"application\":\n\t\t\tresponse, lberr := lbin.GetAllApplicationLb(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: 
response}, nil\n\t\tcase \"\":\n\t\t\tresponse, lberr := lbin.GetAllLoadbalancer(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\t\tdefault:\n\t\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(\"The loadbalancer type you entered is unknown to me\")\n\t\t}\n\n\tcase \"azure\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultAzResponse)\n\tcase \"gcp\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultGcpResponse)\n\tcase \"openstack\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultOpResponse)\n\tdefault:\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetLoadbalancers\")\n\t}\n}", "func (mr *MockLoadBalanceMockRecorder) DescribeLoadBalancerWithNs(ns, region, lbID, name interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeLoadBalancerWithNs\", reflect.TypeOf((*MockLoadBalance)(nil).DescribeLoadBalancerWithNs), ns, region, lbID, name)\n}", "func TestAddPool(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.20.10\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive 
no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.20.10\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.20.10'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\ttwentyPool := mkPool(poolBUID, \"pool-b\", []string{\"10.0.20.0/24\"})\n\t_, err := fixture.poolClient.Create(context.Background(), twentyPool, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (m *MockNetworkDescriber) OutboundLBName(arg0 string) string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OutboundLBName\", arg0)\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func TestRequestIPs(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.20\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := 
fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.10.20\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.20'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 3 {\n\t\t\tt.Error(\"Expected service to receive exactly three ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tfirst := false\n\t\tsecond := false\n\t\tthird := false\n\n\t\tfor _, ingress := range svc.Status.LoadBalancer.Ingress {\n\t\t\tswitch ingress.IP {\n\t\t\tcase \"10.0.10.21\":\n\t\t\t\tfirst = true\n\t\t\tcase \"10.0.10.22\":\n\t\t\t\tsecond = true\n\t\t\tcase \"10.0.10.23\":\n\t\t\t\tthird = true\n\t\t\tdefault:\n\t\t\t\tt.Error(\"Unexpected ingress IP\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tif !first {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.21'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !second {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.22'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !third {\n\t\t\tt.Error(\"Expected service to 
receive IP '10.0.10.23'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tserviceB := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-b\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceBUID,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tciliumSvcLBIPSAnnotation: \"10.0.10.22,10.0.10.23\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Create(context.Background(), serviceB, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-c\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Reason != \"already_allocated\" {\n\t\t\tt.Error(\"Expected condition reason to be 'already_allocated'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, 
time.Second)\n\n\t// request an already allocated IP\n\tserviceC := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-c\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceCUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Create(context.Background(), serviceC, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (p *MockProvisionerClient) LoadBalancer(string) client.GenericLoadBalancerInterface {\n\treturn &MockLoadBalancerClient{}\n}", "func (p *MockProvisionerClient) LoadBalancer(string) client.GenericLoadBalancerInterface {\n\treturn &MockLoadBalancerClient{}\n}", "func (s *Service) CreateOrUpdate(ctx context.Context, spec azure.Spec) error {\n\tinternalLBSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn errors.New(\"invalid internal load balancer specification\")\n\t}\n\tklog.V(2).Infof(\"creating internal load balancer %s\", internalLBSpec.Name)\n\tprobeName := \"tcpHTTPSProbe\"\n\tfrontEndIPConfigName := \"controlplane-internal-lbFrontEnd\"\n\tbackEndAddressPoolName := \"controlplane-internal-backEndPool\"\n\tidPrefix := fmt.Sprintf(\"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers\", s.Scope.SubscriptionID, s.Scope.ClusterConfig.ResourceGroup)\n\tlbName := internalLBSpec.Name\n\n\tklog.V(2).Infof(\"getting subnet %s\", internalLBSpec.SubnetName)\n\tsubnetInterface, err := subnets.NewService(s.Scope).Get(ctx, &subnets.Spec{Name: internalLBSpec.SubnetName, VnetName: internalLBSpec.VnetName})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubnet, ok := subnetInterface.(network.Subnet)\n\tif !ok {\n\t\treturn errors.New(\"subnet Get returned invalid interface\")\n\t}\n\tklog.V(2).Infof(\"successfully got subnet %s\", 
internalLBSpec.SubnetName)\n\n\tfuture, err := s.Client.CreateOrUpdate(ctx,\n\t\ts.Scope.ClusterConfig.ResourceGroup,\n\t\tlbName,\n\t\tnetwork.LoadBalancer{\n\t\t\tSku: &network.LoadBalancerSku{Name: network.LoadBalancerSkuNameStandard},\n\t\t\tLocation: to.StringPtr(s.Scope.ClusterConfig.Location),\n\t\t\tLoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{\n\t\t\t\tFrontendIPConfigurations: &[]network.FrontendIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &frontEndIPConfigName,\n\t\t\t\t\t\tFrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{\n\t\t\t\t\t\t\tPrivateIPAllocationMethod: network.Static,\n\t\t\t\t\t\t\tSubnet: &subnet,\n\t\t\t\t\t\t\tPrivateIPAddress: to.StringPtr(internalLBSpec.IPAddress),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBackendAddressPools: &[]network.BackendAddressPool{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &backEndAddressPoolName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProbes: &[]network.Probe{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &probeName,\n\t\t\t\t\t\tProbePropertiesFormat: &network.ProbePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.ProbeProtocolTCP,\n\t\t\t\t\t\t\tPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIntervalInSeconds: to.Int32Ptr(15),\n\t\t\t\t\t\t\tNumberOfProbes: to.Int32Ptr(4),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLoadBalancingRules: &[]network.LoadBalancingRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"LBRuleHTTPS\"),\n\t\t\t\t\t\tLoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tLoadDistribution: network.LoadDistributionDefault,\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: 
to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tBackendAddressPool: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/backendAddressPools/%s\", idPrefix, lbName, backEndAddressPoolName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tProbe: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/probes/%s\", idPrefix, lbName, probeName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot create load balancer\")\n\t}\n\n\terr = future.WaitForCompletionRef(ctx, s.Client.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot get internal load balancer create or update future response\")\n\t}\n\n\t_, err = future.Result(s.Client)\n\tklog.V(2).Infof(\"successfully created internal load balancer %s\", internalLBSpec.Name)\n\treturn err\n}", "func (m *MockLoadBalancerServiceIface) NewListNetscalerLoadBalancersParams() *ListNetscalerLoadBalancersParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewListNetscalerLoadBalancersParams\")\n\tret0, _ := ret[0].(*ListNetscalerLoadBalancersParams)\n\treturn ret0\n}", "func (m *MockNetworkDescriber) APIServerLBName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLBName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (lb *GetLoadbalancerInput) GetLoadbalancers() (GetLoadbalancerResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(lb.Cloud.Name)); status != true {\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetNetworks\")\n\t}\n\n\tswitch strings.ToLower(lb.Cloud.Name) {\n\tcase \"aws\":\n\n\t\t// Gets the established session so that it can carry out the process in cloud\n\t\tsess := (lb.Cloud.Client).(*session.Session)\n\n\t\t//authorizing to request further\n\t\tauthinpt := 
new(auth.EstablishConnectionInput)\n\t\tauthinpt.Region = lb.Cloud.Region\n\t\tauthinpt.Session = sess\n\t\tswitch strings.ToLower(lb.Type) {\n\t\tcase \"classic\":\n\t\t\tauthinpt.Resource = \"elb\"\n\t\tcase \"application\":\n\t\t\tauthinpt.Resource = \"elb2\"\n\t\t}\n\n\t\tlbin := new(loadbalance.GetLoadbalancerInput)\n\t\tlbin.GetRaw = lb.Cloud.GetRaw\n\t\tlbin.LbNames = lb.LbNames\n\t\tlbin.LbArns = lb.LbArns\n\t\tlbin.Type = lb.Type\n\t\tresponse, lberr := lbin.Getloadbalancers(*authinpt)\n\t\tif lberr != nil {\n\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t}\n\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\n\tcase \"azure\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultAzResponse)\n\tcase \"gcp\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultGcpResponse)\n\tcase \"openstack\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultOpResponse)\n\tdefault:\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetLoadbalancers\")\n\t}\n}", "func TestPoolSelectorNamespace(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tselector := slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"io.kubernetes.service.namespace\": \"tenant-one\",\n\t\t},\n\t}\n\tpoolA.Spec.ServiceSelector = &selector\n\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"red-service\" {\n\t\t\tt.Error(\"Expected update from 'red-service'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 
{\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tmatchingService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"red-service\",\n\t\t\tNamespace: \"tenant-one\",\n\t\t\tUID: serviceAUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilyPolicy: &policy,\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"tenant-one\").Create(context.Background(), matchingService, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"blue-service\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to not receive any ingress IPs\")\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly 
one condition\")\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tnonMatchingService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"blue-service\",\n\t\t\tNamespace: \"tenant-two\",\n\t\t\tUID: serviceBUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t// Setting the same label in an attempt to escalate privileges doesn't work\n\t\t\t\t\"io.kubernetes.service.namespace\": \"tenant-one\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilyPolicy: &policy,\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"tenant-two\").Create(context.Background(), nonMatchingService, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func TestGetAllBackendServer(t *testing.T) {\n\tcloud, _, resp, err := beforeTestBlb()\n\tif err != nil {\n\t\tt.Errorf(\"beforeTestBlb err, err: %v\", err)\n\t}\n\tctx := context.Background()\n\t// bs is nil\n\tlb := &blb.LoadBalancer{\n\t\tBlbId: resp.LoadBalancerId,\n\t}\n\tbs, err := cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 0 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n\t// add bs\n\tbsAdd := []blb.BackendServer{\n\t\t{\n\t\t\tInstanceId: \"1\",\n\t\t},\n\t\t{\n\t\t\tInstanceId: \"2\",\n\t\t},\n\t}\n\targs := blb.AddBackendServersArgs{\n\t\tLoadBalancerId: lb.BlbId,\n\t\tBackendServerList: bsAdd,\n\t}\n\terr = cloud.clientSet.BLBClient.AddBackendServers(ctx, &args, &bce.SignOption{\n\t\tCustomSignFunc: 
CCEServiceSign,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"AddBackendServers err, err: %v\", err)\n\t}\n\t// get bs\n\tbs, err = cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 2 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n}", "func CreateLoadBalancer(ctx context.Context, lbName, pipName string) (lb network.LoadBalancer, err error) {\n\tprobeName := \"probe\"\n\tfrontEndIPConfigName := \"fip\"\n\tbackEndAddressPoolName := \"backEndPool\"\n\tidPrefix := fmt.Sprintf(\"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers\", config.SubscriptionID(), config.GroupName())\n\n\tpip, err := GetPublicIP(ctx, pipName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tlbClient := getLBClient()\n\tfuture, err := lbClient.CreateOrUpdate(ctx,\n\t\tconfig.GroupName(),\n\t\tlbName,\n\t\tnetwork.LoadBalancer{\n\t\t\tLocation: to.StringPtr(config.Location()),\n\t\t\tLoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{\n\t\t\t\tFrontendIPConfigurations: &[]network.FrontendIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &frontEndIPConfigName,\n\t\t\t\t\t\tFrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{\n\t\t\t\t\t\t\tPrivateIPAllocationMethod: network.Dynamic,\n\t\t\t\t\t\t\tPublicIPAddress: &pip,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBackendAddressPools: &[]network.BackendAddressPool{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &backEndAddressPoolName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProbes: &[]network.Probe{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &probeName,\n\t\t\t\t\t\tProbePropertiesFormat: &network.ProbePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.ProbeProtocolHTTP,\n\t\t\t\t\t\t\tPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tIntervalInSeconds: to.Int32Ptr(15),\n\t\t\t\t\t\t\tNumberOfProbes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tRequestPath: 
to.StringPtr(\"healthprobe.aspx\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLoadBalancingRules: &[]network.LoadBalancingRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"lbRule\"),\n\t\t\t\t\t\tLoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tLoadDistribution: network.Default,\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tBackendAddressPool: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/backendAddressPools/%s\", idPrefix, lbName, backEndAddressPoolName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tProbe: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/probes/%s\", idPrefix, lbName, probeName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tInboundNatRules: &[]network.InboundNatRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"natRule1\"),\n\t\t\t\t\t\tInboundNatRulePropertiesFormat: &network.InboundNatRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(21),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(22),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"natRule2\"),\n\t\t\t\t\t\tInboundNatRulePropertiesFormat: 
&network.InboundNatRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(23),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(22),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn lb, fmt.Errorf(\"cannot create load balancer: %v\", err)\n\t}\n\n\terr = future.WaitForCompletion(ctx, lbClient.Client)\n\tif err != nil {\n\t\treturn lb, fmt.Errorf(\"cannot get load balancer create or update future response: %v\", err)\n\t}\n\n\treturn future.Result(lbClient)\n}", "func (m *MockClusterScoper) OutboundLBName(arg0 string) string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OutboundLBName\", arg0)\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func TestDisablePool(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn 
true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, 500*time.Millisecond)\n\n\tpoolA.Spec.Disabled = true\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].externallyDisabled {\n\t\tt.Fatal(\"The range has not been externally disabled\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\tt.Error(\"Expected service status update to occur on service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tserviceB := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-b\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceBUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Create(context.Background(), serviceB, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || 
action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolA.Spec.Disabled = false\n\n\t_, err = fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (m *MockLoadBalancerServiceIface) NewListLoadBalancersParams() *ListLoadBalancersParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewListLoadBalancersParams\")\n\tret0, _ := ret[0].(*ListLoadBalancersParams)\n\treturn ret0\n}", "func (m *MockNetworkDescriber) APIServerLBPoolName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLBPoolName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (s *ClusterScope) LBSpecs() []azure.ResourceSpecGetter {\n\tspecs := []azure.ResourceSpecGetter{\n\t\t&loadbalancers.LBSpec{\n\t\t\t// API Server LB\n\t\t\tName: s.APIServerLB().Name,\n\t\t\tResourceGroup: s.ResourceGroup(),\n\t\t\tSubscriptionID: s.SubscriptionID(),\n\t\t\tClusterName: s.ClusterName(),\n\t\t\tLocation: s.Location(),\n\t\t\tExtendedLocation: s.ExtendedLocation(),\n\t\t\tVNetName: s.Vnet().Name,\n\t\t\tVNetResourceGroup: s.Vnet().ResourceGroup,\n\t\t\tSubnetName: s.ControlPlaneSubnet().Name,\n\t\t\tFrontendIPConfigs: s.APIServerLB().FrontendIPs,\n\t\t\tAPIServerPort: s.APIServerPort(),\n\t\t\tType: s.APIServerLB().Type,\n\t\t\tSKU: s.APIServerLB().SKU,\n\t\t\tRole: infrav1.APIServerRole,\n\t\t\tBackendPoolName: s.APIServerLB().BackendPool.Name,\n\t\t\tIdleTimeoutInMinutes: s.APIServerLB().IdleTimeoutInMinutes,\n\t\t\tAdditionalTags: s.AdditionalTags(),\n\t\t},\n\t}\n\n\t// Node outbound LB\n\tif s.NodeOutboundLB() != nil {\n\t\tspecs = append(specs, &loadbalancers.LBSpec{\n\t\t\tName: 
s.NodeOutboundLB().Name,\n\t\t\tResourceGroup: s.ResourceGroup(),\n\t\t\tSubscriptionID: s.SubscriptionID(),\n\t\t\tClusterName: s.ClusterName(),\n\t\t\tLocation: s.Location(),\n\t\t\tExtendedLocation: s.ExtendedLocation(),\n\t\t\tVNetName: s.Vnet().Name,\n\t\t\tVNetResourceGroup: s.Vnet().ResourceGroup,\n\t\t\tFrontendIPConfigs: s.NodeOutboundLB().FrontendIPs,\n\t\t\tType: s.NodeOutboundLB().Type,\n\t\t\tSKU: s.NodeOutboundLB().SKU,\n\t\t\tBackendPoolName: s.NodeOutboundLB().BackendPool.Name,\n\t\t\tIdleTimeoutInMinutes: s.NodeOutboundLB().IdleTimeoutInMinutes,\n\t\t\tRole: infrav1.NodeOutboundRole,\n\t\t\tAdditionalTags: s.AdditionalTags(),\n\t\t})\n\t}\n\n\t// Control Plane Outbound LB\n\tif s.ControlPlaneOutboundLB() != nil {\n\t\tspecs = append(specs, &loadbalancers.LBSpec{\n\t\t\tName: s.ControlPlaneOutboundLB().Name,\n\t\t\tResourceGroup: s.ResourceGroup(),\n\t\t\tSubscriptionID: s.SubscriptionID(),\n\t\t\tClusterName: s.ClusterName(),\n\t\t\tLocation: s.Location(),\n\t\t\tExtendedLocation: s.ExtendedLocation(),\n\t\t\tVNetName: s.Vnet().Name,\n\t\t\tVNetResourceGroup: s.Vnet().ResourceGroup,\n\t\t\tFrontendIPConfigs: s.ControlPlaneOutboundLB().FrontendIPs,\n\t\t\tType: s.ControlPlaneOutboundLB().Type,\n\t\t\tSKU: s.ControlPlaneOutboundLB().SKU,\n\t\t\tBackendPoolName: s.ControlPlaneOutboundLB().BackendPool.Name,\n\t\t\tIdleTimeoutInMinutes: s.ControlPlaneOutboundLB().IdleTimeoutInMinutes,\n\t\t\tRole: infrav1.ControlPlaneOutboundRole,\n\t\t\tAdditionalTags: s.AdditionalTags(),\n\t\t})\n\t}\n\n\treturn specs\n}", "func (_m *ELBv2APIClient) DescribeLoadBalancers(_a0 context.Context, _a1 *elasticloadbalancingv2.DescribeLoadBalancersInput, _a2 ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DescribeLoadBalancersOutput, error) {\n\t_va := make([]interface{}, len(_a2))\n\tfor _i := range _a2 {\n\t\t_va[_i] = _a2[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _a0, _a1)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 
*elasticloadbalancingv2.DescribeLoadBalancersOutput\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DescribeLoadBalancersOutput, error)); ok {\n\t\treturn rf(_a0, _a1, _a2...)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) *elasticloadbalancingv2.DescribeLoadBalancersOutput); ok {\n\t\tr0 = rf(_a0, _a1, _a2...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*elasticloadbalancingv2.DescribeLoadBalancersOutput)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) error); ok {\n\t\tr1 = rf(_a0, _a1, _a2...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func TestRequestIPWithMismatchedLabel(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"blue\",\n\t\t},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"color\": \"red\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.123\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\t\tif 
svc.Status.Conditions[0].Reason != \"pool_selector_mismatch\" {\n\t\t\tt.Error(\"Expected service to receive 'pool_selector_mismatch' condition\")\n\t\t}\n\n\t\treturn true\n\t}, 1*time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected status update of service\")\n\t}\n}", "func (m *MockLoadBalancerServiceIface) GetLoadBalancerByName(name string, opts ...OptionFunc) (*LoadBalancer, int, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{name}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"GetLoadBalancerByName\", varargs...)\n\tret0, _ := ret[0].(*LoadBalancer)\n\tret1, _ := ret[1].(int)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func HandleFullyPopulatedLoadbalancerCreationSuccessfully(t *testing.T, response string) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestJSONRequest(t, r, `{\n\t\t\t\"loadbalancer\": {\n\t\t\t\t\"admin_state_up\": true,\n\t\t\t\t\"flavor_id\": \"bba40eb2-ee8c-11e9-81b4-2a2ae2dbcce4\",\n\t\t\t\t\"listeners\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"default_pool\": {\n\t\t\t\t\t\t\t\"healthmonitor\": {\n\t\t\t\t\t\t\t\t\"delay\": 3,\n\t\t\t\t\t\t\t\t\"expected_codes\": \"200\",\n\t\t\t\t\t\t\t\t\"http_method\": \"GET\",\n\t\t\t\t\t\t\t\t\"max_retries\": 2,\n\t\t\t\t\t\t\t\t\"max_retries_down\": 3,\n\t\t\t\t\t\t\t\t\"name\": \"db\",\n\t\t\t\t\t\t\t\t\"timeout\": 1,\n\t\t\t\t\t\t\t\t\"type\": \"HTTP\",\n\t\t\t\t\t\t\t\t\"url_path\": \"/index.html\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"lb_algorithm\": \"ROUND_ROBIN\",\n\t\t\t\t\t\t\t\"members\": [\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"address\": \"192.0.2.51\",\n\t\t\t\t\t\t\t\t\t\"protocol_port\": 80\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"address\": 
\"192.0.2.52\",\n\t\t\t\t\t\t\t\t\t\"protocol_port\": 80\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\"name\": \"Example pool\",\n\t\t\t\t\t\t\t\"protocol\": \"HTTP\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"l7policies\": [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"action\": \"REDIRECT_TO_URL\",\n\t\t\t\t\t\t\t\t\"name\": \"redirect-example.com\",\n\t\t\t\t\t\t\t\t\"redirect_url\": \"http://www.example.com\",\n\t\t\t\t\t\t\t\t\"rules\": [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\"compare_type\": \"REGEX\",\n\t\t\t\t\t\t\t\t\t\t\"type\": \"PATH\",\n\t\t\t\t\t\t\t\t\t\t\"value\": \"/images*\"\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t],\n\t\t\t\t\t\t\"name\": \"redirect_listener\",\n\t\t\t\t\t\t\"protocol\": \"HTTP\",\n\t\t\t\t\t\t\"protocol_port\": 8080\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\t\"name\": \"db_lb\",\n\t\t\t\t\"provider\": \"octavia\",\n\t\t\t\t\"tags\": [\n\t\t\t\t\t\"test\",\n\t\t\t\t\t\"stage\"\n\t\t\t\t],\n\t\t\t\t\"vip_address\": \"10.30.176.48\",\n\t\t\t\t\"vip_port_id\": \"2bf413c8-41a9-4477-b505-333d5cbe8b55\",\n\t\t\t\t\"vip_subnet_id\": \"9cedb85d-0759-4898-8a4b-fa5a5ea10086\"\n\t\t\t}\n\t\t}`)\n\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tfmt.Fprintf(w, response)\n\t})\n}", "func TestAddRange(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.20.10\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || 
action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tpoolA.Spec.Cidrs = append(poolA.Spec.Cidrs, cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\tCidr: \"10.0.20.0/24\",\n\t})\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.20.10\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.20.10'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func TestRandomLBWhenNodeFailBalanced(t *testing.T) {\n\tdefer func() {\n\t\t// clear healthStore\n\t\thealthStore = sync.Map{}\n\t}()\n\n\tpool := makePool(4)\n\tvar hosts []types.Host\n\tvar unhealthyIdx = 2\n\tfor i := 0; i < 4; i++ {\n\t\thost := &mockHost{\n\t\t\taddr: pool.Get(),\n\t\t}\n\t\tif i == unhealthyIdx {\n\t\t\thost.SetHealthFlag(api.FAILED_ACTIVE_HC)\n\t\t}\n\t\thosts = append(hosts, host)\n\t}\n\n\ths := &hostSet{}\n\ths.setFinalHost(hosts)\n\tlb := newRandomLoadBalancer(nil, hs)\n\ttotal := 
1000000\n\trunCase := func(subTotal int) {\n\t\tresults := map[string]int{}\n\t\tfor i := 0; i < subTotal; i++ {\n\t\t\th := lb.ChooseHost(nil)\n\t\t\tv, ok := results[h.AddressString()]\n\t\t\tif !ok {\n\t\t\t\tv = 0\n\t\t\t}\n\t\t\tresults[h.AddressString()] = v + 1\n\t\t}\n\t\tfor i := 0; i < 4; i++ {\n\t\t\taddr := hosts[i].AddressString()\n\t\t\trate := float64(results[addr]) / float64(subTotal)\n\t\t\texpected := 0.33333\n\t\t\tif i == unhealthyIdx {\n\t\t\t\texpected = 0.000\n\t\t\t}\n\t\t\tif math.Abs(rate-expected) > 0.1 { // no lock, have deviation 10% is acceptable\n\t\t\t\tt.Errorf(\"%s request rate is %f, expected %f\", addr, rate, expected)\n\t\t\t}\n\t\t\tt.Logf(\"%s request rate is %f, request count: %d\", addr, rate, results[addr])\n\t\t}\n\t}\n\t// simple test\n\trunCase(total)\n\t// concurr\n\twg := sync.WaitGroup{}\n\tsubTotal := total / 10\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\trunCase(subTotal)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}", "func (c *MockLoadBalancersClient) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, error) {\n\tvar l []network.LoadBalancer\n\tfor _, lb := range c.LBs {\n\t\tl = append(l, lb)\n\t}\n\treturn l, nil\n}", "func TestRangesFull(t *testing.T) {\n\tinitDone := make(chan struct{})\n\t// A single /32 can't be used to allocate since we always reserve 2 IPs,\n\t// the network and broadcast address, which in the case of a /32 means it is always full.\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.123/32\", \"FF::123/128\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: 
slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv6Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-a\" {\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\treturn false\n\t\t}\n\n\t\tif svc.Name != \"service-b\" {\n\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 
1 {\n\t\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected two service updates\")\n\t}\n}", "func (m *MockClusterScoper) APIServerLBName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLBName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func TestRemoveServiceLabel(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"blue\",\n\t\t},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tsvc1 := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"color\": \"blue\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\tfixture.coreCS.Tracker().Add(\n\t\tsvc1,\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := 
fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive exactly zero ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tsvc1 = svc1.DeepCopy()\n\tsvc1.Labels = map[string]string{\n\t\t\"color\": \"green\",\n\t}\n\n\t_, err := fixture.svcClient.Services(svc1.Namespace).Update(context.Background(), svc1, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (s *Service) Get(ctx context.Context, spec azure.Spec) (interface{}, error) {\n\tinternalLBSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn network.LoadBalancer{}, errors.New(\"invalid internal load balancer specification\")\n\t}\n\t//lbName := fmt.Sprintf(\"%s-api-internallb\", s.Scope.Cluster.Name)\n\tlb, err := s.Client.Get(ctx, s.Scope.ClusterConfig.ResourceGroup, internalLBSpec.Name, \"\")\n\tif err != nil && azure.ResourceNotFound(err) {\n\t\treturn nil, errors.Wrapf(err, \"load balancer %s not found\", internalLBSpec.Name)\n\t} else if err != nil {\n\t\treturn lb, err\n\t}\n\treturn lb, nil\n}", "func TestPoolDelete(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := 
mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t\tmkPool(poolBUID, \"pool-b\", []string{\"10.0.20.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tvar allocPool string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.10\") {\n\t\t\tallocPool = \"pool-a\"\n\t\t} else {\n\t\t\tallocPool = \"pool-b\"\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\t<-initDone\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.10\") {\n\t\t\tif allocPool == \"pool-a\" {\n\t\t\t\tt.Error(\"New IP was allocated from deleted 
pool\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\tif allocPool == \"pool-b\" {\n\t\t\t\tt.Error(\"New IP was allocated from deleted pool\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\terr := fixture.poolClient.Delete(context.Background(), allocPool, meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (m *MockLoadBalancerServiceIface) CreateLoadBalancer(p *CreateLoadBalancerParams) (*CreateLoadBalancerResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateLoadBalancer\", p)\n\tret0, _ := ret[0].(*CreateLoadBalancerResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestReallocOnInit(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\t// Initially request only an IPv4\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"192.168.1.12\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif 
len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP == \"192.168.1.12\" {\n\t\t\tt.Error(\"Expected ingress IP to not be the initial, bad IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service to be updated\")\n\t}\n}", "func (m *MockClusterScoper) APIServerLBPoolName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLBPoolName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *Client) GetLoadBalancers(ctx context.Context, rgname string, logger log.Logger) ([]network.LoadBalancer, error) {\n\tret := _m.Called(ctx, rgname, logger)\n\n\tvar r0 []network.LoadBalancer\n\tif rf, ok := ret.Get(0).(func(context.Context, string, log.Logger) []network.LoadBalancer); ok {\n\t\tr0 = rf(ctx, rgname, logger)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]network.LoadBalancer)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, string, log.Logger) error); ok {\n\t\tr1 = rf(ctx, rgname, logger)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func 
(m *MockNetworkDescriber) APIServerLB() *v1beta1.LoadBalancerSpec {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLB\")\n\tret0, _ := ret[0].(*v1beta1.LoadBalancerSpec)\n\treturn ret0\n}", "func TestPoolSelectorBasic(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tselector := slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"red\",\n\t\t},\n\t}\n\tpoolA.Spec.ServiceSelector = &selector\n\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"red-service\" {\n\t\t\tt.Error(\"Expected update from 'red-service'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tmatchingService := &slim_core_v1.Service{\n\t\tObjectMeta: 
slim_meta_v1.ObjectMeta{\n\t\t\tName: \"red-service\",\n\t\t\tUID: serviceAUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"color\": \"red\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilyPolicy: &policy,\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Create(context.Background(), matchingService, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"blue-service\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to not receive any ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tnonMatchingService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"blue-service\",\n\t\t\tUID: serviceBUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"color\": \"blue\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilyPolicy: &policy,\n\t\t},\n\t}\n\n\t_, err = 
fixture.svcClient.Services(\"default\").Create(context.Background(), nonMatchingService, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (m *MockLoadBalancerServiceIface) ConfigureNetscalerLoadBalancer(p *ConfigureNetscalerLoadBalancerParams) (*NetscalerLoadBalancerResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ConfigureNetscalerLoadBalancer\", p)\n\tret0, _ := ret[0].(*NetscalerLoadBalancerResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoadBalancerServiceIface) AddNetscalerLoadBalancer(p *AddNetscalerLoadBalancerParams) (*AddNetscalerLoadBalancerResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddNetscalerLoadBalancer\", p)\n\tret0, _ := ret[0].(*AddNetscalerLoadBalancerResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (el *ELBV2Manager) describeLoadbalancers(marker *string, loadbalancers []*elbv2.LoadBalancer) ([]*elbv2.LoadBalancer, error) {\n\n\tinput := &elbv2.DescribeLoadBalancersInput{\n\t\tMarker: marker,\n\t}\n\n\tresp, err := el.client.DescribeLoadBalancers(input)\n\tif err != nil {\n\t\tlog.WithField(\"error\", err).Error(\"could not describe elb instances\")\n\t\treturn nil, err\n\t}\n\n\tif loadbalancers == nil {\n\t\tloadbalancers = []*elbv2.LoadBalancer{}\n\t}\n\n\tloadbalancers = append(loadbalancers, resp.LoadBalancers...)\n\n\tif resp.NextMarker != nil {\n\t\treturn el.describeLoadbalancers(resp.NextMarker, loadbalancers)\n\t}\n\n\treturn loadbalancers, nil\n}", "func TestChangePoolSelector(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\"color\": \"red\"},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, 
nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"color\": \"red\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive exactly zero ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolA.Spec.ServiceSelector.MatchLabels = map[string]string{\"color\": \"green\"}\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (m *MockLoadBalancerServiceIface) UpdateLoadBalancer(p *UpdateLoadBalancerParams) (*UpdateLoadBalancerResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateLoadBalancer\", 
p)\n\tret0, _ := ret[0].(*UpdateLoadBalancerResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (c *NSXClient) CreateLoadBalancer(opts LoadBalancerOptions) (string, bool, error) {\n\tctx := c.api.Context\n\tapi := c.api.ServicesApi\n\trouting := c.api.LogicalRoutingAndServicesApi\n\n\texistingServer, err := c.GetLoadBalancer(opts.Name)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\tif existingServer != nil {\n\t\treturn existingServer.IpAddress, true, nil\n\t}\n\n\tt0, resp, err := routing.ReadLogicalRouter(ctx, opts.Tier0)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"failed to read T0 router %s: %s\", opts.Tier0, errorString(resp, err))\n\t}\n\n\tt0Port, resp, err := routing.CreateLogicalRouterLinkPortOnTier0(ctx, manager.LogicalRouterLinkPortOnTier0{\n\t\tLogicalRouterId: t0.Id,\n\t\tDisplayName: \"lb-\" + opts.Name + \"-T1\",\n\t})\n\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create T0 Local router port %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tt1, resp, err := routing.CreateLogicalRouter(ctx, manager.LogicalRouter{\n\t\tRouterType: \"TIER1\",\n\t\tDisplayName: \"lb-\" + opts.Name,\n\t\tEdgeClusterId: t0.EdgeClusterId,\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create T1 router %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\t_, resp, err = routing.UpdateAdvertisementConfig(ctx, t1.Id, manager.AdvertisementConfig{\n\t\tAdvertiseLbVip: true,\n\t\tAdvertiseLbSnatIp: true,\n\t\tEnabled: true,\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to update advertisement config %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tc.Infof(\"Created T1 router %s/%s\", t1.DisplayName, t1.Id)\n\n\t_, resp, err = routing.CreateLogicalRouterLinkPortOnTier1(ctx, manager.LogicalRouterLinkPortOnTier1{\n\t\tLogicalRouterId: t1.Id,\n\t\tDisplayName: t0.DisplayName + \"-uplink\",\n\t\tLinkedLogicalRouterPortId: 
&common.ResourceReference{\n\t\t\tTargetType: \"LogicalPort\",\n\t\t\tTargetId: t0Port.Id,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"failed to link T1 (%s) to T0 (%s): %s\", t1.Id, t0Port.Id, errorString(resp, err))\n\t}\n\n\tgroup, err := c.CreateOrUpdateNSGroup(opts.Name, \"LogicalPort\", opts.MemberTags)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\tvar monitorID string\n\tif opts.Protocol == TCPProtocol {\n\t\tmonitorID, err = c.GetOrCreateTCPHealthCheck(opts.Ports[0])\n\t\tif err != nil {\n\t\t\treturn \"\", false, fmt.Errorf(\"unable to create tcp loadbalancer monitor: %v\", err)\n\t\t}\n\t} else {\n\t\tmonitorID, err = c.GetOrCreateHTTPHealthCheck(opts.Ports[0])\n\t\tif err != nil {\n\t\t\treturn \"\", false, fmt.Errorf(\"unable to create http loadbalancer monitor: %v\", err)\n\t\t}\n\t}\n\tpool, resp, err := api.CreateLoadBalancerPool(ctx, loadbalancer.LbPool{\n\t\tId: opts.Name,\n\t\tActiveMonitorIds: []string{monitorID},\n\t\tSnatTranslation: &loadbalancer.LbSnatTranslation{\n\t\t\tType_: \"LbSnatAutoMap\",\n\t\t},\n\t\tMemberGroup: &loadbalancer.PoolMemberGroup{\n\t\t\tGroupingObject: &common.ResourceReference{\n\t\t\t\tTargetType: \"NSGroup\",\n\t\t\t\tTargetId: group.Id,\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create load balancer pool %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tip, err := c.AllocateIP(opts.IPPool)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to allocate VIP %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tserver, resp, err := api.CreateLoadBalancerVirtualServer(ctx, loadbalancer.LbVirtualServer{\n\t\tId: opts.Name,\n\t\tEnabled: true,\n\t\tIpAddress: ip,\n\t\tIpProtocol: opts.Protocol,\n\t\tPorts: opts.Ports,\n\t\tPoolId: pool.Id,\n\t})\n\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create virtual server %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tlb := 
loadbalancer.LbService{\n\t\tDisplayName: opts.Name,\n\t\tAttachment: &common.ResourceReference{\n\t\t\tTargetType: \"LogicalRouter\",\n\t\t\tTargetId: t1.Id,\n\t\t},\n\t\tEnabled: true,\n\t\tErrorLogLevel: \"INFO\",\n\t\tSize: \"SMALL\",\n\t\tVirtualServerIds: []string{server.Id},\n\t}\n\n\t_, resp, err = api.CreateLoadBalancerService(c.api.Context, lb)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create load balancer %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tc.Infof(\"Created LoadBalancer service: %s/%s\", server.Id, ip)\n\treturn ip, false, nil\n}", "func TestDeleteAllBackendServers(t *testing.T) {\n\tcloud, _, resp, err := beforeTestBlb()\n\tif err != nil {\n\t\tt.Errorf(\"beforeTestBlb err, err: %v\", err)\n\t}\n\tctx := context.Background()\n\tlb := &blb.LoadBalancer{\n\t\tBlbId: resp.LoadBalancerId,\n\t}\n\t// bs is nil\n\terr = cloud.deleteAllBackendServers(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"deleteAllBackendServers err, err : %v\", err)\n\t}\n\t// add bs\n\tbsAdd := []blb.BackendServer{\n\t\t{\n\t\t\tInstanceId: \"1\",\n\t\t},\n\t\t{\n\t\t\tInstanceId: \"2\",\n\t\t},\n\t}\n\targs := blb.AddBackendServersArgs{\n\t\tLoadBalancerId: lb.BlbId,\n\t\tBackendServerList: bsAdd,\n\t}\n\terr = cloud.clientSet.BLBClient.AddBackendServers(ctx, &args, &bce.SignOption{\n\t\tCustomSignFunc: CCEServiceSign,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"AddBackendServers err, err: %v\", err)\n\t}\n\terr = cloud.deleteAllBackendServers(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"deleteAllBackendServers err, err : %v\", err)\n\t}\n\tbs, err := cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 0 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n}", "func (bc *Baiducloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) 
{\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer(%v, %v, %v, %v, %v)\",\n\t\tclusterName, service.Namespace, service.Name, bc.Region, service.Spec.LoadBalancerIP, service.Spec.Ports, service.Annotations)\n\t// workaround to support old version, can be removed if not support old version\n\tbc.workAround(service)\n\tresult, err := ExtractServiceAnnotation(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = bc.validateService(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// ensure BLB\n\tlb, err := bc.ensureBLB(ctx, clusterName, service, nodes, result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif result.LoadBalancerInternalVpc == \"true\" {\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer: use LoadBalancerInternalVpc, EXTERNAL-IP is %s\", service.Namespace, service.Name, lb.Address)\n\t\treturn &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: lb.Address}}}, nil\n\t}\n\n\t// ensure EIP\n\tpubIP, err := bc.ensureEIP(ctx, clusterName, service, nodes, result, lb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer: EXTERNAL-IP is %s\", service.Namespace, service.Name, pubIP)\n\treturn &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: pubIP}}}, nil\n}", "func (l *SharedLoadBalancer) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tklog.Infof(\"EnsureLoadBalancer: called with service %s/%s, node: %d\",\n\t\tservice.Namespace, service.Name, len(nodes))\n\n\tif err := ensureLoadBalancerValidation(service, nodes); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// get exits or create a new ELB instance\n\tloadbalancer, err := l.getLoadBalancerInstance(ctx, clusterName, service)\n\tspecifiedID := getStringFromSvsAnnotation(service, ElbID, \"\")\n\tif common.IsNotFound(err) && specifiedID != \"\" {\n\t\treturn nil, err\n\t}\n\tif err != nil && common.IsNotFound(err) {\n\t\tsubnetID, e := 
l.getSubnetID(service, nodes[0])\n\t\tif e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\tloadbalancer, err = l.createLoadbalancer(clusterName, subnetID, service)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// query ELB listeners list\n\tlisteners, err := l.sharedELBClient.ListListeners(&elbmodel.ListListenersRequest{LoadbalancerId: &loadbalancer.Id})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, port := range service.Spec.Ports {\n\t\tlistener := l.filterListenerByPort(listeners, service, port)\n\t\t// add or update listener\n\t\tif listener == nil {\n\t\t\tlistener, err = l.createListener(loadbalancer.Id, service, port)\n\t\t} else {\n\t\t\terr = l.updateListener(listener, service)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlisteners = popListener(listeners, listener.Id)\n\n\t\t// query pool or create pool\n\t\tpool, err := l.getPool(loadbalancer.Id, listener.Id)\n\t\tif err != nil && common.IsNotFound(err) {\n\t\t\tpool, err = l.createPool(listener, service)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// add new members and remove the obsolete members.\n\t\tif err = l.addOrRemoveMembers(loadbalancer, service, pool, port, nodes); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// add or remove health monitor\n\t\tif err = l.ensureHealthCheck(loadbalancer.Id, pool, port, service, nodes[0]); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif specifiedID == \"\" {\n\t\t// All remaining listeners are obsolete, delete them\n\t\terr = l.deleteListeners(loadbalancer.Id, listeners)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tingressIP := loadbalancer.VipAddress\n\tpublicIPAddr, err := l.createOrAssociateEIP(loadbalancer, service)\n\tif err == nil {\n\t\tif publicIPAddr != \"\" {\n\t\t\tingressIP = publicIPAddr\n\t\t}\n\n\t\treturn &corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{{IP: ingressIP}},\n\t\t}, nil\n\t}\n\n\t// rollback\n\tklog.Errorf(\"rollback:failed to 
create the EIP, delete ELB instance created, error: %s\", err)\n\terrs := []error{err}\n\terr = l.EnsureLoadBalancerDeleted(ctx, clusterName, service)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t\tklog.Errorf(\"rollback: error deleting ELB instance: %s\", err)\n\t}\n\treturn nil, errors.NewAggregate(errs)\n}", "func (m *MockLoadBalancerServiceIface) NewCreateLoadBalancerParams(algorithm string, instanceport int, name, networkid, scheme, sourceipaddressnetworkid string, sourceport int) *CreateLoadBalancerParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewCreateLoadBalancerParams\", algorithm, instanceport, name, networkid, scheme, sourceipaddressnetworkid, sourceport)\n\tret0, _ := ret[0].(*CreateLoadBalancerParams)\n\treturn ret0\n}", "func init() {\n\tbalancer.Register(orcaLBBuilder{})\n}", "func (m *MockClient) DescribeSubnets(arg0 *ec2.DescribeSubnetsInput) (*ec2.DescribeSubnetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DescribeSubnets\", arg0)\n\tret0, _ := ret[0].(*ec2.DescribeSubnetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClient) DescribeSubnets(arg0 *ec2.DescribeSubnetsInput) (*ec2.DescribeSubnetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DescribeSubnets\", arg0)\n\tret0, _ := ret[0].(*ec2.DescribeSubnetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s) TestBalancer_TwoAddresses_UpdateLoads(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\tsrv1 := startServer(t, reportOOB)\n\tsrv2 := startServer(t, reportOOB)\n\n\t// srv1 starts loaded and srv2 starts without load; ensure RPCs are routed\n\t// disproportionately to srv2 (10:1).\n\tsrv1.oobMetrics.SetQPS(10.0)\n\tsrv1.oobMetrics.SetApplicationUtilization(1.0)\n\n\tsrv2.oobMetrics.SetQPS(10.0)\n\tsrv2.oobMetrics.SetApplicationUtilization(.1)\n\n\tsc := svcConfig(t, oobConfig)\n\tif err := 
srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil {\n\t\tt.Fatalf(\"Error starting client: %v\", err)\n\t}\n\taddrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}}\n\tsrv1.R.UpdateState(resolver.State{Addresses: addrs})\n\n\t// Call each backend once to ensure the weights have been received.\n\tensureReached(ctx, t, srv1.Client, 2)\n\n\t// Wait for the weight update period to allow the new weights to be processed.\n\ttime.Sleep(weightUpdatePeriod)\n\tcheckWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10})\n\n\t// Update the loads so srv2 is loaded and srv1 is not; ensure RPCs are\n\t// routed disproportionately to srv1.\n\tsrv1.oobMetrics.SetQPS(10.0)\n\tsrv1.oobMetrics.SetApplicationUtilization(.1)\n\n\tsrv2.oobMetrics.SetQPS(10.0)\n\tsrv2.oobMetrics.SetApplicationUtilization(1.0)\n\n\t// Wait for the weight update period to allow the new weights to be processed.\n\ttime.Sleep(weightUpdatePeriod + oobReportingInterval)\n\tcheckWeights(ctx, t, srvWeight{srv1, 10}, srvWeight{srv2, 1})\n}", "func (m *MockLoadBalancerServiceIface) NewConfigureNetscalerLoadBalancerParams(lbdeviceid string) *ConfigureNetscalerLoadBalancerParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewConfigureNetscalerLoadBalancerParams\", lbdeviceid)\n\tret0, _ := ret[0].(*ConfigureNetscalerLoadBalancerParams)\n\treturn ret0\n}", "func TestServiceDelete(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tvar 
svcIP string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tsvcIP = svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status to be updated\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been allocated\")\n\t}\n\n\terr := fixture.svcClient.Services(\"default\").Delete(context.Background(), \"service-a\", meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been released\")\n\t}\n}", "func (m *MockLabeledWorkloadSet) Labels() map[string]string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Labels\")\n\tret0, _ := ret[0].(map[string]string)\n\treturn ret0\n}", "func (nat *NATCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, hosts []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tstatus := &v1.LoadBalancerStatus{}\n\n\t// step 0: ensure the nat gateway is exist\n\tnatProvider, err := nat.getNATClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnatGatewayId := service.ObjectMeta.Annotations[AnnotationsNATID]\n\tif natGatewayId == 
\"\" {\n\t\treturn nil, fmt.Errorf(\"The id of natGateway should be set by %v in annotations \", AnnotationsNATID)\n\t}\n\n\tnatGateway, err := natProvider.GetNATGateway(natGatewayId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif natGateway.RouterId != nat.cloudConfig.VpcOpts.ID {\n\t\treturn nil, fmt.Errorf(\"The natGateway is not in the same VPC with cluster. \")\n\t}\n\n\t//step 1:get floatingip id by floatingip address and check the floatingIp can be used\n\tdnatRuleList, err := listDnatRule(natProvider, natGatewayId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfloatingIp, err := nat.getFloatingIpInfoByIp(natProvider, service.Spec.LoadBalancerIP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tallDnatRuleInFloatIP, err := listAllDnatRuleByFloatIP(natProvider, service.Spec.LoadBalancerIP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !nat.checkFloatingIp(allDnatRuleInFloatIP, floatingIp, natGatewayId) {\n\t\treturn nil, fmt.Errorf(\"The floating ip %v is binding to port,and its not DNAT rule in natGateway %s\", floatingIp.FloatingIpAddress, natGateway.Name)\n\t}\n\n\t//step 2: get podList (with labels/selectors of this service),then get the backend to create DNAT rule\n\tpodList, err := nat.getPods(service.Name, service.Namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar runningPod v1.Pod\n\tfor _, pod := range podList.Items {\n\t\tif podutil.IsPodReady(&pod) {\n\t\t\trunningPod = pod\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(runningPod.Status.HostIP) == 0 {\n\t\treturn nil, fmt.Errorf(\"There is no availabel endpoint for the service %s\", service.Name)\n\t}\n\n\tsubnetId := nat.getSubnetIdForPod(runningPod, hosts)\n\tnetPort, err := nat.getPortByFixedIp(natProvider, subnetId, runningPod.Status.HostIP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar errs []error\n\t// step1: create dnat rule\n\tfor _, port := range service.Spec.Ports {\n\t\t//check if the DNAT rule has been created by the service,if exists continue\n\t\tif 
nat.getDNATRule(dnatRuleList, &port) != nil {\n\t\t\tklog.V(4).Infoln(\"DNAT rule already exists, no need to create\")\n\t\t\tcontinue\n\t\t}\n\n\t\tklog.V(4).Infof(\"port:%v dnat rule not exist,start create dnat rule\", port)\n\n\t\terr := nat.ensureCreateDNATRule(natProvider, &port, netPort, floatingIp, natGatewayId)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"EnsureCreateDNATRule Failed: %v\", err))\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// get service with loadbalancer type and loadbalancer ip\n\tlbServers, _ := nat.kubeClient.Services(\"\").List(context.TODO(), metav1.ListOptions{})\n\tvar lbPorts []v1.ServicePort\n\tfor _, svc := range lbServers.Items {\n\t\tlbType := svc.Annotations[ElbClass]\n\t\tif lbType != \"dnat\" || svc.Spec.LoadBalancerIP != service.Spec.LoadBalancerIP {\n\t\t\tcontinue\n\t\t}\n\t\tklog.V(4).Infof(\"exist dnat svc:%v\", svc)\n\t\tlbPorts = append(lbPorts, svc.Spec.Ports...)\n\t}\n\n\tfor _, dnatRule := range dnatRuleList.DNATRules {\n\t\tif dnatRule.FloatingIpAddress != service.Spec.LoadBalancerIP {\n\t\t\tcontinue\n\t\t}\n\n\t\tif nat.getServicePort(&dnatRule, lbPorts) != nil {\n\t\t\tklog.V(4).Infoln(\"port exist,no need to delete\")\n\t\t\tcontinue\n\t\t}\n\n\t\tklog.V(4).Infof(\"rule:%v port not exist,start delete dnat rule\", dnatRule)\n\n\t\terr := nat.ensureDeleteDNATRule(natProvider, &dnatRule, natGatewayId)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"EnsureDeleteDNATRule Failed: %v\", err))\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tif len(errs) != 0 {\n\t\treturn nil, utilerrors.NewAggregate(errs)\n\t}\n\tstatus.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: service.Spec.LoadBalancerIP})\n\treturn status, nil\n}", "func ExampleELB_DescribeLoadBalancers_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.DescribeLoadBalancersInput{\n\t\tLoadBalancerNames: []*string{\n\t\t\taws.String(\"my-load-balancer\"),\n\t\t},\n\t}\n\n\tresult, err := svc.DescribeLoadBalancers(input)\n\tif err != 
nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeDependencyThrottleException:\n\t\t\t\tfmt.Println(elb.ErrCodeDependencyThrottleException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func createLBServiceWithIngressIP(cs kubernetes.Interface, namespace, name string, protocol v1.IPFamily, annotations, selector map[string]string, port int32, tweak ...func(svc *v1.Service)) *v1.Service {\n\tsvc := &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tSelector: selector,\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t\tPort: port,\n\t\t\t\t},\n\t\t\t},\n\t\t\tType: v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilies: []v1.IPFamily{protocol},\n\t\t},\n\t}\n\n\tfor _, f := range tweak {\n\t\tf(svc)\n\t}\n\n\tsvc, err := cs.CoreV1().Services(namespace).Create(context.TODO(), svc, metav1.CreateOptions{})\n\tframework.ExpectNoError(err, \"failed to create loadbalancer service\")\n\n\tgomega.Eventually(func() error {\n\t\tsvc, err = cs.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\treturn fmt.Errorf(\"expected 1 lb ingress ip, got %v as ips\", svc.Status.LoadBalancer.Ingress)\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress[0].IP) == 0 {\n\t\t\treturn fmt.Errorf(\"expected lb ingress to be set\")\n\t\t}\n\n\t\treturn nil\n\t}, 5*time.Second, 
1*time.Second).ShouldNot(gomega.HaveOccurred(), \"failed to set loadbalancer's ingress ip\")\n\n\treturn svc\n}", "func createLBServiceWithIngressIP(cs kubernetes.Interface, namespace, name string, protocol v1.IPFamily, selector map[string]string, port int32, tweak ...func(svc *v1.Service)) *v1.Service {\n\tsvc := &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tSelector: selector,\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t\tPort: port,\n\t\t\t\t},\n\t\t\t},\n\t\t\tType: v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilies: []v1.IPFamily{protocol},\n\t\t},\n\t}\n\n\tfor _, f := range tweak {\n\t\tf(svc)\n\t}\n\n\tsvc, err := cs.CoreV1().Services(namespace).Create(context.TODO(), svc, metav1.CreateOptions{})\n\tframework.ExpectNoError(err, \"failed to create loadbalancer service\")\n\n\tgomega.Eventually(func() error {\n\t\tsvc, err = cs.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\treturn fmt.Errorf(\"expected 1 lb ingress ip, got %v as ips\", svc.Status.LoadBalancer.Ingress)\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress[0].IP) == 0 {\n\t\t\treturn fmt.Errorf(\"expected lb ingress to be set\")\n\t\t}\n\n\t\treturn nil\n\t}, 5*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred(), \"failed to set loadbalancer's ingress ip\")\n\n\treturn svc\n}", "func TestRemoveRequestedIP(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tsvc1 := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t\tAnnotations: 
map[string]string{\n\t\t\t\t\"io.cilium/lb-ipam-ips\": \"10.0.10.124,10.0.10.125\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.123\",\n\t\t},\n\t}\n\n\tfixture.coreCS.Tracker().Add(\n\t\tsvc1,\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 3 {\n\t\t\tt.Error(\"Expected service to receive exactly three ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 2 {\n\t\t\tt.Error(\"Expected service to receive exactly two ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tsvc1 = svc1.DeepCopy()\n\tsvc1.Annotations = map[string]string{\n\t\t\"io.cilium/lb-ipam-ips\": \"10.0.10.124\",\n\t}\n\n\t_, err := fixture.svcClient.Services(svc1.Namespace).Update(context.Background(), svc1, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.123\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.123' to be allocated\")\n\t}\n\n\tif 
!fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.124\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.124' to be allocated\")\n\t}\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.125\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.125' to be released\")\n\t}\n}", "func (s) TestOutlierDetection(t *testing.T) {\n\tedsLBCh := testutils.NewChannel()\n\txdsC, cleanup := setup(edsLBCh)\n\tdefer cleanup()\n\tbuilder := balancer.Get(Name)\n\tedsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{})\n\tif edsB == nil {\n\t\tt.Fatalf(\"builder.Build(%s) failed and returned nil\", Name)\n\t}\n\tdefer edsB.Close()\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\t// Update Cluster Resolver with Client Conn State with Outlier Detection\n\t// configuration present. This is what will be passed down to this balancer,\n\t// as CDS Balancer gets the Cluster Update and converts the Outlier\n\t// Detection data to an Outlier Detection configuration and sends it to this\n\t// level.\n\tif err := edsB.UpdateClientConnState(balancer.ClientConnState{\n\t\tResolverState: xdsclient.SetClient(resolver.State{}, xdsC),\n\t\tBalancerConfig: newLBConfigWithOneEDSAndOutlierDetection(testEDSServcie, noopODCfg),\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := xdsC.WaitForWatchEDS(ctx); err != nil {\n\t\tt.Fatalf(\"xdsClient.WatchEndpoints failed with error: %v\", err)\n\t}\n\n\t// Invoke EDS Callback - causes child balancer to be built and then\n\t// UpdateClientConnState called on it with Outlier Detection as a direct\n\t// child.\n\txdsC.InvokeWatchEDSCallback(\"\", defaultEndpointsUpdate, nil)\n\tedsLB, err := waitForNewChildLB(ctx, edsLBCh)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlocalityID := xdsinternal.LocalityID{Zone: \"zone\"}\n\t// The priority configuration generated should have Outlier Detection as a\n\t// direct child due to Outlier Detection being 
turned on.\n\tpCfgWant := &priority.LBConfig{\n\t\tChildren: map[string]*priority.Child{\n\t\t\t\"priority-0-0\": {\n\t\t\t\tConfig: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\tName: outlierdetection.Name,\n\t\t\t\t\tConfig: &outlierdetection.LBConfig{\n\t\t\t\t\t\tInterval: 1<<63 - 1,\n\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\t\t\tName: clusterimpl.Name,\n\t\t\t\t\t\t\tConfig: &clusterimpl.LBConfig{\n\t\t\t\t\t\t\t\tCluster: testClusterName,\n\t\t\t\t\t\t\t\tEDSServiceName: \"test-eds-service-name\",\n\t\t\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\t\t\t\t\tName: weightedtarget.Name,\n\t\t\t\t\t\t\t\t\tConfig: &weightedtarget.LBConfig{\n\t\t\t\t\t\t\t\t\t\tTargets: map[string]weightedtarget.Target{\n\t\t\t\t\t\t\t\t\t\t\tassertString(localityID.ToString): {\n\t\t\t\t\t\t\t\t\t\t\t\tWeight: 100,\n\t\t\t\t\t\t\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tIgnoreReresolutionRequests: true,\n\t\t\t},\n\t\t},\n\t\tPriorities: []string{\"priority-0-0\"},\n\t}\n\n\tif err := edsLB.waitForClientConnStateChangeVerifyBalancerConfig(ctx, balancer.ClientConnState{\n\t\tBalancerConfig: pCfgWant,\n\t}); err != nil {\n\t\tt.Fatalf(\"EDS impl got unexpected update: %v\", err)\n\t}\n}", "func HandleLoadbalancerGetSuccessfully(t *testing.T) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"GET\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestHeader(t, r, \"Accept\", \"application/json\")\n\n\t\tfmt.Fprintf(w, SingleLoadbalancerBody)\n\t})\n}", "func (m *MockNetworkDescriber) Subnets() v1beta1.Subnets {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Subnets\")\n\tret0, _ := 
ret[0].(v1beta1.Subnets)\n\treturn ret0\n}", "func NewLoadBalancerCollector(logger log.Logger, client *hcloud.Client, failures *prometheus.CounterVec, duration *prometheus.HistogramVec, cfg config.Target) *LoadBalancerCollector {\n\tif failures != nil {\n\t\tfailures.WithLabelValues(\"load_balancer\").Add(0)\n\t}\n\n\tlabels := []string{\"id\", \"name\", \"datacenter\"}\n\treturn &LoadBalancerCollector{\n\t\tclient: client,\n\t\tlogger: log.With(logger, \"collector\", \"load-balancer\"),\n\t\tfailures: failures,\n\t\tduration: duration,\n\t\tconfig: cfg,\n\n\t\tCreated: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_created_timestamp\",\n\t\t\t\"Timestamp when the load balancer have been created\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tServices: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_services\",\n\t\t\t\"The number of configured services\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxServices: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_services\",\n\t\t\t\"The maximum number of services that can be configured\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargets: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets\",\n\t\t\t\"The number of targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxTargets: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_targets\",\n\t\t\t\"The maximum number of targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsHealthy: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_healthy\",\n\t\t\t\"The number of healthy targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsUnhealthy: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_unhealthy\",\n\t\t\t\"The number of unhealthy targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsUnknown: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_unknown\",\n\t\t\t\"The number of unknown targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tAssignedCertificates: 
prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_assigned_certificates\",\n\t\t\t\"The number of assigned certificates\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxAssignedCertificates: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_assigned_certificates\",\n\t\t\t\"The maximum number of certificates that can be assigned\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIngoingTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_ingoing_traffic\",\n\t\t\t\"The total amount of ingoing traffic in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tOutgoingTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_outgoing_traffic\",\n\t\t\t\"The total amount of outgoing traffic in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIncludedTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_included_traffic\",\n\t\t\t\"The amount of traffic that is included for the load balancer type in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tConnections: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections\",\n\t\t\t\"The number of open connections\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxConnections: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_open_connections\",\n\t\t\t\"The maximum number of open connections\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tConnectionsPerSecond: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_connections_per_second\",\n\t\t\t\"The number of new connections per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tRequestsPerSecond: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_requests_per_second\",\n\t\t\t\"The number of requests per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIncomingBandwidth: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections_bandwidth_in\",\n\t\t\t\"The incoming bandwidth in bytes per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tOutgoingBandwidth: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections_bandwidth_out\",\n\t\t\t\"The outgoing bandwidth in 
bytes per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t}\n}", "func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error {\n\tctx, cancel := getContextWithCancel()\n\tdefer cancel()\n\n\tlb = cleanupSubnetInFrontendIPConfigurations(&lb)\n\n\trgName := az.getLoadBalancerResourceGroup()\n\trerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, pointer.StringDeref(lb.Name, \"\"), lb, pointer.StringDeref(lb.Etag, \"\"))\n\tklog.V(10).Infof(\"LoadBalancerClient.CreateOrUpdate(%s): end\", *lb.Name)\n\tif rerr == nil {\n\t\t// Invalidate the cache right after updating\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t\treturn nil\n\t}\n\n\tlbJSON, _ := json.Marshal(lb)\n\tklog.Warningf(\"LoadBalancerClient.CreateOrUpdate(%s) failed: %v, LoadBalancer request: %s\", pointer.StringDeref(lb.Name, \"\"), rerr.Error(), string(lbJSON))\n\n\t// Invalidate the cache because ETAG precondition mismatch.\n\tif rerr.HTTPStatusCode == http.StatusPreconditionFailed {\n\t\tklog.V(3).Infof(\"LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed\", pointer.StringDeref(lb.Name, \"\"))\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t}\n\n\tretryErrorMessage := rerr.Error().Error()\n\t// Invalidate the cache because another new operation has canceled the current request.\n\tif strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) {\n\t\tklog.V(3).Infof(\"LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation\", pointer.StringDeref(lb.Name, \"\"))\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t}\n\n\t// The LB update may fail because the referenced PIP is not in the Succeeded provisioning state\n\tif strings.Contains(strings.ToLower(retryErrorMessage), strings.ToLower(consts.ReferencedResourceNotProvisionedMessageCode)) {\n\t\tmatches := pipErrorMessageRE.FindStringSubmatch(retryErrorMessage)\n\t\tif len(matches) != 3 {\n\t\t\tklog.Errorf(\"Failed to parse the retry error message %s\", 
retryErrorMessage)\n\t\t\treturn rerr.Error()\n\t\t}\n\t\tpipRG, pipName := matches[1], matches[2]\n\t\tklog.V(3).Infof(\"The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it\", pipName, pointer.StringDeref(lb.Name, \"\"))\n\t\tpip, _, err := az.getPublicIPAddress(pipRG, pipName, azcache.CacheReadTypeDefault)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to get the public IP %s in resource group %s: %v\", pipName, pipRG, err)\n\t\t\treturn rerr.Error()\n\t\t}\n\t\t// Perform a dummy update to fix the provisioning state\n\t\terr = az.CreateOrUpdatePIP(service, pipRG, pip)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to update the public IP %s in resource group %s: %v\", pipName, pipRG, err)\n\t\t\treturn rerr.Error()\n\t\t}\n\t\t// Invalidate the LB cache, return the error, and the controller manager\n\t\t// would retry the LB update in the next reconcile loop\n\t\t_ = az.lbCache.Delete(*lb.Name)\n\t}\n\n\treturn rerr.Error()\n}", "func (c *MockAzureCloud) LoadBalancer() azure.LoadBalancersClient {\n\treturn c.LoadBalancersClient\n}", "func desiredLoadBalancerService(ci *operatorv1.IngressController, deploymentRef metav1.OwnerReference, platform *configv1.PlatformStatus) (bool, *corev1.Service, error) {\n\tif ci.Status.EndpointPublishingStrategy.Type != operatorv1.LoadBalancerServiceStrategyType {\n\t\treturn false, nil, nil\n\t}\n\tservice := manifests.LoadBalancerService()\n\n\tname := controller.LoadBalancerServiceName(ci)\n\n\tservice.Namespace = name.Namespace\n\tservice.Name = name.Name\n\n\tif service.Labels == nil {\n\t\tservice.Labels = map[string]string{}\n\t}\n\tservice.Labels[\"router\"] = name.Name\n\tservice.Labels[manifests.OwningIngressControllerLabel] = ci.Name\n\n\tservice.Spec.Selector = controller.IngressControllerDeploymentPodSelector(ci).MatchLabels\n\n\tlb := ci.Status.EndpointPublishingStrategy.LoadBalancer\n\tisInternal := lb != nil && lb.Scope == 
operatorv1.InternalLoadBalancer\n\n\tif service.Annotations == nil {\n\t\tservice.Annotations = map[string]string{}\n\t}\n\n\tproxyNeeded, err := IsProxyProtocolNeeded(ci, platform)\n\tif err != nil {\n\t\treturn false, nil, fmt.Errorf(\"failed to determine if proxy protocol is proxyNeeded for ingresscontroller %q: %v\", ci.Name, err)\n\t}\n\n\tif platform != nil {\n\t\tif isInternal {\n\t\t\tannotation := InternalLBAnnotations[platform.Type]\n\t\t\tfor name, value := range annotation {\n\t\t\t\tservice.Annotations[name] = value\n\t\t\t}\n\n\t\t\t// Set the GCP Global Access annotation for internal load balancers on GCP only\n\t\t\tif platform.Type == configv1.GCPPlatformType {\n\t\t\t\tif lb != nil && lb.ProviderParameters != nil &&\n\t\t\t\t\tlb.ProviderParameters.Type == operatorv1.GCPLoadBalancerProvider &&\n\t\t\t\t\tlb.ProviderParameters.GCP != nil {\n\t\t\t\t\tglobalAccessEnabled := lb.ProviderParameters.GCP.ClientAccess == operatorv1.GCPGlobalAccess\n\t\t\t\t\tservice.Annotations[GCPGlobalAccessAnnotation] = strconv.FormatBool(globalAccessEnabled)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tannotation := externalLBAnnotations[platform.Type]\n\t\t\tfor name, value := range annotation {\n\t\t\t\tservice.Annotations[name] = value\n\t\t\t}\n\t\t}\n\t\tswitch platform.Type {\n\t\tcase configv1.AWSPlatformType:\n\t\t\tservice.Annotations[awsLBHealthCheckIntervalAnnotation] = awsLBHealthCheckIntervalDefault\n\t\t\tif proxyNeeded {\n\t\t\t\tservice.Annotations[awsLBProxyProtocolAnnotation] = \"*\"\n\t\t\t}\n\t\t\tif lb != nil && lb.ProviderParameters != nil {\n\t\t\t\tif aws := lb.ProviderParameters.AWS; aws != nil && lb.ProviderParameters.Type == operatorv1.AWSLoadBalancerProvider {\n\t\t\t\t\tswitch aws.Type {\n\t\t\t\t\tcase operatorv1.AWSNetworkLoadBalancer:\n\t\t\t\t\t\tservice.Annotations[AWSLBTypeAnnotation] = AWSNLBAnnotation\n\t\t\t\t\t\t// NLBs require a different health check interval than CLBs.\n\t\t\t\t\t\t// See 
<https://bugzilla.redhat.com/show_bug.cgi?id=1908758>.\n\t\t\t\t\t\tservice.Annotations[awsLBHealthCheckIntervalAnnotation] = awsLBHealthCheckIntervalNLB\n\t\t\t\t\tcase operatorv1.AWSClassicLoadBalancer:\n\t\t\t\t\t\tif aws.ClassicLoadBalancerParameters != nil {\n\t\t\t\t\t\t\tif v := aws.ClassicLoadBalancerParameters.ConnectionIdleTimeout; v.Duration > 0 {\n\t\t\t\t\t\t\t\tservice.Annotations[awsELBConnectionIdleTimeoutAnnotation] = strconv.FormatUint(uint64(v.Round(time.Second).Seconds()), 10)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif platform.AWS != nil && len(platform.AWS.ResourceTags) > 0 {\n\t\t\t\tvar additionalTags []string\n\t\t\t\tfor _, userTag := range platform.AWS.ResourceTags {\n\t\t\t\t\tif len(userTag.Key) > 0 {\n\t\t\t\t\t\tadditionalTags = append(additionalTags, userTag.Key+\"=\"+userTag.Value)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(additionalTags) > 0 {\n\t\t\t\t\tservice.Annotations[awsLBAdditionalResourceTags] = strings.Join(additionalTags, \",\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Set the load balancer for AWS to be as aggressive as Azure (2 fail @ 5s interval, 2 healthy)\n\t\t\tservice.Annotations[awsLBHealthCheckTimeoutAnnotation] = awsLBHealthCheckTimeoutDefault\n\t\t\tservice.Annotations[awsLBHealthCheckUnhealthyThresholdAnnotation] = awsLBHealthCheckUnhealthyThresholdDefault\n\t\t\tservice.Annotations[awsLBHealthCheckHealthyThresholdAnnotation] = awsLBHealthCheckHealthyThresholdDefault\n\t\tcase configv1.IBMCloudPlatformType, configv1.PowerVSPlatformType:\n\t\t\t// Set ExternalTrafficPolicy to type Cluster - IBM's LoadBalancer impl is created within the cluster.\n\t\t\t// LB places VIP on one of the worker nodes, using keepalived to maintain the VIP and ensuring redundancy\n\t\t\t// LB relies on iptable rules kube-proxy puts in to send traffic from the VIP node to the cluster\n\t\t\t// If policy is local, traffic is only sent to pods on the local node, as such Cluster enables traffic to flow to all the 
pods in the cluster\n\t\t\tservice.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeCluster\n\t\t\tif proxyNeeded {\n\t\t\t\tservice.Annotations[iksLBEnableFeaturesAnnotation] = iksLBEnableFeaturesProxyProtocol\n\t\t\t}\n\n\t\tcase configv1.AlibabaCloudPlatformType:\n\t\t\tif !isInternal {\n\t\t\t\tservice.Annotations[alibabaCloudLBAddressTypeAnnotation] = alibabaCloudLBAddressTypeInternet\n\t\t\t}\n\t\t}\n\t\t// Azure load balancers are not customizable and are set to (2 fail @ 5s interval, 2 healthy)\n\t\t// GCP load balancers are not customizable and are set to (3 fail @ 8s interval, 1 healthy)\n\n\t\tif v, err := shouldUseLocalWithFallback(ci, service); err != nil {\n\t\t\treturn true, service, err\n\t\t} else if v {\n\t\t\tservice.Annotations[localWithFallbackAnnotation] = \"\"\n\t\t}\n\t}\n\n\tif ci.Spec.EndpointPublishingStrategy != nil {\n\t\tlb := ci.Spec.EndpointPublishingStrategy.LoadBalancer\n\t\tif lb != nil && len(lb.AllowedSourceRanges) > 0 {\n\t\t\tcidrs := make([]string, len(lb.AllowedSourceRanges))\n\t\t\tfor i, cidr := range lb.AllowedSourceRanges {\n\t\t\t\tcidrs[i] = string(cidr)\n\t\t\t}\n\t\t\tservice.Spec.LoadBalancerSourceRanges = cidrs\n\t\t}\n\t}\n\n\tservice.SetOwnerReferences([]metav1.OwnerReference{deploymentRef})\n\treturn true, service, nil\n}", "func (m *MockClusterScoper) Subnets() v1beta1.Subnets {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Subnets\")\n\tret0, _ := ret[0].(v1beta1.Subnets)\n\treturn ret0\n}", "func (m *MockLabeledDestinationSet) Labels() map[string]string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Labels\")\n\tret0, _ := ret[0].(map[string]string)\n\treturn ret0\n}", "func NewLoadBalancer(c config.LoadBalancerConfig) *LoadBalancer {\n\tvar lb LoadBalancer\n\tif c.Hosts != nil && len(c.Hosts) > 0 {\n\t\tlb.hosts = make([]string, len(c.Hosts))\n\t\tfor i, server := range c.Hosts {\n\t\t\tlb.hosts[i] = server\n\t\t\tgloballog.WithFields(logrus.Fields{\n\t\t\t\t\"host\": 
server,\n\t\t\t\t\"index\": i,\n\t\t\t}).Debug(\"adding lb host\")\n\t\t}\n\t} else {\n\t\tlb.hosts = make([]string, 10)\n\t}\n\tlb.mode = c.BalanceMode\n\tlb.hostLock = new(sync.RWMutex)\n\treturn &lb\n}", "func (mr *MockLoadBalanceMockRecorder) DescribeLoadBalancer(region, lbID, name interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeLoadBalancer\", reflect.TypeOf((*MockLoadBalance)(nil).DescribeLoadBalancer), region, lbID, name)\n}", "func generateLoadBalancers(cr *svcapitypes.Service) []*svcsdk.LoadBalancer {\n\tloadBalancers := []*svcsdk.LoadBalancer{}\n\n\tif cr.Spec.ForProvider.LoadBalancers == nil {\n\t\treturn loadBalancers\n\t}\n\n\tfor _, loadBalancer := range cr.Spec.ForProvider.LoadBalancers {\n\t\tconvertedLB := &svcsdk.LoadBalancer{}\n\t\tconvertedLB.ContainerName = loadBalancer.ContainerName\n\t\tconvertedLB.ContainerPort = loadBalancer.ContainerPort\n\t\tconvertedLB.LoadBalancerName = loadBalancer.LoadBalancerName\n\t\tconvertedLB.TargetGroupArn = loadBalancer.TargetGroupARN\n\n\t\tloadBalancers = append(loadBalancers, convertedLB)\n\t}\n\treturn loadBalancers\n}", "func TestLoadBalancerPolicyRequestHashHeader(t *testing.T) {\n\trh, c, done := setup(t)\n\tdefer done()\n\n\ts1 := fixture.NewService(\"app\").WithPorts(\n\t\tv1.ServicePort{Port: 80, TargetPort: intstr.FromInt(8080)},\n\t\tv1.ServicePort{Port: 8080, TargetPort: intstr.FromInt(8080)})\n\trh.OnAdd(s1)\n\n\tproxy1 := fixture.NewProxy(\"simple\").\n\t\tWithFQDN(\"www.example.com\").\n\t\tWithSpec(contour_api_v1.HTTPProxySpec{\n\t\t\tRoutes: []contour_api_v1.Route{{\n\t\t\t\tConditions: matchconditions(prefixMatchCondition(\"/cart\")),\n\t\t\t\tLoadBalancerPolicy: &contour_api_v1.LoadBalancerPolicy{\n\t\t\t\t\tStrategy: \"RequestHash\",\n\t\t\t\t\tRequestHashPolicies: []contour_api_v1.RequestHashPolicy{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tTerminal: true,\n\t\t\t\t\t\t\tHeaderHashOptions: 
&contour_api_v1.HeaderHashOptions{\n\t\t\t\t\t\t\t\tHeaderName: \"X-Some-Header\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tHeaderHashOptions: &contour_api_v1.HeaderHashOptions{\n\t\t\t\t\t\t\t\tHeaderName: \"X-Some-Other-Header\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tServices: []contour_api_v1.Service{{\n\t\t\t\t\tName: s1.Name,\n\t\t\t\t\tPort: 80,\n\t\t\t\t}},\n\t\t\t}},\n\t\t})\n\trh.OnAdd(proxy1)\n\n\tc.Request(clusterType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tResources: resources(t,\n\t\t\tDefaultCluster(&envoy_cluster_v3.Cluster{\n\t\t\t\tName: s1.Namespace + \"/\" + s1.Name + \"/80/1a2ffc1fef\",\n\t\t\t\tClusterDiscoveryType: envoy_v3.ClusterDiscoveryType(envoy_cluster_v3.Cluster_EDS),\n\t\t\t\tAltStatName: s1.Namespace + \"_\" + s1.Name + \"_80\",\n\t\t\t\tEdsClusterConfig: &envoy_cluster_v3.Cluster_EdsClusterConfig{\n\t\t\t\t\tEdsConfig: envoy_v3.ConfigSource(\"contour\"),\n\t\t\t\t\tServiceName: s1.Namespace + \"/\" + s1.Name,\n\t\t\t\t},\n\t\t\t\tLbPolicy: envoy_cluster_v3.Cluster_RING_HASH,\n\t\t\t}),\n\t\t),\n\t\tTypeUrl: clusterType,\n\t})\n\n\tc.Request(routeType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tResources: resources(t,\n\t\t\tenvoy_v3.RouteConfiguration(\"ingress_http\",\n\t\t\t\tenvoy_v3.VirtualHost(\"www.example.com\",\n\t\t\t\t\t&envoy_route_v3.Route{\n\t\t\t\t\t\tMatch: routePrefix(\"/cart\"),\n\t\t\t\t\t\tAction: withRequestHashPolicySpecifiers(\n\t\t\t\t\t\t\trouteCluster(\"default/app/80/1a2ffc1fef\"),\n\t\t\t\t\t\t\thashPolicySpecifier{headerName: \"X-Some-Header\", terminal: true},\n\t\t\t\t\t\t\thashPolicySpecifier{headerName: \"X-Some-Other-Header\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t\tTypeUrl: routeType,\n\t})\n}", "func TestServiceRegistryExternalTrafficHealthCheckNodePortUserAllocationBeta(t *testing.T) {\n\trandomNodePort := generateRandomNodePort()\n\tctx := genericapirequest.NewDefaultContext()\n\tstorage, _, server 
:= NewTestREST(t, nil)\n\tdefer server.Terminate(t)\n\tsvc := &api.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"external-lb-esipp\",\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tapi.BetaAnnotationExternalTraffic: api.AnnotationValueExternalTrafficLocal,\n\t\t\t\tapi.BetaAnnotationHealthCheckNodePort: fmt.Sprintf(\"%v\", randomNodePort),\n\t\t\t},\n\t\t},\n\t\tSpec: api.ServiceSpec{\n\t\t\tSelector: map[string]string{\"bar\": \"baz\"},\n\t\t\tSessionAffinity: api.ServiceAffinityNone,\n\t\t\tType: api.ServiceTypeLoadBalancer,\n\t\t\tPorts: []api.ServicePort{{\n\t\t\t\tPort: 6502,\n\t\t\t\tProtocol: api.ProtocolTCP,\n\t\t\t\tTargetPort: intstr.FromInt(6502),\n\t\t\t}},\n\t\t},\n\t}\n\tcreated_svc, err := storage.Create(ctx, svc, false)\n\tif created_svc == nil || err != nil {\n\t\tt.Fatalf(\"Unexpected failure creating service :%v\", err)\n\t}\n\tcreated_service := created_svc.(*api.Service)\n\tif !service.NeedsHealthCheck(created_service) {\n\t\tt.Errorf(\"Expecting health check needed, returned health check not needed instead\")\n\t}\n\tport := service.GetServiceHealthCheckNodePort(created_service)\n\tif port == 0 {\n\t\tt.Errorf(\"Failed to allocate health check node port and set the HealthCheckNodePort\")\n\t}\n\tif port != randomNodePort {\n\t\tt.Errorf(\"Failed to allocate requested nodePort expected %d, got %d\", randomNodePort, port)\n\t}\n}", "func TestMinimalGCEInternalLoadBalancer(t *testing.T) {\n\tnewIntegrationTest(\"minimal-gce-ilb.example.com\", \"minimal_gce_ilb\").\n\t\twithAddons(\n\t\t\tdnsControllerAddon,\n\t\t\tgcpCCMAddon,\n\t\t\tgcpPDCSIAddon,\n\t\t).\n\t\trunTestTerraformGCE(t)\n}", "func NewMocklbDescriber(ctrl *gomock.Controller) *MocklbDescriber {\n\tmock := &MocklbDescriber{ctrl: ctrl}\n\tmock.recorder = &MocklbDescriberMockRecorder{mock}\n\treturn mock\n}", "func checkLoadBalancerServiceType(clientset kubernetes.Interface, log logger.Logger) error {\n\tconst (\n\t\ttestServiceName = 
\"kubernetes-test-service\"\n\t\ttestNamespace = \"default\"\n\t\twaitTimeoutSec = 300\n\t)\n\tlog.Infof(\"Creating test service %s/%s\", testServiceName, testNamespace)\n\tsvc, err := createTestLoadBalancer(testServiceName, testNamespace, clientset)\n\tif err != nil || svc == nil {\n\t\treturn errors.Wrap(err, \"error creating test service\")\n\t}\n\n\t// handle service deletion\n\tdefer func() {\n\t\tclientset.CoreV1().Services(testNamespace).Delete(context.TODO(), testServiceName, v1.DeleteOptions{})\n\t\tlog.Debugf(\"test service %s deleted\", testNamespace, testServiceName)\n\t}()\n\n\tlog.Infof(\"Checking LoadBalancer service type\")\n\tfor i := 1; i < waitTimeoutSec; i += 1 {\n\t\ttime.Sleep(time.Second)\n\t\ts, err := clientset.CoreV1().Services(testNamespace).Get(context.TODO(), testServiceName, v1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error getting test service\")\n\t\t}\n\t\tif loadBalancerProvisioned(s) {\n\t\t\treturn nil\n\t\t}\n\t\tcontinue\n\t}\n\treturn errors.New(\"Service with LoadBalancer didn't get ingress address\")\n}", "func isSDKLoadBalancerRequiresReplacement(sdkLB LoadBalancerWithTags, resLB *elbv2model.LoadBalancer) bool {\n\tif string(resLB.Spec.Type) != awssdk.StringValue(sdkLB.LoadBalancer.Type) {\n\t\treturn true\n\t}\n\tif resLB.Spec.Scheme != nil && string(*resLB.Spec.Scheme) != awssdk.StringValue(sdkLB.LoadBalancer.Scheme) {\n\t\treturn true\n\t}\n\treturn false\n}", "func TestAllocHappyPath(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\", \"FF::0/48\"}),\n\t}, true, true, nil)\n\n\t// Initially request only an IPv4\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: 
slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action.(k8s_testing.PatchAction))\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Unexpected condition type assigned to service\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Unexpected condition status assigned to service\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service to be updated\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tsvc, err := fixture.svcClient.Services(\"default\").Get(context.Background(), \"service-a\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Switch to requesting an IPv6 address\n\tsvc.Spec.IPFamilies = []slim_core_v1.IPFamily{\n\t\tslim_core_v1.IPv6Protocol,\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || 
action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action.(k8s_testing.PatchAction))\n\n\t\t// The second update allocates the new IPv6\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() != nil {\n\t\t\tt.Error(\"Expected service to receive a IPv6 address\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t_, err = fixture.svcClient.Services(\"default\").Update(context.Background(), svc, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update after update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\t// Allow time for additional events to fire\n\ttime.Sleep(100 * time.Millisecond)\n\n\tsvc, err = fixture.svcClient.Services(\"default\").Get(context.Background(), \"service-a\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Switch back to requesting an IPv4 address\n\tsvc.Spec.IPFamilies = []slim_core_v1.IPFamily{\n\t\tslim_core_v1.IPv4Protocol,\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action.(k8s_testing.PatchAction))\n\n\t\t// The second update allocates the new IPv4\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t_, err = fixture.svcClient.Services(\"default\").Update(context.Background(), svc, 
meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update after update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n}", "func NewLoadBalancer() *LoadBalancer {\n\tlb := &LoadBalancer{\n\t\tnodes: make(map[string]*weightedNode),\n\t}\n\treturn lb\n}", "func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tlb, err := l.fetchLoadBalancer(ctx, clusterName, service)\n\tswitch err {\n\tcase nil:\n\t\t// continue\n\tcase LoadBalancerNotFound:\n\t\t// create LoadBalancer\n\t\tlb, err = l.createLoadBalancer(ctx, clusterName, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\t// any kind of Error\n\t\tklog.Errorf(\"error getting loadbalancer for service %s: %v\", service.Name, err)\n\t\treturn nil, err\n\t}\n\n\tif service.Spec.LoadBalancerIP != \"\" && service.Spec.LoadBalancerIP != lb.IP[0].IPAddress {\n\t\terr = l.deleteLoadBalancer(ctx, lb, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlb, err = l.createLoadBalancer(ctx, clusterName, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif lb.Status != scwlb.LbStatusReady {\n\t\treturn nil, LoadBalancerNotReady\n\t}\n\n\terr = l.updateLoadBalancer(ctx, lb, service, nodes)\n\tif err != nil {\n\t\tklog.Errorf(\"error updating loadbalancer for service %s: %v\", service.Name, err)\n\t\treturn nil, err\n\t}\n\n\tstatus := &v1.LoadBalancerStatus{}\n\tstatus.Ingress = make([]v1.LoadBalancerIngress, len(lb.IP))\n\tfor idx, ip := range lb.IP {\n\t\tif getUseHostname(service) {\n\t\t\tstatus.Ingress[idx].Hostname = ip.Reverse\n\t\t} else {\n\t\t\tstatus.Ingress[idx].IP = ip.IPAddress\n\t\t}\n\t}\n\n\treturn status, nil\n}", "func TestAWSLBController(t *testing.T) {\n\tnewIntegrationTest(\"minimal.example.com\", 
\"aws-lb-controller\").\n\t\twithOIDCDiscovery().\n\t\twithServiceAccountRole(\"dns-controller.kube-system\", true).\n\t\twithServiceAccountRole(\"aws-load-balancer-controller.kube-system\", true).\n\t\twithServiceAccountRole(\"aws-cloud-controller-manager.kube-system\", true).\n\t\twithServiceAccountRole(\"aws-node-termination-handler.kube-system\", true).\n\t\twithServiceAccountRole(\"ebs-csi-controller-sa.kube-system\", true).\n\t\twithAddons(\"aws-load-balancer-controller.addons.k8s.io-k8s-1.19\",\n\t\t\t\"certmanager.io-k8s-1.16\",\n\t\t\tawsEBSCSIAddon,\n\t\t\tdnsControllerAddon,\n\t\t\tawsCCMAddon,\n\t\t).\n\t\trunTestTerraformAWS(t)\n}", "func HandleLoadbalancerCreationSuccessfully(t *testing.T, response string) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestJSONRequest(t, r, `{\n\t\t\t\"loadbalancer\": {\n\t\t\t\t\"name\": \"db_lb\",\n\t\t\t\t\"vip_port_id\": \"2bf413c8-41a9-4477-b505-333d5cbe8b55\",\n\t\t\t\t\"vip_subnet_id\": \"9cedb85d-0759-4898-8a4b-fa5a5ea10086\",\n\t\t\t\t\"vip_address\": \"10.30.176.48\",\n\t\t\t\t\"flavor_id\": \"bba40eb2-ee8c-11e9-81b4-2a2ae2dbcce4\",\n\t\t\t\t\"provider\": \"haproxy\",\n\t\t\t\t\"admin_state_up\": true,\n\t\t\t\t\"tags\": [\"test\", \"stage\"]\n\t\t\t}\n\t\t}`)\n\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tfmt.Fprintf(w, response)\n\t})\n}", "func waitForIngressLB(ctx context.Context, cl client.Client, timeout time.Duration, ns, name string) (string, error) {\n\tnsName := types.NamespacedName{\n\t\tNamespace: ns,\n\t\tName: name,\n\t}\n\ting := &networkingv1.Ingress{}\n\terr := wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {\n\t\tif err := cl.Get(ctx, nsName, ing); err != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif ing.Status.LoadBalancer.Ingress == nil {\n\t\t\treturn false, 
nil\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"timed out waiting for ingress %s/%s load-balancer: %v\", ns, name, err)\n\t}\n\n\tswitch {\n\tcase len(ing.Status.LoadBalancer.Ingress[0].Hostname) > 0:\n\t\treturn ing.Status.LoadBalancer.Ingress[0].Hostname, nil\n\tcase len(ing.Status.LoadBalancer.Ingress[0].IP) > 0:\n\t\treturn ing.Status.LoadBalancer.Ingress[0].IP, nil\n\t}\n\treturn \"\", fmt.Errorf(\"failed to determine ingress %s/%s load-balancer: %v\", ns, name, err)\n}", "func (m *MockLoadBalancerServiceIface) ListLoadBalancerRuleInstances(p *ListLoadBalancerRuleInstancesParams) (*ListLoadBalancerRuleInstancesResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListLoadBalancerRuleInstances\", p)\n\tret0, _ := ret[0].(*ListLoadBalancerRuleInstancesResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockNetworkUtils) GetAllNetworkInterfaces() ([]net.Interface, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetAllNetworkInterfaces\")\n\tret0, _ := ret[0].([]net.Interface)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestServiceRegistryExternalTrafficHealthCheckNodePortAllocationBeta(t *testing.T) {\n\tctx := genericapirequest.NewDefaultContext()\n\tstorage, _, server := NewTestREST(t, nil)\n\tdefer server.Terminate(t)\n\tsvc := &api.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"external-lb-esipp\",\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tapi.BetaAnnotationExternalTraffic: api.AnnotationValueExternalTrafficLocal,\n\t\t\t},\n\t\t},\n\t\tSpec: api.ServiceSpec{\n\t\t\tSelector: map[string]string{\"bar\": \"baz\"},\n\t\t\tSessionAffinity: api.ServiceAffinityNone,\n\t\t\tType: api.ServiceTypeLoadBalancer,\n\t\t\tPorts: []api.ServicePort{{\n\t\t\t\tPort: 6502,\n\t\t\t\tProtocol: api.ProtocolTCP,\n\t\t\t\tTargetPort: intstr.FromInt(6502),\n\t\t\t}},\n\t\t},\n\t}\n\tcreated_svc, err := storage.Create(ctx, svc, false)\n\tif created_svc == nil || 
err != nil {\n\t\tt.Errorf(\"Unexpected failure creating service %v\", err)\n\t}\n\tcreated_service := created_svc.(*api.Service)\n\tif !service.NeedsHealthCheck(created_service) {\n\t\tt.Errorf(\"Expecting health check needed, returned health check not needed instead\")\n\t}\n\tport := service.GetServiceHealthCheckNodePort(created_service)\n\tif port == 0 {\n\t\tt.Errorf(\"Failed to allocate health check node port and set the HealthCheckNodePort\")\n\t}\n\n}", "func (m *MockLoadBalancerServiceIface) ListLBHealthCheckPolicies(p *ListLBHealthCheckPoliciesParams) (*ListLBHealthCheckPoliciesResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListLBHealthCheckPolicies\", p)\n\tret0, _ := ret[0].(*ListLBHealthCheckPoliciesResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (c *MockLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGroupName, loadBalancerName string, parameters network.LoadBalancer) error {\n\tif _, ok := c.LBs[loadBalancerName]; ok {\n\t\treturn nil\n\t}\n\tparameters.Name = &loadBalancerName\n\tc.LBs[loadBalancerName] = parameters\n\treturn nil\n}", "func (s) TestBalancer_OneAddress(t *testing.T) {\n\ttestCases := []struct {\n\t\trt reportType\n\t\tcfg iwrr.LBConfig\n\t}{\n\t\t{rt: reportNone, cfg: perCallConfig},\n\t\t{rt: reportCall, cfg: perCallConfig},\n\t\t{rt: reportOOB, cfg: oobConfig},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(fmt.Sprintf(\"reportType:%v\", tc.rt), func(t *testing.T) {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\t\t\tdefer cancel()\n\n\t\t\tsrv := startServer(t, tc.rt)\n\n\t\t\tsc := svcConfig(t, tc.cfg)\n\t\t\tif err := srv.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil {\n\t\t\t\tt.Fatalf(\"Error starting client: %v\", err)\n\t\t\t}\n\n\t\t\t// Perform many RPCs to ensure the LB policy works with 1 address.\n\t\t\tfor i := 0; i < 100; i++ 
{\n\t\t\t\tsrv.callMetrics.SetQPS(float64(i))\n\t\t\t\tsrv.oobMetrics.SetQPS(float64(i))\n\t\t\t\tif _, err := srv.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {\n\t\t\t\t\tt.Fatalf(\"Error from EmptyCall: %v\", err)\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Millisecond) // Delay; test will run 100ms and should perform ~10 weight updates\n\t\t\t}\n\t\t})\n\t}\n}", "func (m *MockClusterScoper) APIServerLB() *v1beta1.LoadBalancerSpec {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"APIServerLB\")\n\tret0, _ := ret[0].(*v1beta1.LoadBalancerSpec)\n\treturn ret0\n}", "func CleanupKubernetesLoadbalancers(ctx context.Context, log logr.Logger, client openstackclient.Loadbalancing, subnetID, clusterName string) error {\n\tlbList, err := client.ListLoadbalancers(loadbalancers.ListOpts{\n\t\tVipSubnetID: subnetID,\n\t})\n\n\t// do we need that if we anyway want to delete the gardener managed subnet ?\n\tk8sSvcPrefix := servicePrefix + clusterName\n\tres := make(chan error, len(lbList))\n\tacceptableStates := map[string]struct{}{\n\t\t\"ACTIVE\": {},\n\t\t\"ERROR\": {},\n\t}\n\tw := sync.WaitGroup{}\n\tb := wait.Backoff{\n\t\tDuration: 1 * time.Second,\n\t\tJitter: 1.2,\n\t\tSteps: 10,\n\t}\n\tfor _, lb := range lbList {\n\t\tlb := lb\n\t\tif !strings.HasPrefix(lb.Name, k8sSvcPrefix) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, ok := acceptableStates[lb.ProvisioningStatus]; !ok {\n\t\t\treturn fmt.Errorf(\"load balancer %s can't be updated currently due to provisioning state: %s\", lb.ID, lb.ProvisioningStatus)\n\t\t}\n\n\t\tlog.Info(\"deleting orphan loadbalancer\", \"ID\", lb.ID, \"name\", lb.Name)\n\t\tw.Add(1)\n\t\tgo func() {\n\t\t\tdefer w.Done()\n\t\t\tif err := client.DeleteLoadbalancer(lb.ID, loadbalancers.DeleteOpts{Cascade: true}); err != nil {\n\t\t\t\tres <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr := wait.ExponentialBackoffWithContext(ctx, b, func() (done bool, err error) {\n\t\t\t\tlb, err := client.GetLoadbalancer(lb.ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 
false, err\n\t\t\t\t}\n\t\t\t\tif lb == nil {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t\treturn false, nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tres <- fmt.Errorf(\"failed to ensure loadbalancers are deleted: %v\", err)\n\t\t\t}\n\t\t}()\n\t}\n\tw.Wait()\n\tclose(res)\n\tfor errIn := range res {\n\t\terr = errors.Join(err, errIn)\n\t}\n\treturn err\n}", "func (s) TestXdsBalanceHandleBalancerConfigFallbackUpdate(t *testing.T) {\n\toriginalNewEDSBalancer := newEDSBalancer\n\tnewEDSBalancer = newFakeEDSBalancer\n\tdefer func() {\n\t\tnewEDSBalancer = originalNewEDSBalancer\n\t}()\n\n\tbuilder := balancer.Get(\"xds\")\n\tcc := newTestClientConn()\n\tlb, ok := builder.Build(cc, balancer.BuildOptions{}).(*xdsBalancer)\n\tif !ok {\n\t\tt.Fatalf(\"unable to type assert to *xdsBalancer\")\n\t}\n\tdefer lb.Close()\n\n\taddr, td, cleanup := setupServer(t)\n\n\tcfg := &testBalancerConfig{\n\t\tBalancerName: addr,\n\t\tChildPolicy: []lbPolicy{fakeBalancerA},\n\t\tFallbackPolicy: []lbPolicy{fakeBalancerA},\n\t}\n\tworkingBalancerConfig, _ := json.Marshal(cfg)\n\n\tif err := lb.HandleBalancerConfig(json.RawMessage(workingBalancerConfig)); err != nil {\n\t\tt.Fatalf(\"failed to HandleBalancerConfig(%v), due to err: %v\", string(workingBalancerConfig), err)\n\t}\n\n\tcfg.FallbackPolicy = []lbPolicy{fakeBalancerB}\n\tworkingBalancerConfig, _ = json.Marshal(cfg)\n\n\tif err := lb.HandleBalancerConfig(json.RawMessage(workingBalancerConfig)); err != nil {\n\t\tt.Fatalf(\"failed to HandleBalancerConfig(%v), due to err: %v\", string(workingBalancerConfig), err)\n\t}\n\n\ttd.sendResp(&response{resp: testEDSRespWithoutEndpoints})\n\n\tvar i int\n\tfor i = 0; i < 10; i++ {\n\t\tif edsLB := getLatestEdsBalancer(); edsLB != nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\tif i == 10 {\n\t\tt.Fatal(\"edsBalancer instance has not been created and assigned to lb.xdsLB after 1s\")\n\t}\n\n\tcleanup()\n\n\taddrs := []resolver.Address{{Addr: \"1.1.1.1:10001\"}, 
{Addr: \"2.2.2.2:10002\"}, {Addr: \"3.3.3.3:10003\"}}\n\tlb.HandleResolvedAddrs(addrs, nil)\n\n\t// verify fallback balancer B takes over\n\tselect {\n\tcase nsc := <-cc.newSubConns:\n\t\tif !reflect.DeepEqual(append(addrs, specialAddrForBalancerB), nsc) {\n\t\t\tt.Fatalf(\"got new subconn address %v, want %v\", nsc, append(addrs, specialAddrForBalancerB))\n\t\t}\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatalf(\"timeout when geting new subconn result\")\n\t}\n\n\tcfg.FallbackPolicy = []lbPolicy{fakeBalancerA}\n\tworkingBalancerConfig, _ = json.Marshal(cfg)\n\tif err := lb.HandleBalancerConfig(json.RawMessage(workingBalancerConfig)); err != nil {\n\t\tt.Fatalf(\"failed to HandleBalancerConfig(%v), due to err: %v\", string(workingBalancerConfig), err)\n\t}\n\n\t// verify fallback balancer A takes over\n\tselect {\n\tcase nsc := <-cc.newSubConns:\n\t\tif !reflect.DeepEqual(append(addrs, specialAddrForBalancerA), nsc) {\n\t\t\tt.Fatalf(\"got new subconn address %v, want %v\", nsc, append(addrs, specialAddrForBalancerA))\n\t\t}\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatalf(\"timeout when geting new subconn result\")\n\t}\n}", "func ExampleELB_CreateLoadBalancer_shared04() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t\tScheme: aws.String(\"internal\"),\n\t\tSecurityGroups: []*string{\n\t\t\taws.String(\"sg-a61988c3\"),\n\t\t},\n\t\tSubnets: []*string{\n\t\t\taws.String(\"subnet-15aaab61\"),\n\t\t},\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase 
elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func TestRangeDelete(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, 
nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, 500*time.Millisecond)\n\n\t// Add a new CIDR, this should not have any effect on the existing service.\n\tpoolA.Spec.Cidrs = append(poolA.Spec.Cidrs, cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\tCidr: \"10.0.20.0/24\",\n\t})\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif 
len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.20\") {\n\t\t\tt.Error(\"Expected new ingress to be in the 10.0.20.0/24 range\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t// Remove the existing range, this should trigger the re-allocation of the existing service\n\tpoolA.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: \"10.0.20.0/24\",\n\t\t},\n\t}\n\t_, err = fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}" ]
[ "0.6641239", "0.61146396", "0.6037609", "0.573003", "0.56537986", "0.5642106", "0.55869925", "0.55865353", "0.5546094", "0.55285203", "0.5510052", "0.5503564", "0.5503564", "0.54933125", "0.54905623", "0.54893786", "0.5481105", "0.54673535", "0.545948", "0.5448736", "0.54167783", "0.5406734", "0.5402846", "0.5383394", "0.5377976", "0.53758836", "0.53676564", "0.5355396", "0.5353084", "0.5315479", "0.5292263", "0.52740514", "0.5258095", "0.52522093", "0.52227706", "0.52219164", "0.52176577", "0.51813513", "0.5178377", "0.51700896", "0.5167307", "0.5133889", "0.51188594", "0.5098563", "0.5096357", "0.5089807", "0.50772697", "0.50739634", "0.5061199", "0.5053535", "0.50382155", "0.50211805", "0.50206316", "0.5012026", "0.4995295", "0.4995295", "0.49883628", "0.49867934", "0.49841934", "0.49839756", "0.49480823", "0.49445844", "0.49406403", "0.49343005", "0.49338716", "0.4933678", "0.49214754", "0.4917802", "0.49155065", "0.49090677", "0.4907915", "0.49051616", "0.49043065", "0.48940402", "0.4871422", "0.48646823", "0.48639172", "0.4863534", "0.48618233", "0.4855933", "0.48404774", "0.4825181", "0.4818776", "0.48172954", "0.4811225", "0.48067614", "0.48041835", "0.48010883", "0.48002294", "0.47846517", "0.47698173", "0.47616374", "0.47601768", "0.47547728", "0.47544608", "0.47445408", "0.47354093", "0.47218707", "0.4717402", "0.47129405" ]
0.7370017
0
DescribeLoadBalancerWithNs indicates an expected call of DescribeLoadBalancerWithNs
func (mr *MockLoadBalanceMockRecorder) DescribeLoadBalancerWithNs(ns, region, lbID, name interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancerWithNs", reflect.TypeOf((*MockLoadBalance)(nil).DescribeLoadBalancerWithNs), ns, region, lbID, name) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) DescribeLoadBalancerWithNs(ns, region, lbID, name string) (*cloud.LoadBalanceObject, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DescribeLoadBalancerWithNs\", ns, region, lbID, name)\n\tret0, _ := ret[0].(*cloud.LoadBalanceObject)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockLoadBalanceMockRecorder) DescribeLoadBalancer(region, lbID, name interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeLoadBalancer\", reflect.TypeOf((*MockLoadBalance)(nil).DescribeLoadBalancer), region, lbID, name)\n}", "func (lb *GetLoadbalancerInput) GetLoadbalancers() (GetLoadbalancerResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(lb.Cloud.Name)); status != true {\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetNetworks\")\n\t}\n\n\tswitch strings.ToLower(lb.Cloud.Name) {\n\tcase \"aws\":\n\n\t\t// Gets the established session so that it can carry out the process in cloud\n\t\tsess := (lb.Cloud.Client).(*session.Session)\n\n\t\t//authorizing to request further\n\t\tauthinpt := new(auth.EstablishConnectionInput)\n\t\tauthinpt.Region = lb.Cloud.Region\n\t\tauthinpt.Session = sess\n\t\tswitch strings.ToLower(lb.Type) {\n\t\tcase \"classic\":\n\t\t\tauthinpt.Resource = \"elb\"\n\t\tcase \"application\":\n\t\t\tauthinpt.Resource = \"elb2\"\n\t\t}\n\n\t\tlbin := new(loadbalance.GetLoadbalancerInput)\n\t\tlbin.GetRaw = lb.Cloud.GetRaw\n\t\tlbin.LbNames = lb.LbNames\n\t\tlbin.LbArns = lb.LbArns\n\t\tlbin.Type = lb.Type\n\t\tresponse, lberr := lbin.Getloadbalancers(*authinpt)\n\t\tif lberr != nil {\n\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t}\n\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\n\tcase \"azure\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultAzResponse)\n\tcase \"gcp\":\n\t\treturn GetLoadbalancerResponse{}, 
fmt.Errorf(common.DefaultGcpResponse)\n\tcase \"openstack\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultOpResponse)\n\tdefault:\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetLoadbalancers\")\n\t}\n}", "func (lb *GetLoadbalancerInput) GetAllLoadbalancer() (GetLoadbalancerResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(lb.Cloud.Name)); status != true {\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetNetworks\")\n\t}\n\n\tswitch strings.ToLower(lb.Cloud.Name) {\n\tcase \"aws\":\n\n\t\t// I will establish session so that we can carry out the process in cloud\n\t\tsess := (lb.Cloud.Client).(*session.Session)\n\n\t\t// authorizing further request\n\t\tauthinpt := new(auth.EstablishConnectionInput)\n\t\tauthinpt.Region = lb.Cloud.Region\n\t\tauthinpt.Session = sess\n\t\tauthinpt.Resource = \"elb12\"\n\t\tlbin := new(loadbalance.GetLoadbalancerInput)\n\t\tlbin.GetRaw = lb.Cloud.GetRaw\n\n\t\tswitch strings.ToLower(lb.Type) {\n\t\tcase \"classic\":\n\t\t\tresponse, lberr := lbin.GetAllClassicLb(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\t\tcase \"application\":\n\t\t\tresponse, lberr := lbin.GetAllApplicationLb(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\t\tcase \"\":\n\t\t\tresponse, lberr := lbin.GetAllLoadbalancer(*authinpt)\n\t\t\tif lberr != nil {\n\t\t\t\treturn GetLoadbalancerResponse{}, lberr\n\t\t\t}\n\t\t\treturn GetLoadbalancerResponse{AwsResponse: response}, nil\n\t\tdefault:\n\t\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(\"The loadbalancer type you entered is unknown to me\")\n\t\t}\n\n\tcase \"azure\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultAzResponse)\n\tcase 
\"gcp\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultGcpResponse)\n\tcase \"openstack\":\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultOpResponse)\n\tdefault:\n\t\treturn GetLoadbalancerResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"GetLoadbalancers\")\n\t}\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) ListNetscalerLoadBalancers(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListNetscalerLoadBalancers\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).ListNetscalerLoadBalancers), p)\n}", "func ExampleELB_DescribeLoadBalancers_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.DescribeLoadBalancersInput{\n\t\tLoadBalancerNames: []*string{\n\t\t\taws.String(\"my-load-balancer\"),\n\t\t},\n\t}\n\n\tresult, err := svc.DescribeLoadBalancers(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeDependencyThrottleException:\n\t\t\t\tfmt.Println(elb.ErrCodeDependencyThrottleException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (m *MockLoadBalance) DescribeLoadBalancer(region, lbID, name string) (*cloud.LoadBalanceObject, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DescribeLoadBalancer\", region, lbID, name)\n\tret0, _ := ret[0].(*cloud.LoadBalanceObject)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoadBalancerServiceIface) ListNetscalerLoadBalancers(p *ListNetscalerLoadBalancersParams) (*ListNetscalerLoadBalancersResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := 
m.ctrl.Call(m, \"ListNetscalerLoadBalancers\", p)\n\tret0, _ := ret[0].(*ListNetscalerLoadBalancersResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func isSDKLoadBalancerRequiresReplacement(sdkLB LoadBalancerWithTags, resLB *elbv2model.LoadBalancer) bool {\n\tif string(resLB.Spec.Type) != awssdk.StringValue(sdkLB.LoadBalancer.Type) {\n\t\treturn true\n\t}\n\tif resLB.Spec.Scheme != nil && string(*resLB.Spec.Scheme) != awssdk.StringValue(sdkLB.LoadBalancer.Scheme) {\n\t\treturn true\n\t}\n\treturn false\n}", "func TestNonMatchingLBClass(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tlbClass := \"net.example/some-other-class\"\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerClass: &lbClass,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"Unexpected patch to a service\")\n\n\t\treturn true\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n}", "func (c *MockLoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string) (*network.LoadBalancer, error) {\n\tfor _, lb := range c.LBs {\n\t\tif *lb.Name == loadBalancerName {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\treturn nil, nil\n}", "func (m *MockLoadBalancerServiceIface) ListLoadBalancers(p *ListLoadBalancersParams) 
(*ListLoadBalancersResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListLoadBalancers\", p)\n\tret0, _ := ret[0].(*ListLoadBalancersResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestRequestIPs(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.20\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.10.20\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.20'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif 
len(svc.Status.LoadBalancer.Ingress) != 3 {\n\t\t\tt.Error(\"Expected service to receive exactly three ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tfirst := false\n\t\tsecond := false\n\t\tthird := false\n\n\t\tfor _, ingress := range svc.Status.LoadBalancer.Ingress {\n\t\t\tswitch ingress.IP {\n\t\t\tcase \"10.0.10.21\":\n\t\t\t\tfirst = true\n\t\t\tcase \"10.0.10.22\":\n\t\t\t\tsecond = true\n\t\t\tcase \"10.0.10.23\":\n\t\t\t\tthird = true\n\t\t\tdefault:\n\t\t\t\tt.Error(\"Unexpected ingress IP\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tif !first {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.21'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !second {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.22'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !third {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.23'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tserviceB := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-b\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceBUID,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tciliumSvcLBIPSAnnotation: \"10.0.10.22,10.0.10.23\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Create(context.Background(), serviceB, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-c\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn 
true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Reason != \"already_allocated\" {\n\t\t\tt.Error(\"Expected condition reason to be 'already_allocated'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t// request an already allocated IP\n\tserviceC := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-c\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceCUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Create(context.Background(), serviceC, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) ListLoadBalancers(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListLoadBalancers\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).ListLoadBalancers), p)\n}", "func ExampleELB_DescribeTags_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.DescribeTagsInput{\n\t\tLoadBalancerNames: []*string{\n\t\t\taws.String(\"my-load-balancer\"),\n\t\t},\n\t}\n\n\tresult, err := svc.DescribeTags(input)\n\tif err != nil {\n\t\tif aerr, ok := 
err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (s) TestBalancer_TwoAddresses_UpdateLoads(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\tsrv1 := startServer(t, reportOOB)\n\tsrv2 := startServer(t, reportOOB)\n\n\t// srv1 starts loaded and srv2 starts without load; ensure RPCs are routed\n\t// disproportionately to srv2 (10:1).\n\tsrv1.oobMetrics.SetQPS(10.0)\n\tsrv1.oobMetrics.SetApplicationUtilization(1.0)\n\n\tsrv2.oobMetrics.SetQPS(10.0)\n\tsrv2.oobMetrics.SetApplicationUtilization(.1)\n\n\tsc := svcConfig(t, oobConfig)\n\tif err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil {\n\t\tt.Fatalf(\"Error starting client: %v\", err)\n\t}\n\taddrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}}\n\tsrv1.R.UpdateState(resolver.State{Addresses: addrs})\n\n\t// Call each backend once to ensure the weights have been received.\n\tensureReached(ctx, t, srv1.Client, 2)\n\n\t// Wait for the weight update period to allow the new weights to be processed.\n\ttime.Sleep(weightUpdatePeriod)\n\tcheckWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10})\n\n\t// Update the loads so srv2 is loaded and srv1 is not; ensure RPCs are\n\t// routed disproportionately to srv1.\n\tsrv1.oobMetrics.SetQPS(10.0)\n\tsrv1.oobMetrics.SetApplicationUtilization(.1)\n\n\tsrv2.oobMetrics.SetQPS(10.0)\n\tsrv2.oobMetrics.SetApplicationUtilization(1.0)\n\n\t// Wait for the weight update period to allow the new weights to be processed.\n\ttime.Sleep(weightUpdatePeriod + 
oobReportingInterval)\n\tcheckWeights(ctx, t, srvWeight{srv1, 10}, srvWeight{srv2, 1})\n}", "func (mr *MockClientMockRecorder) DescribeSubnets(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeSubnets\", reflect.TypeOf((*MockClient)(nil).DescribeSubnets), arg0)\n}", "func (mr *MockClientMockRecorder) DescribeSubnets(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeSubnets\", reflect.TypeOf((*MockClient)(nil).DescribeSubnets), arg0)\n}", "func (el *ELBV2Manager) describeLoadbalancers(marker *string, loadbalancers []*elbv2.LoadBalancer) ([]*elbv2.LoadBalancer, error) {\n\n\tinput := &elbv2.DescribeLoadBalancersInput{\n\t\tMarker: marker,\n\t}\n\n\tresp, err := el.client.DescribeLoadBalancers(input)\n\tif err != nil {\n\t\tlog.WithField(\"error\", err).Error(\"could not describe elb instances\")\n\t\treturn nil, err\n\t}\n\n\tif loadbalancers == nil {\n\t\tloadbalancers = []*elbv2.LoadBalancer{}\n\t}\n\n\tloadbalancers = append(loadbalancers, resp.LoadBalancers...)\n\n\tif resp.NextMarker != nil {\n\t\treturn el.describeLoadbalancers(resp.NextMarker, loadbalancers)\n\t}\n\n\treturn loadbalancers, nil\n}", "func CreateLoadBalancer(ctx context.Context, lbName, pipName string) (lb network.LoadBalancer, err error) {\n\tprobeName := \"probe\"\n\tfrontEndIPConfigName := \"fip\"\n\tbackEndAddressPoolName := \"backEndPool\"\n\tidPrefix := fmt.Sprintf(\"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers\", config.SubscriptionID(), config.GroupName())\n\n\tpip, err := GetPublicIP(ctx, pipName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tlbClient := getLBClient()\n\tfuture, err := lbClient.CreateOrUpdate(ctx,\n\t\tconfig.GroupName(),\n\t\tlbName,\n\t\tnetwork.LoadBalancer{\n\t\t\tLocation: to.StringPtr(config.Location()),\n\t\t\tLoadBalancerPropertiesFormat: 
&network.LoadBalancerPropertiesFormat{\n\t\t\t\tFrontendIPConfigurations: &[]network.FrontendIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &frontEndIPConfigName,\n\t\t\t\t\t\tFrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{\n\t\t\t\t\t\t\tPrivateIPAllocationMethod: network.Dynamic,\n\t\t\t\t\t\t\tPublicIPAddress: &pip,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBackendAddressPools: &[]network.BackendAddressPool{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &backEndAddressPoolName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProbes: &[]network.Probe{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &probeName,\n\t\t\t\t\t\tProbePropertiesFormat: &network.ProbePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.ProbeProtocolHTTP,\n\t\t\t\t\t\t\tPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tIntervalInSeconds: to.Int32Ptr(15),\n\t\t\t\t\t\t\tNumberOfProbes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tRequestPath: to.StringPtr(\"healthprobe.aspx\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLoadBalancingRules: &[]network.LoadBalancingRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"lbRule\"),\n\t\t\t\t\t\tLoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(80),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tLoadDistribution: network.Default,\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tBackendAddressPool: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/backendAddressPools/%s\", idPrefix, lbName, backEndAddressPoolName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tProbe: &network.SubResource{\n\t\t\t\t\t\t\t\tID: 
to.StringPtr(fmt.Sprintf(\"/%s/%s/probes/%s\", idPrefix, lbName, probeName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tInboundNatRules: &[]network.InboundNatRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"natRule1\"),\n\t\t\t\t\t\tInboundNatRulePropertiesFormat: &network.InboundNatRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(21),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(22),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"natRule2\"),\n\t\t\t\t\t\tInboundNatRulePropertiesFormat: &network.InboundNatRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(23),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(22),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn lb, fmt.Errorf(\"cannot create load balancer: %v\", err)\n\t}\n\n\terr = future.WaitForCompletion(ctx, lbClient.Client)\n\tif err != nil {\n\t\treturn lb, fmt.Errorf(\"cannot get load balancer create or update future response: %v\", err)\n\t}\n\n\treturn future.Result(lbClient)\n}", "func (nat *NATCloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {\n\tstatus = 
&v1.LoadBalancerStatus{}\n\tnatClient, err := nat.getNATClient()\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\treturn nil, false, nil\n\t\t}\n\n\t\treturn nil, false, err\n\t}\n\n\t//get dnat rules binded to the dnat instance\n\tnatGatewayId := service.ObjectMeta.Annotations[AnnotationsNATID]\n\tif natGatewayId == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"The id of natGateway should be set by %v in annotations \", AnnotationsNATID)\n\t}\n\tdnatRuleList, err := listDnatRule(natClient, natGatewayId)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tif len(dnatRuleList.DNATRules) == 0 {\n\t\treturn nil, false, nil\n\t}\n\n\tfor _, externalPort := range service.Spec.Ports {\n\t\t//check if the DNAT rule exists\n\t\tif nat.getDNATRule(dnatRuleList, &externalPort) == nil {\n\t\t\treturn nil, false, nil\n\t\t}\n\t}\n\tstatus.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: service.Spec.LoadBalancerIP})\n\treturn status, true, nil\n}", "func (bc *Baiducloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer(%v, %v, %v, %v, %v)\",\n\t\tclusterName, service.Namespace, service.Name, bc.Region, service.Spec.LoadBalancerIP, service.Spec.Ports, service.Annotations)\n\t// workaround to support old version, can be removed if not support old version\n\tbc.workAround(service)\n\tresult, err := ExtractServiceAnnotation(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = bc.validateService(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// ensure BLB\n\tlb, err := bc.ensureBLB(ctx, clusterName, service, nodes, result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif result.LoadBalancerInternalVpc == \"true\" {\n\t\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer: use LoadBalancerInternalVpc, EXTERNAL-IP is %s\", service.Namespace, service.Name, lb.Address)\n\t\treturn &v1.LoadBalancerStatus{Ingress: 
[]v1.LoadBalancerIngress{{IP: lb.Address}}}, nil\n\t}\n\n\t// ensure EIP\n\tpubIP, err := bc.ensureEIP(ctx, clusterName, service, nodes, result, lb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tglog.V(3).Infof(\"[%v %v] EnsureLoadBalancer: EXTERNAL-IP is %s\", service.Namespace, service.Name, pubIP)\n\treturn &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: pubIP}}}, nil\n}", "func (mr *MockClientMockRecorder) RegisterInstancesWithLoadBalancer(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RegisterInstancesWithLoadBalancer\", reflect.TypeOf((*MockClient)(nil).RegisterInstancesWithLoadBalancer), arg0)\n}", "func (bc *Baiducloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {\n\t// workaround to support old version, can be removed if not support old version\n\tbc.workAround(service)\n\tresult, err := ExtractServiceAnnotation(service)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tif len(result.CceAutoAddLoadBalancerId) == 0 {\n\t\treturn nil, false, nil\n\t}\n\tlb, exists, err := bc.getBCELoadBalancerById(result.CceAutoAddLoadBalancerId)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tif !exists {\n\t\treturn nil, false, nil\n\t}\n\n\tvar ip string\n\tif result.LoadBalancerInternalVpc == \"true\" {\n\t\tip = lb.Address // internal vpc ip\n\t} else {\n\t\tip = lb.PublicIp // EIP\n\t}\n\tglog.V(3).Infof(\"[%v %v] GetLoadBalancer ip: %s\", service.Namespace, service.Name, ip)\n\n\treturn &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: ip}}}, true, nil\n}", "func (c *Client) WaitUntilLoadBalancerAvailable(ctx context.Context, input *DescribeLoadBalancersInput, opts ...aws.WaiterOption) error {\n\tw := aws.Waiter{\n\t\tName: \"WaitUntilLoadBalancerAvailable\",\n\t\tMaxAttempts: 40,\n\t\tDelay: aws.ConstantWaiterDelay(15 * time.Second),\n\t\tAcceptors: 
[]aws.WaiterAcceptor{\n\t\t\t{\n\t\t\t\tState: aws.SuccessWaiterState,\n\t\t\t\tMatcher: aws.PathAllWaiterMatch, Argument: \"LoadBalancers[].State.Code\",\n\t\t\t\tExpected: \"active\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tState: aws.RetryWaiterState,\n\t\t\t\tMatcher: aws.PathAnyWaiterMatch, Argument: \"LoadBalancers[].State.Code\",\n\t\t\t\tExpected: \"provisioning\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tState: aws.RetryWaiterState,\n\t\t\t\tMatcher: aws.ErrorWaiterMatch,\n\t\t\t\tExpected: \"LoadBalancerNotFound\",\n\t\t\t},\n\t\t},\n\t\tLogger: c.Config.Logger,\n\t\tNewRequest: func(opts []aws.Option) (*aws.Request, error) {\n\t\t\tvar inCpy *DescribeLoadBalancersInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq := c.DescribeLoadBalancersRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req.Request, nil\n\t\t},\n\t}\n\tw.ApplyOptions(opts...)\n\n\treturn w.Wait(ctx)\n}", "func TestRequestIPWithMismatchedLabel(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"blue\",\n\t\t},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"color\": \"red\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.123\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := 
fixture.PatchedSvc(action)\n\t\tif svc.Status.Conditions[0].Reason != \"pool_selector_mismatch\" {\n\t\t\tt.Error(\"Expected service to receive 'pool_selector_mismatch' condition\")\n\t\t}\n\n\t\treturn true\n\t}, 1*time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected status update of service\")\n\t}\n}", "func GetLoadBalancerNames(kubectlOptions *KubectlOptions) ([]string, error) {\n\tlogger := logging.GetProjectLogger()\n\tlogger.Infof(\"Getting all LoadBalancer names from services in kubernetes\")\n\n\tclient, err := GetKubernetesClientFromOptions(kubectlOptions)\n\tif err != nil {\n\t\treturn nil, errors.WithStackTrace(err)\n\t}\n\tservices, err := GetAllServices(client)\n\tif err != nil {\n\t\treturn nil, errors.WithStackTrace(err)\n\t}\n\tloadBalancerServices := filterLoadBalancerServices(services)\n\tlogger.Infof(\"Found %d LoadBalancer services of %d services in kubernetes.\", len(loadBalancerServices), len(services))\n\n\tlbNames := []string{}\n\tfor _, service := range loadBalancerServices {\n\t\tlbName, err := GetLoadBalancerNameFromService(service)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t}\n\t\tlbNames = append(lbNames, lbName)\n\t}\n\tlogger.Infof(\"Successfully extracted loadbalancer names\")\n\treturn lbNames, nil\n}", "func checkLoadBalancerServiceType(clientset kubernetes.Interface, log logger.Logger) error {\n\tconst (\n\t\ttestServiceName = \"kubernetes-test-service\"\n\t\ttestNamespace = \"default\"\n\t\twaitTimeoutSec = 300\n\t)\n\tlog.Infof(\"Creating test service %s/%s\", testServiceName, testNamespace)\n\tsvc, err := createTestLoadBalancer(testServiceName, testNamespace, clientset)\n\tif err != nil || svc == nil {\n\t\treturn errors.Wrap(err, \"error creating test service\")\n\t}\n\n\t// handle service deletion\n\tdefer func() {\n\t\tclientset.CoreV1().Services(testNamespace).Delete(context.TODO(), 
testServiceName, v1.DeleteOptions{})\n\t\tlog.Debugf(\"test service %s deleted\", testNamespace, testServiceName)\n\t}()\n\n\tlog.Infof(\"Checking LoadBalancer service type\")\n\tfor i := 1; i < waitTimeoutSec; i += 1 {\n\t\ttime.Sleep(time.Second)\n\t\ts, err := clientset.CoreV1().Services(testNamespace).Get(context.TODO(), testServiceName, v1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error getting test service\")\n\t\t}\n\t\tif loadBalancerProvisioned(s) {\n\t\t\treturn nil\n\t\t}\n\t\tcontinue\n\t}\n\treturn errors.New(\"Service with LoadBalancer didn't get ingress address\")\n}", "func (s *ClusterScope) LBSpecs() []azure.ResourceSpecGetter {\n\tspecs := []azure.ResourceSpecGetter{\n\t\t&loadbalancers.LBSpec{\n\t\t\t// API Server LB\n\t\t\tName: s.APIServerLB().Name,\n\t\t\tResourceGroup: s.ResourceGroup(),\n\t\t\tSubscriptionID: s.SubscriptionID(),\n\t\t\tClusterName: s.ClusterName(),\n\t\t\tLocation: s.Location(),\n\t\t\tExtendedLocation: s.ExtendedLocation(),\n\t\t\tVNetName: s.Vnet().Name,\n\t\t\tVNetResourceGroup: s.Vnet().ResourceGroup,\n\t\t\tSubnetName: s.ControlPlaneSubnet().Name,\n\t\t\tFrontendIPConfigs: s.APIServerLB().FrontendIPs,\n\t\t\tAPIServerPort: s.APIServerPort(),\n\t\t\tType: s.APIServerLB().Type,\n\t\t\tSKU: s.APIServerLB().SKU,\n\t\t\tRole: infrav1.APIServerRole,\n\t\t\tBackendPoolName: s.APIServerLB().BackendPool.Name,\n\t\t\tIdleTimeoutInMinutes: s.APIServerLB().IdleTimeoutInMinutes,\n\t\t\tAdditionalTags: s.AdditionalTags(),\n\t\t},\n\t}\n\n\t// Node outbound LB\n\tif s.NodeOutboundLB() != nil {\n\t\tspecs = append(specs, &loadbalancers.LBSpec{\n\t\t\tName: s.NodeOutboundLB().Name,\n\t\t\tResourceGroup: s.ResourceGroup(),\n\t\t\tSubscriptionID: s.SubscriptionID(),\n\t\t\tClusterName: s.ClusterName(),\n\t\t\tLocation: s.Location(),\n\t\t\tExtendedLocation: s.ExtendedLocation(),\n\t\t\tVNetName: s.Vnet().Name,\n\t\t\tVNetResourceGroup: s.Vnet().ResourceGroup,\n\t\t\tFrontendIPConfigs: 
s.NodeOutboundLB().FrontendIPs,\n\t\t\tType: s.NodeOutboundLB().Type,\n\t\t\tSKU: s.NodeOutboundLB().SKU,\n\t\t\tBackendPoolName: s.NodeOutboundLB().BackendPool.Name,\n\t\t\tIdleTimeoutInMinutes: s.NodeOutboundLB().IdleTimeoutInMinutes,\n\t\t\tRole: infrav1.NodeOutboundRole,\n\t\t\tAdditionalTags: s.AdditionalTags(),\n\t\t})\n\t}\n\n\t// Control Plane Outbound LB\n\tif s.ControlPlaneOutboundLB() != nil {\n\t\tspecs = append(specs, &loadbalancers.LBSpec{\n\t\t\tName: s.ControlPlaneOutboundLB().Name,\n\t\t\tResourceGroup: s.ResourceGroup(),\n\t\t\tSubscriptionID: s.SubscriptionID(),\n\t\t\tClusterName: s.ClusterName(),\n\t\t\tLocation: s.Location(),\n\t\t\tExtendedLocation: s.ExtendedLocation(),\n\t\t\tVNetName: s.Vnet().Name,\n\t\t\tVNetResourceGroup: s.Vnet().ResourceGroup,\n\t\t\tFrontendIPConfigs: s.ControlPlaneOutboundLB().FrontendIPs,\n\t\t\tType: s.ControlPlaneOutboundLB().Type,\n\t\t\tSKU: s.ControlPlaneOutboundLB().SKU,\n\t\t\tBackendPoolName: s.ControlPlaneOutboundLB().BackendPool.Name,\n\t\t\tIdleTimeoutInMinutes: s.ControlPlaneOutboundLB().IdleTimeoutInMinutes,\n\t\t\tRole: infrav1.ControlPlaneOutboundRole,\n\t\t\tAdditionalTags: s.AdditionalTags(),\n\t\t})\n\t}\n\n\treturn specs\n}", "func (c *clbClient) Lbs() []string {\n\treturn c.loadBalancers\n}", "func ELBv2LoadBalancerList() (*elbv2.DescribeLoadBalancersOutput, error) {\n\tsvc := elbv2.New(session.New(), &aws.Config{\n\t\tRegion: aws.String(beego.AppConfig.String(\"awsRegion\")),\n\t})\n\n\tparams := &elbv2.DescribeLoadBalancersInput{}\n\n\tresp, err := svc.DescribeLoadBalancers(params)\n\n\tif err != nil {\n\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\tbeego.Error(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())\n\t\t\tif reqErr, ok := err.(awserr.RequestFailure); ok {\n\t\t\t\tbeego.Error(\n\t\t\t\t\treqErr.Code(),\n\t\t\t\t\treqErr.Message(),\n\t\t\t\t\treqErr.StatusCode(),\n\t\t\t\t\treqErr.RequestID(),\n\t\t\t\t)\n\t\t\t}\n\t\t} else 
{\n\t\t\tbeego.Debug(err.Error())\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func TestGetAllBackendServer(t *testing.T) {\n\tcloud, _, resp, err := beforeTestBlb()\n\tif err != nil {\n\t\tt.Errorf(\"beforeTestBlb err, err: %v\", err)\n\t}\n\tctx := context.Background()\n\t// bs is nil\n\tlb := &blb.LoadBalancer{\n\t\tBlbId: resp.LoadBalancerId,\n\t}\n\tbs, err := cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 0 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n\t// add bs\n\tbsAdd := []blb.BackendServer{\n\t\t{\n\t\t\tInstanceId: \"1\",\n\t\t},\n\t\t{\n\t\t\tInstanceId: \"2\",\n\t\t},\n\t}\n\targs := blb.AddBackendServersArgs{\n\t\tLoadBalancerId: lb.BlbId,\n\t\tBackendServerList: bsAdd,\n\t}\n\terr = cloud.clientSet.BLBClient.AddBackendServers(ctx, &args, &bce.SignOption{\n\t\tCustomSignFunc: CCEServiceSign,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"AddBackendServers err, err: %v\", err)\n\t}\n\t// get bs\n\tbs, err = cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 2 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n}", "func NewGetLoadBalancerOK() *GetLoadBalancerOK {\n\treturn &GetLoadBalancerOK{}\n}", "func (c *NSXClient) CreateLoadBalancer(opts LoadBalancerOptions) (string, bool, error) {\n\tctx := c.api.Context\n\tapi := c.api.ServicesApi\n\trouting := c.api.LogicalRoutingAndServicesApi\n\n\texistingServer, err := c.GetLoadBalancer(opts.Name)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\tif existingServer != nil {\n\t\treturn existingServer.IpAddress, true, nil\n\t}\n\n\tt0, resp, err := routing.ReadLogicalRouter(ctx, opts.Tier0)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"failed to read T0 router %s: %s\", opts.Tier0, errorString(resp, err))\n\t}\n\n\tt0Port, resp, err := 
routing.CreateLogicalRouterLinkPortOnTier0(ctx, manager.LogicalRouterLinkPortOnTier0{\n\t\tLogicalRouterId: t0.Id,\n\t\tDisplayName: \"lb-\" + opts.Name + \"-T1\",\n\t})\n\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create T0 Local router port %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tt1, resp, err := routing.CreateLogicalRouter(ctx, manager.LogicalRouter{\n\t\tRouterType: \"TIER1\",\n\t\tDisplayName: \"lb-\" + opts.Name,\n\t\tEdgeClusterId: t0.EdgeClusterId,\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create T1 router %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\t_, resp, err = routing.UpdateAdvertisementConfig(ctx, t1.Id, manager.AdvertisementConfig{\n\t\tAdvertiseLbVip: true,\n\t\tAdvertiseLbSnatIp: true,\n\t\tEnabled: true,\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to update advertisement config %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tc.Infof(\"Created T1 router %s/%s\", t1.DisplayName, t1.Id)\n\n\t_, resp, err = routing.CreateLogicalRouterLinkPortOnTier1(ctx, manager.LogicalRouterLinkPortOnTier1{\n\t\tLogicalRouterId: t1.Id,\n\t\tDisplayName: t0.DisplayName + \"-uplink\",\n\t\tLinkedLogicalRouterPortId: &common.ResourceReference{\n\t\t\tTargetType: \"LogicalPort\",\n\t\t\tTargetId: t0Port.Id,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"failed to link T1 (%s) to T0 (%s): %s\", t1.Id, t0Port.Id, errorString(resp, err))\n\t}\n\n\tgroup, err := c.CreateOrUpdateNSGroup(opts.Name, \"LogicalPort\", opts.MemberTags)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\tvar monitorID string\n\tif opts.Protocol == TCPProtocol {\n\t\tmonitorID, err = c.GetOrCreateTCPHealthCheck(opts.Ports[0])\n\t\tif err != nil {\n\t\t\treturn \"\", false, fmt.Errorf(\"unable to create tcp loadbalancer monitor: %v\", err)\n\t\t}\n\t} else {\n\t\tmonitorID, err = c.GetOrCreateHTTPHealthCheck(opts.Ports[0])\n\t\tif err != nil {\n\t\t\treturn 
\"\", false, fmt.Errorf(\"unable to create http loadbalancer monitor: %v\", err)\n\t\t}\n\t}\n\tpool, resp, err := api.CreateLoadBalancerPool(ctx, loadbalancer.LbPool{\n\t\tId: opts.Name,\n\t\tActiveMonitorIds: []string{monitorID},\n\t\tSnatTranslation: &loadbalancer.LbSnatTranslation{\n\t\t\tType_: \"LbSnatAutoMap\",\n\t\t},\n\t\tMemberGroup: &loadbalancer.PoolMemberGroup{\n\t\t\tGroupingObject: &common.ResourceReference{\n\t\t\t\tTargetType: \"NSGroup\",\n\t\t\t\tTargetId: group.Id,\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create load balancer pool %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tip, err := c.AllocateIP(opts.IPPool)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to allocate VIP %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tserver, resp, err := api.CreateLoadBalancerVirtualServer(ctx, loadbalancer.LbVirtualServer{\n\t\tId: opts.Name,\n\t\tEnabled: true,\n\t\tIpAddress: ip,\n\t\tIpProtocol: opts.Protocol,\n\t\tPorts: opts.Ports,\n\t\tPoolId: pool.Id,\n\t})\n\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create virtual server %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tlb := loadbalancer.LbService{\n\t\tDisplayName: opts.Name,\n\t\tAttachment: &common.ResourceReference{\n\t\t\tTargetType: \"LogicalRouter\",\n\t\t\tTargetId: t1.Id,\n\t\t},\n\t\tEnabled: true,\n\t\tErrorLogLevel: \"INFO\",\n\t\tSize: \"SMALL\",\n\t\tVirtualServerIds: []string{server.Id},\n\t}\n\n\t_, resp, err = api.CreateLoadBalancerService(c.api.Context, lb)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"unable to create load balancer %s: %s\", opts.Name, errorString(resp, err))\n\t}\n\n\tc.Infof(\"Created LoadBalancer service: %s/%s\", server.Id, ip)\n\treturn ip, false, nil\n}", "func (nat *NATCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, hosts []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tstatus := 
&v1.LoadBalancerStatus{}\n\n\t// step 0: ensure the nat gateway is exist\n\tnatProvider, err := nat.getNATClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnatGatewayId := service.ObjectMeta.Annotations[AnnotationsNATID]\n\tif natGatewayId == \"\" {\n\t\treturn nil, fmt.Errorf(\"The id of natGateway should be set by %v in annotations \", AnnotationsNATID)\n\t}\n\n\tnatGateway, err := natProvider.GetNATGateway(natGatewayId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif natGateway.RouterId != nat.cloudConfig.VpcOpts.ID {\n\t\treturn nil, fmt.Errorf(\"The natGateway is not in the same VPC with cluster. \")\n\t}\n\n\t//step 1:get floatingip id by floatingip address and check the floatingIp can be used\n\tdnatRuleList, err := listDnatRule(natProvider, natGatewayId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfloatingIp, err := nat.getFloatingIpInfoByIp(natProvider, service.Spec.LoadBalancerIP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tallDnatRuleInFloatIP, err := listAllDnatRuleByFloatIP(natProvider, service.Spec.LoadBalancerIP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !nat.checkFloatingIp(allDnatRuleInFloatIP, floatingIp, natGatewayId) {\n\t\treturn nil, fmt.Errorf(\"The floating ip %v is binding to port,and its not DNAT rule in natGateway %s\", floatingIp.FloatingIpAddress, natGateway.Name)\n\t}\n\n\t//step 2: get podList (with labels/selectors of this service),then get the backend to create DNAT rule\n\tpodList, err := nat.getPods(service.Name, service.Namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar runningPod v1.Pod\n\tfor _, pod := range podList.Items {\n\t\tif podutil.IsPodReady(&pod) {\n\t\t\trunningPod = pod\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(runningPod.Status.HostIP) == 0 {\n\t\treturn nil, fmt.Errorf(\"There is no availabel endpoint for the service %s\", service.Name)\n\t}\n\n\tsubnetId := nat.getSubnetIdForPod(runningPod, hosts)\n\tnetPort, err := nat.getPortByFixedIp(natProvider, subnetId, 
runningPod.Status.HostIP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar errs []error\n\t// step1: create dnat rule\n\tfor _, port := range service.Spec.Ports {\n\t\t//check if the DNAT rule has been created by the service,if exists continue\n\t\tif nat.getDNATRule(dnatRuleList, &port) != nil {\n\t\t\tklog.V(4).Infoln(\"DNAT rule already exists, no need to create\")\n\t\t\tcontinue\n\t\t}\n\n\t\tklog.V(4).Infof(\"port:%v dnat rule not exist,start create dnat rule\", port)\n\n\t\terr := nat.ensureCreateDNATRule(natProvider, &port, netPort, floatingIp, natGatewayId)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"EnsureCreateDNATRule Failed: %v\", err))\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// get service with loadbalancer type and loadbalancer ip\n\tlbServers, _ := nat.kubeClient.Services(\"\").List(context.TODO(), metav1.ListOptions{})\n\tvar lbPorts []v1.ServicePort\n\tfor _, svc := range lbServers.Items {\n\t\tlbType := svc.Annotations[ElbClass]\n\t\tif lbType != \"dnat\" || svc.Spec.LoadBalancerIP != service.Spec.LoadBalancerIP {\n\t\t\tcontinue\n\t\t}\n\t\tklog.V(4).Infof(\"exist dnat svc:%v\", svc)\n\t\tlbPorts = append(lbPorts, svc.Spec.Ports...)\n\t}\n\n\tfor _, dnatRule := range dnatRuleList.DNATRules {\n\t\tif dnatRule.FloatingIpAddress != service.Spec.LoadBalancerIP {\n\t\t\tcontinue\n\t\t}\n\n\t\tif nat.getServicePort(&dnatRule, lbPorts) != nil {\n\t\t\tklog.V(4).Infoln(\"port exist,no need to delete\")\n\t\t\tcontinue\n\t\t}\n\n\t\tklog.V(4).Infof(\"rule:%v port not exist,start delete dnat rule\", dnatRule)\n\n\t\terr := nat.ensureDeleteDNATRule(natProvider, &dnatRule, natGatewayId)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"EnsureDeleteDNATRule Failed: %v\", err))\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tif len(errs) != 0 {\n\t\treturn nil, utilerrors.NewAggregate(errs)\n\t}\n\tstatus.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: service.Spec.LoadBalancerIP})\n\treturn status, nil\n}", "func TestAddPool(t 
*testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.20.10\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.20.10\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.20.10'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\ttwentyPool := mkPool(poolBUID, \"pool-b\", []string{\"10.0.20.0/24\"})\n\t_, err := fixture.poolClient.Create(context.Background(), twentyPool, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif 
await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (c *Client) WaitUntilLoadBalancerExists(ctx context.Context, input *DescribeLoadBalancersInput, opts ...aws.WaiterOption) error {\n\tw := aws.Waiter{\n\t\tName: \"WaitUntilLoadBalancerExists\",\n\t\tMaxAttempts: 40,\n\t\tDelay: aws.ConstantWaiterDelay(15 * time.Second),\n\t\tAcceptors: []aws.WaiterAcceptor{\n\t\t\t{\n\t\t\t\tState: aws.SuccessWaiterState,\n\t\t\t\tMatcher: aws.StatusWaiterMatch,\n\t\t\t\tExpected: 200,\n\t\t\t},\n\t\t\t{\n\t\t\t\tState: aws.RetryWaiterState,\n\t\t\t\tMatcher: aws.ErrorWaiterMatch,\n\t\t\t\tExpected: \"LoadBalancerNotFound\",\n\t\t\t},\n\t\t},\n\t\tLogger: c.Config.Logger,\n\t\tNewRequest: func(opts []aws.Option) (*aws.Request, error) {\n\t\t\tvar inCpy *DescribeLoadBalancersInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq := c.DescribeLoadBalancersRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req.Request, nil\n\t\t},\n\t}\n\tw.ApplyOptions(opts...)\n\n\treturn w.Wait(ctx)\n}", "func (s *Service) Get(ctx context.Context, spec azure.Spec) (interface{}, error) {\n\tinternalLBSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn network.LoadBalancer{}, errors.New(\"invalid internal load balancer specification\")\n\t}\n\t//lbName := fmt.Sprintf(\"%s-api-internallb\", s.Scope.Cluster.Name)\n\tlb, err := s.Client.Get(ctx, s.Scope.ClusterConfig.ResourceGroup, internalLBSpec.Name, \"\")\n\tif err != nil && azure.ResourceNotFound(err) {\n\t\treturn nil, errors.Wrapf(err, \"load balancer %s not found\", internalLBSpec.Name)\n\t} else if err != nil {\n\t\treturn lb, err\n\t}\n\treturn lb, nil\n}", "func ExampleELB_CreateLoadBalancer_shared04() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: 
aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t\tScheme: aws.String(\"internal\"),\n\t\tSecurityGroups: []*string{\n\t\t\taws.String(\"sg-a61988c3\"),\n\t\t},\n\t\tSubnets: []*string{\n\t\t\taws.String(\"subnet-15aaab61\"),\n\t\t},\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, 
aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) AddNetscalerLoadBalancer(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddNetscalerLoadBalancer\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).AddNetscalerLoadBalancer), p)\n}", "func TestLoadBalancerPolicyRequestHashHeader(t *testing.T) {\n\trh, c, done := setup(t)\n\tdefer done()\n\n\ts1 := fixture.NewService(\"app\").WithPorts(\n\t\tv1.ServicePort{Port: 80, TargetPort: intstr.FromInt(8080)},\n\t\tv1.ServicePort{Port: 8080, TargetPort: intstr.FromInt(8080)})\n\trh.OnAdd(s1)\n\n\tproxy1 := fixture.NewProxy(\"simple\").\n\t\tWithFQDN(\"www.example.com\").\n\t\tWithSpec(contour_api_v1.HTTPProxySpec{\n\t\t\tRoutes: []contour_api_v1.Route{{\n\t\t\t\tConditions: matchconditions(prefixMatchCondition(\"/cart\")),\n\t\t\t\tLoadBalancerPolicy: &contour_api_v1.LoadBalancerPolicy{\n\t\t\t\t\tStrategy: \"RequestHash\",\n\t\t\t\t\tRequestHashPolicies: []contour_api_v1.RequestHashPolicy{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tTerminal: true,\n\t\t\t\t\t\t\tHeaderHashOptions: &contour_api_v1.HeaderHashOptions{\n\t\t\t\t\t\t\t\tHeaderName: \"X-Some-Header\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tHeaderHashOptions: &contour_api_v1.HeaderHashOptions{\n\t\t\t\t\t\t\t\tHeaderName: \"X-Some-Other-Header\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tServices: []contour_api_v1.Service{{\n\t\t\t\t\tName: s1.Name,\n\t\t\t\t\tPort: 80,\n\t\t\t\t}},\n\t\t\t}},\n\t\t})\n\trh.OnAdd(proxy1)\n\n\tc.Request(clusterType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tResources: 
resources(t,\n\t\t\tDefaultCluster(&envoy_cluster_v3.Cluster{\n\t\t\t\tName: s1.Namespace + \"/\" + s1.Name + \"/80/1a2ffc1fef\",\n\t\t\t\tClusterDiscoveryType: envoy_v3.ClusterDiscoveryType(envoy_cluster_v3.Cluster_EDS),\n\t\t\t\tAltStatName: s1.Namespace + \"_\" + s1.Name + \"_80\",\n\t\t\t\tEdsClusterConfig: &envoy_cluster_v3.Cluster_EdsClusterConfig{\n\t\t\t\t\tEdsConfig: envoy_v3.ConfigSource(\"contour\"),\n\t\t\t\t\tServiceName: s1.Namespace + \"/\" + s1.Name,\n\t\t\t\t},\n\t\t\t\tLbPolicy: envoy_cluster_v3.Cluster_RING_HASH,\n\t\t\t}),\n\t\t),\n\t\tTypeUrl: clusterType,\n\t})\n\n\tc.Request(routeType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tResources: resources(t,\n\t\t\tenvoy_v3.RouteConfiguration(\"ingress_http\",\n\t\t\t\tenvoy_v3.VirtualHost(\"www.example.com\",\n\t\t\t\t\t&envoy_route_v3.Route{\n\t\t\t\t\t\tMatch: routePrefix(\"/cart\"),\n\t\t\t\t\t\tAction: withRequestHashPolicySpecifiers(\n\t\t\t\t\t\t\trouteCluster(\"default/app/80/1a2ffc1fef\"),\n\t\t\t\t\t\t\thashPolicySpecifier{headerName: \"X-Some-Header\", terminal: true},\n\t\t\t\t\t\t\thashPolicySpecifier{headerName: \"X-Some-Other-Header\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t\tTypeUrl: routeType,\n\t})\n}", "func TestRemoveServiceLabel(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"blue\",\n\t\t},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tsvc1 := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"color\": \"blue\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: 
slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\tfixture.coreCS.Tracker().Add(\n\t\tsvc1,\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive exactly zero ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tsvc1 = svc1.DeepCopy()\n\tsvc1.Labels = map[string]string{\n\t\t\"color\": \"green\",\n\t}\n\n\t_, err := fixture.svcClient.Services(svc1.Namespace).Update(context.Background(), svc1, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) NewListNetscalerLoadBalancersParams() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"NewListNetscalerLoadBalancersParams\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).NewListNetscalerLoadBalancersParams))\n}", "func TestRangesFull(t *testing.T) {\n\tinitDone := make(chan struct{})\n\t// A single /32 can't be used to allocate since we always reserve 2 
IPs,\n\t// the network and broadcast address, which in the case of a /32 means it is always full.\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.123/32\", \"FF::123/128\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv6Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-a\" {\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn 
true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\treturn false\n\t\t}\n\n\t\tif svc.Name != \"service-b\" {\n\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected two service updates\")\n\t}\n}", "func ExampleELB_CreateLoadBalancer_shared03() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tAvailabilityZones: []*string{\n\t\t\taws.String(\"us-west-2a\"),\n\t\t},\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: 
aws.Int64(443),\n\t\t\t\tProtocol: aws.String(\"HTTPS\"),\n\t\t\t\tSSLCertificateId: aws.String(\"arn:aws:iam::123456789012:server-certificate/my-server-cert\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print 
the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func HandleLoadbalancerGetSuccessfully(t *testing.T) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"GET\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestHeader(t, r, \"Accept\", \"application/json\")\n\n\t\tfmt.Fprintf(w, SingleLoadbalancerBody)\n\t})\n}", "func (c *MockLoadBalancersClient) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, error) {\n\tvar l []network.LoadBalancer\n\tfor _, lb := range c.LBs {\n\t\tl = append(l, lb)\n\t}\n\treturn l, nil\n}", "func (l *SharedLoadBalancer) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tklog.Infof(\"EnsureLoadBalancer: called with service %s/%s, node: %d\",\n\t\tservice.Namespace, service.Name, len(nodes))\n\n\tif err := ensureLoadBalancerValidation(service, nodes); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// get exits or create a new ELB instance\n\tloadbalancer, err := l.getLoadBalancerInstance(ctx, clusterName, service)\n\tspecifiedID := getStringFromSvsAnnotation(service, ElbID, \"\")\n\tif common.IsNotFound(err) && specifiedID != \"\" {\n\t\treturn nil, err\n\t}\n\tif err != nil && common.IsNotFound(err) {\n\t\tsubnetID, e := l.getSubnetID(service, nodes[0])\n\t\tif e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\tloadbalancer, err = l.createLoadbalancer(clusterName, subnetID, service)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// query ELB listeners list\n\tlisteners, err := l.sharedELBClient.ListListeners(&elbmodel.ListListenersRequest{LoadbalancerId: &loadbalancer.Id})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, port := range service.Spec.Ports {\n\t\tlistener 
:= l.filterListenerByPort(listeners, service, port)\n\t\t// add or update listener\n\t\tif listener == nil {\n\t\t\tlistener, err = l.createListener(loadbalancer.Id, service, port)\n\t\t} else {\n\t\t\terr = l.updateListener(listener, service)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlisteners = popListener(listeners, listener.Id)\n\n\t\t// query pool or create pool\n\t\tpool, err := l.getPool(loadbalancer.Id, listener.Id)\n\t\tif err != nil && common.IsNotFound(err) {\n\t\t\tpool, err = l.createPool(listener, service)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// add new members and remove the obsolete members.\n\t\tif err = l.addOrRemoveMembers(loadbalancer, service, pool, port, nodes); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// add or remove health monitor\n\t\tif err = l.ensureHealthCheck(loadbalancer.Id, pool, port, service, nodes[0]); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif specifiedID == \"\" {\n\t\t// All remaining listeners are obsolete, delete them\n\t\terr = l.deleteListeners(loadbalancer.Id, listeners)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tingressIP := loadbalancer.VipAddress\n\tpublicIPAddr, err := l.createOrAssociateEIP(loadbalancer, service)\n\tif err == nil {\n\t\tif publicIPAddr != \"\" {\n\t\t\tingressIP = publicIPAddr\n\t\t}\n\n\t\treturn &corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{{IP: ingressIP}},\n\t\t}, nil\n\t}\n\n\t// rollback\n\tklog.Errorf(\"rollback:failed to create the EIP, delete ELB instance created, error: %s\", err)\n\terrs := []error{err}\n\terr = l.EnsureLoadBalancerDeleted(ctx, clusterName, service)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t\tklog.Errorf(\"rollback: error deleting ELB instance: %s\", err)\n\t}\n\treturn nil, errors.NewAggregate(errs)\n}", "func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) 
(*v1.LoadBalancerStatus, error) {\n\tlb, err := l.fetchLoadBalancer(ctx, clusterName, service)\n\tswitch err {\n\tcase nil:\n\t\t// continue\n\tcase LoadBalancerNotFound:\n\t\t// create LoadBalancer\n\t\tlb, err = l.createLoadBalancer(ctx, clusterName, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\t// any kind of Error\n\t\tklog.Errorf(\"error getting loadbalancer for service %s: %v\", service.Name, err)\n\t\treturn nil, err\n\t}\n\n\tif service.Spec.LoadBalancerIP != \"\" && service.Spec.LoadBalancerIP != lb.IP[0].IPAddress {\n\t\terr = l.deleteLoadBalancer(ctx, lb, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlb, err = l.createLoadBalancer(ctx, clusterName, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif lb.Status != scwlb.LbStatusReady {\n\t\treturn nil, LoadBalancerNotReady\n\t}\n\n\terr = l.updateLoadBalancer(ctx, lb, service, nodes)\n\tif err != nil {\n\t\tklog.Errorf(\"error updating loadbalancer for service %s: %v\", service.Name, err)\n\t\treturn nil, err\n\t}\n\n\tstatus := &v1.LoadBalancerStatus{}\n\tstatus.Ingress = make([]v1.LoadBalancerIngress, len(lb.IP))\n\tfor idx, ip := range lb.IP {\n\t\tif getUseHostname(service) {\n\t\t\tstatus.Ingress[idx].Hostname = ip.Reverse\n\t\t} else {\n\t\t\tstatus.Ingress[idx].IP = ip.IPAddress\n\t\t}\n\t}\n\n\treturn status, nil\n}", "func (s) TestBalancer_TwoAddresses_OOBThenPerCall(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\tsrv1 := startServer(t, reportBoth)\n\tsrv2 := startServer(t, reportBoth)\n\n\t// srv1 starts loaded and srv2 starts without load; ensure RPCs are routed\n\t// disproportionately to srv2 (10:1).\n\tsrv1.oobMetrics.SetQPS(10.0)\n\tsrv1.oobMetrics.SetApplicationUtilization(1.0)\n\n\tsrv2.oobMetrics.SetQPS(10.0)\n\tsrv2.oobMetrics.SetApplicationUtilization(.1)\n\n\t// For per-call metrics (not used initially), srv2 reports that it is\n\t// loaded and 
srv1 reports low load. After confirming OOB works, switch to\n\t// per-call and confirm the new routing weights are applied.\n\tsrv1.callMetrics.SetQPS(10.0)\n\tsrv1.callMetrics.SetApplicationUtilization(.1)\n\n\tsrv2.callMetrics.SetQPS(10.0)\n\tsrv2.callMetrics.SetApplicationUtilization(1.0)\n\n\tsc := svcConfig(t, oobConfig)\n\tif err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil {\n\t\tt.Fatalf(\"Error starting client: %v\", err)\n\t}\n\taddrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}}\n\tsrv1.R.UpdateState(resolver.State{Addresses: addrs})\n\n\t// Call each backend once to ensure the weights have been received.\n\tensureReached(ctx, t, srv1.Client, 2)\n\n\t// Wait for the weight update period to allow the new weights to be processed.\n\ttime.Sleep(weightUpdatePeriod)\n\tcheckWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10})\n\n\t// Update to per-call weights.\n\tc := svcConfig(t, perCallConfig)\n\tparsedCfg := srv1.R.CC.ParseServiceConfig(c)\n\tif parsedCfg.Err != nil {\n\t\tpanic(fmt.Sprintf(\"Error parsing config %q: %v\", c, parsedCfg.Err))\n\t}\n\tsrv1.R.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parsedCfg})\n\n\t// Wait for the weight update period to allow the new weights to be processed.\n\ttime.Sleep(weightUpdatePeriod)\n\tcheckWeights(ctx, t, srvWeight{srv1, 10}, srvWeight{srv2, 1})\n}", "func TestDisablePool(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) 
bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, 500*time.Millisecond)\n\n\tpoolA.Spec.Disabled = true\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].externallyDisabled {\n\t\tt.Fatal(\"The range has not been externally disabled\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\tt.Error(\"Expected service status update to occur on service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tserviceB := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-b\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceBUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\t_, err = 
fixture.svcClient.Services(\"default\").Create(context.Background(), serviceB, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolA.Spec.Disabled = false\n\n\t_, err = fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func TestAllocOnInit(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.123\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: 
slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.124\",\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.124\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"No service updates expected\")\n\n\t\treturn false\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tawait.Block()\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.123\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.124\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n}", "func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {\n\treturn hns.AddLoadBalancer(endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)\n}", "func waitForIngressLB(ctx context.Context, cl client.Client, timeout time.Duration, ns, name string) (string, error) {\n\tnsName := types.NamespacedName{\n\t\tNamespace: ns,\n\t\tName: name,\n\t}\n\ting := &networkingv1.Ingress{}\n\terr := wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {\n\t\tif err := cl.Get(ctx, nsName, ing); err != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif ing.Status.LoadBalancer.Ingress == nil {\n\t\t\treturn 
false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"timed out waiting for ingress %s/%s load-balancer: %v\", ns, name, err)\n\t}\n\n\tswitch {\n\tcase len(ing.Status.LoadBalancer.Ingress[0].Hostname) > 0:\n\t\treturn ing.Status.LoadBalancer.Ingress[0].Hostname, nil\n\tcase len(ing.Status.LoadBalancer.Ingress[0].IP) > 0:\n\t\treturn ing.Status.LoadBalancer.Ingress[0].IP, nil\n\t}\n\treturn \"\", fmt.Errorf(\"failed to determine ingress %s/%s load-balancer: %v\", ns, name, err)\n}", "func (az AzureClient) GetAllLoadBalancer() (network.LoadBalancerListResultPage, error) {\n\tlbClient := GetLbClient(Client.config)\n\tctx, cancel := context.WithTimeout(context.Background(), 6000*time.Second)\n\tdefer cancel()\n\treturn lbClient.List(ctx, Client.config.ResourceGroup)\n}", "func (o *CreateLoadBalancerRequest) GetSubnetsOk() (*[]string, bool) {\n\tif o == nil || o.Subnets == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Subnets, true\n}", "func (_m *ELBv2APIClient) DescribeLoadBalancers(_a0 context.Context, _a1 *elasticloadbalancingv2.DescribeLoadBalancersInput, _a2 ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DescribeLoadBalancersOutput, error) {\n\t_va := make([]interface{}, len(_a2))\n\tfor _i := range _a2 {\n\t\t_va[_i] = _a2[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _a0, _a1)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 *elasticloadbalancingv2.DescribeLoadBalancersOutput\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DescribeLoadBalancersOutput, error)); ok {\n\t\treturn rf(_a0, _a1, _a2...)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) *elasticloadbalancingv2.DescribeLoadBalancersOutput); ok {\n\t\tr0 = rf(_a0, _a1, _a2...)\n\t} 
else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*elasticloadbalancingv2.DescribeLoadBalancersOutput)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *elasticloadbalancingv2.DescribeLoadBalancersInput, ...func(*elasticloadbalancingv2.Options)) error); ok {\n\t\tr1 = rf(_a0, _a1, _a2...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (l *loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {\n\tlb, err := l.fetchLoadBalancer(ctx, clusterName, service)\n\tif err != nil {\n\t\tif err == LoadBalancerNotFound {\n\t\t\tklog.Infof(\"no load balancer found for service %s\", service.Name)\n\t\t\treturn nil, false, nil\n\t\t}\n\n\t\tklog.Errorf(\"error getting load balancer for service %s: %v\", service.Name, err)\n\t\treturn nil, false, err\n\t}\n\n\tstatus := &v1.LoadBalancerStatus{}\n\tstatus.Ingress = make([]v1.LoadBalancerIngress, len(lb.IP))\n\tfor idx, ip := range lb.IP {\n\t\tif getUseHostname(service) {\n\t\t\tstatus.Ingress[idx].Hostname = ip.Reverse\n\t\t} else {\n\t\t\tstatus.Ingress[idx].IP = ip.IPAddress\n\t\t}\n\t}\n\n\treturn status, true, nil\n}", "func ExampleELB_CreateLoadBalancer_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t\tSecurityGroups: []*string{\n\t\t\taws.String(\"sg-a61988c3\"),\n\t\t},\n\t\tSubnets: []*string{\n\t\t\taws.String(\"subnet-15aaab61\"),\n\t\t},\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase 
elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (o *DeregisterVmsInLoadBalancerRequest) GetLoadBalancerNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.LoadBalancerName, true\n}", "func ExampleELB_CreateLoadBalancer_shared01() 
{\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tAvailabilityZones: []*string{\n\t\t\taws.String(\"us-west-2a\"),\n\t\t},\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase 
elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (o *CreateLoadBalancerRequest) GetTagsOk() (*[]ResourceTag, bool) {\n\tif o == nil || o.Tags == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Tags, true\n}", "func (_m *Client) GetLoadBalancers(ctx context.Context, rgname string, logger log.Logger) ([]network.LoadBalancer, error) {\n\tret := _m.Called(ctx, rgname, logger)\n\n\tvar r0 []network.LoadBalancer\n\tif rf, ok := ret.Get(0).(func(context.Context, string, log.Logger) []network.LoadBalancer); ok {\n\t\tr0 = rf(ctx, rgname, logger)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]network.LoadBalancer)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, string, log.Logger) error); ok {\n\t\tr1 = rf(ctx, rgname, logger)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (s) TestBalancer_TwoAddresses_ReportingDisabled(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\tsrv1 := startServer(t, reportNone)\n\tsrv2 := startServer(t, reportNone)\n\n\tsc := svcConfig(t, perCallConfig)\n\tif err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil {\n\t\tt.Fatalf(\"Error starting client: %v\", err)\n\t}\n\taddrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}}\n\tsrv1.R.UpdateState(resolver.State{Addresses: addrs})\n\n\t// Perform many RPCs to ensure the LB policy works with 2 addresses.\n\tfor i := 0; i < 20; i++ {\n\t\troundrobin.CheckRoundRobinRPCs(ctx, srv1.Client, addrs)\n\t}\n}", "func TestRandomLBWhenNodeFailBalanced(t *testing.T) {\n\tdefer func() {\n\t\t// clear 
healthStore\n\t\thealthStore = sync.Map{}\n\t}()\n\n\tpool := makePool(4)\n\tvar hosts []types.Host\n\tvar unhealthyIdx = 2\n\tfor i := 0; i < 4; i++ {\n\t\thost := &mockHost{\n\t\t\taddr: pool.Get(),\n\t\t}\n\t\tif i == unhealthyIdx {\n\t\t\thost.SetHealthFlag(api.FAILED_ACTIVE_HC)\n\t\t}\n\t\thosts = append(hosts, host)\n\t}\n\n\ths := &hostSet{}\n\ths.setFinalHost(hosts)\n\tlb := newRandomLoadBalancer(nil, hs)\n\ttotal := 1000000\n\trunCase := func(subTotal int) {\n\t\tresults := map[string]int{}\n\t\tfor i := 0; i < subTotal; i++ {\n\t\t\th := lb.ChooseHost(nil)\n\t\t\tv, ok := results[h.AddressString()]\n\t\t\tif !ok {\n\t\t\t\tv = 0\n\t\t\t}\n\t\t\tresults[h.AddressString()] = v + 1\n\t\t}\n\t\tfor i := 0; i < 4; i++ {\n\t\t\taddr := hosts[i].AddressString()\n\t\t\trate := float64(results[addr]) / float64(subTotal)\n\t\t\texpected := 0.33333\n\t\t\tif i == unhealthyIdx {\n\t\t\t\texpected = 0.000\n\t\t\t}\n\t\t\tif math.Abs(rate-expected) > 0.1 { // no lock, have deviation 10% is acceptable\n\t\t\t\tt.Errorf(\"%s request rate is %f, expected %f\", addr, rate, expected)\n\t\t\t}\n\t\t\tt.Logf(\"%s request rate is %f, request count: %d\", addr, rate, results[addr])\n\t\t}\n\t}\n\t// simple test\n\trunCase(total)\n\t// concurr\n\twg := sync.WaitGroup{}\n\tsubTotal := total / 10\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\trunCase(subTotal)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}", "func (s *Service) CreateOrUpdate(ctx context.Context, spec azure.Spec) error {\n\tinternalLBSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn errors.New(\"invalid internal load balancer specification\")\n\t}\n\tklog.V(2).Infof(\"creating internal load balancer %s\", internalLBSpec.Name)\n\tprobeName := \"tcpHTTPSProbe\"\n\tfrontEndIPConfigName := \"controlplane-internal-lbFrontEnd\"\n\tbackEndAddressPoolName := \"controlplane-internal-backEndPool\"\n\tidPrefix := fmt.Sprintf(\"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers\", 
s.Scope.SubscriptionID, s.Scope.ClusterConfig.ResourceGroup)\n\tlbName := internalLBSpec.Name\n\n\tklog.V(2).Infof(\"getting subnet %s\", internalLBSpec.SubnetName)\n\tsubnetInterface, err := subnets.NewService(s.Scope).Get(ctx, &subnets.Spec{Name: internalLBSpec.SubnetName, VnetName: internalLBSpec.VnetName})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubnet, ok := subnetInterface.(network.Subnet)\n\tif !ok {\n\t\treturn errors.New(\"subnet Get returned invalid interface\")\n\t}\n\tklog.V(2).Infof(\"successfully got subnet %s\", internalLBSpec.SubnetName)\n\n\tfuture, err := s.Client.CreateOrUpdate(ctx,\n\t\ts.Scope.ClusterConfig.ResourceGroup,\n\t\tlbName,\n\t\tnetwork.LoadBalancer{\n\t\t\tSku: &network.LoadBalancerSku{Name: network.LoadBalancerSkuNameStandard},\n\t\t\tLocation: to.StringPtr(s.Scope.ClusterConfig.Location),\n\t\t\tLoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{\n\t\t\t\tFrontendIPConfigurations: &[]network.FrontendIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &frontEndIPConfigName,\n\t\t\t\t\t\tFrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{\n\t\t\t\t\t\t\tPrivateIPAllocationMethod: network.Static,\n\t\t\t\t\t\t\tSubnet: &subnet,\n\t\t\t\t\t\t\tPrivateIPAddress: to.StringPtr(internalLBSpec.IPAddress),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBackendAddressPools: &[]network.BackendAddressPool{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &backEndAddressPoolName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProbes: &[]network.Probe{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &probeName,\n\t\t\t\t\t\tProbePropertiesFormat: &network.ProbePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.ProbeProtocolTCP,\n\t\t\t\t\t\t\tPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIntervalInSeconds: to.Int32Ptr(15),\n\t\t\t\t\t\t\tNumberOfProbes: to.Int32Ptr(4),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLoadBalancingRules: &[]network.LoadBalancingRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: 
to.StringPtr(\"LBRuleHTTPS\"),\n\t\t\t\t\t\tLoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tLoadDistribution: network.LoadDistributionDefault,\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tBackendAddressPool: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/backendAddressPools/%s\", idPrefix, lbName, backEndAddressPoolName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tProbe: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/probes/%s\", idPrefix, lbName, probeName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot create load balancer\")\n\t}\n\n\terr = future.WaitForCompletionRef(ctx, s.Client.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot get internal load balancer create or update future response\")\n\t}\n\n\t_, err = future.Result(s.Client)\n\tklog.V(2).Infof(\"successfully created internal load balancer %s\", internalLBSpec.Name)\n\treturn err\n}", "func ExampleELB_CreateLoadBalancer_shared02() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerInput{\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(443),\n\t\t\t\tProtocol: 
aws.String(\"HTTPS\"),\n\t\t\t\tSSLCertificateId: aws.String(\"arn:aws:iam::123456789012:server-certificate/my-server-cert\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t\tSecurityGroups: []*string{\n\t\t\taws.String(\"sg-a61988c3\"),\n\t\t},\n\t\tSubnets: []*string{\n\t\t\taws.String(\"subnet-15aaab61\"),\n\t\t},\n\t}\n\n\tresult, err := svc.CreateLoadBalancer(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeDuplicateAccessPointNameException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateAccessPointNameException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyAccessPointsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyAccessPointsException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeSubnetNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeSubnetNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSubnetException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSubnetException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSecurityGroupException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSecurityGroupException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidSchemeException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidSchemeException, aerr.Error())\n\t\t\tcase elb.ErrCodeTooManyTagsException:\n\t\t\t\tfmt.Println(elb.ErrCodeTooManyTagsException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateTagKeysException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tcase 
elb.ErrCodeOperationNotPermittedException:\n\t\t\t\tfmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) GetLoadBalancerID(name interface{}, opts ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{name}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetLoadBalancerID\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).GetLoadBalancerID), varargs...)\n}", "func CleanupKubernetesLoadbalancers(ctx context.Context, log logr.Logger, client openstackclient.Loadbalancing, subnetID, clusterName string) error {\n\tlbList, err := client.ListLoadbalancers(loadbalancers.ListOpts{\n\t\tVipSubnetID: subnetID,\n\t})\n\n\t// do we need that if we anyway want to delete the gardener managed subnet ?\n\tk8sSvcPrefix := servicePrefix + clusterName\n\tres := make(chan error, len(lbList))\n\tacceptableStates := map[string]struct{}{\n\t\t\"ACTIVE\": {},\n\t\t\"ERROR\": {},\n\t}\n\tw := sync.WaitGroup{}\n\tb := wait.Backoff{\n\t\tDuration: 1 * time.Second,\n\t\tJitter: 1.2,\n\t\tSteps: 10,\n\t}\n\tfor _, lb := range lbList {\n\t\tlb := lb\n\t\tif !strings.HasPrefix(lb.Name, k8sSvcPrefix) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, ok := acceptableStates[lb.ProvisioningStatus]; !ok {\n\t\t\treturn fmt.Errorf(\"load balancer %s can't be updated currently due to provisioning state: %s\", lb.ID, lb.ProvisioningStatus)\n\t\t}\n\n\t\tlog.Info(\"deleting orphan loadbalancer\", \"ID\", lb.ID, \"name\", lb.Name)\n\t\tw.Add(1)\n\t\tgo func() {\n\t\t\tdefer w.Done()\n\t\t\tif err := client.DeleteLoadbalancer(lb.ID, loadbalancers.DeleteOpts{Cascade: true}); err != nil {\n\t\t\t\tres <- 
err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr := wait.ExponentialBackoffWithContext(ctx, b, func() (done bool, err error) {\n\t\t\t\tlb, err := client.GetLoadbalancer(lb.ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tif lb == nil {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t\treturn false, nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tres <- fmt.Errorf(\"failed to ensure loadbalancers are deleted: %v\", err)\n\t\t\t}\n\t\t}()\n\t}\n\tw.Wait()\n\tclose(res)\n\tfor errIn := range res {\n\t\terr = errors.Join(err, errIn)\n\t}\n\treturn err\n}", "func NewLoadBalancer() *LoadBalancer {\n\tlb := &LoadBalancer{\n\t\tnodes: make(map[string]*weightedNode),\n\t}\n\treturn lb\n}", "func TestAddRange(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.20.10\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tpoolA.Spec.Cidrs = 
append(poolA.Spec.Cidrs, cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\tCidr: \"10.0.20.0/24\",\n\t})\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.20.10\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.20.10'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func TestMinimalGCEInternalLoadBalancer(t *testing.T) {\n\tnewIntegrationTest(\"minimal-gce-ilb.example.com\", \"minimal_gce_ilb\").\n\t\twithAddons(\n\t\t\tdnsControllerAddon,\n\t\t\tgcpCCMAddon,\n\t\t\tgcpPDCSIAddon,\n\t\t).\n\t\trunTestTerraformGCE(t)\n}", "func DeleteLoadBalancer(t *testing.T, client *gophercloud.ServiceClient, lbID string) {\n\tt.Logf(\"Attempting to delete loadbalancer %s\", lbID)\n\n\tif err := loadbalancers.Delete(client, lbID).ExtractErr(); err != nil {\n\t\tif _, ok := err.(gophercloud.ErrDefault404); !ok {\n\t\t\tt.Fatalf(\"Unable to delete loadbalancer: %v\", err)\n\t\t}\n\t}\n\n\tt.Logf(\"Waiting for loadbalancer %s to delete\", lbID)\n\n\tif err := WaitForLoadBalancerState(client, lbID, \"DELETED\"); err != nil {\n\t\tt.Fatalf(\"Loadbalancer did not delete in time: %s\", err)\n\t}\n\n\tt.Logf(\"Successfully deleted loadbalancer %s\", lbID)\n}", "func (c *AzureModelContext) NameForLoadBalancer() string {\n\treturn \"api-\" + c.ClusterName()\n}", "func (m *MockLoadBalancerServiceIface) NewListNetscalerLoadBalancersParams() 
*ListNetscalerLoadBalancersParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewListNetscalerLoadBalancersParams\")\n\tret0, _ := ret[0].(*ListNetscalerLoadBalancersParams)\n\treturn ret0\n}", "func (o GetLoadBalancersBalancerOutput) Tags() pulumi.MapOutput {\n\treturn o.ApplyT(func(v GetLoadBalancersBalancer) map[string]interface{} { return v.Tags }).(pulumi.MapOutput)\n}", "func (s) TestOutlierDetection(t *testing.T) {\n\tedsLBCh := testutils.NewChannel()\n\txdsC, cleanup := setup(edsLBCh)\n\tdefer cleanup()\n\tbuilder := balancer.Get(Name)\n\tedsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{})\n\tif edsB == nil {\n\t\tt.Fatalf(\"builder.Build(%s) failed and returned nil\", Name)\n\t}\n\tdefer edsB.Close()\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\t// Update Cluster Resolver with Client Conn State with Outlier Detection\n\t// configuration present. This is what will be passed down to this balancer,\n\t// as CDS Balancer gets the Cluster Update and converts the Outlier\n\t// Detection data to an Outlier Detection configuration and sends it to this\n\t// level.\n\tif err := edsB.UpdateClientConnState(balancer.ClientConnState{\n\t\tResolverState: xdsclient.SetClient(resolver.State{}, xdsC),\n\t\tBalancerConfig: newLBConfigWithOneEDSAndOutlierDetection(testEDSServcie, noopODCfg),\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := xdsC.WaitForWatchEDS(ctx); err != nil {\n\t\tt.Fatalf(\"xdsClient.WatchEndpoints failed with error: %v\", err)\n\t}\n\n\t// Invoke EDS Callback - causes child balancer to be built and then\n\t// UpdateClientConnState called on it with Outlier Detection as a direct\n\t// child.\n\txdsC.InvokeWatchEDSCallback(\"\", defaultEndpointsUpdate, nil)\n\tedsLB, err := waitForNewChildLB(ctx, edsLBCh)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlocalityID := xdsinternal.LocalityID{Zone: \"zone\"}\n\t// The priority configuration generated should have 
Outlier Detection as a\n\t// direct child due to Outlier Detection being turned on.\n\tpCfgWant := &priority.LBConfig{\n\t\tChildren: map[string]*priority.Child{\n\t\t\t\"priority-0-0\": {\n\t\t\t\tConfig: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\tName: outlierdetection.Name,\n\t\t\t\t\tConfig: &outlierdetection.LBConfig{\n\t\t\t\t\t\tInterval: 1<<63 - 1,\n\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\t\t\tName: clusterimpl.Name,\n\t\t\t\t\t\t\tConfig: &clusterimpl.LBConfig{\n\t\t\t\t\t\t\t\tCluster: testClusterName,\n\t\t\t\t\t\t\t\tEDSServiceName: \"test-eds-service-name\",\n\t\t\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\t\t\t\t\tName: weightedtarget.Name,\n\t\t\t\t\t\t\t\t\tConfig: &weightedtarget.LBConfig{\n\t\t\t\t\t\t\t\t\t\tTargets: map[string]weightedtarget.Target{\n\t\t\t\t\t\t\t\t\t\t\tassertString(localityID.ToString): {\n\t\t\t\t\t\t\t\t\t\t\t\tWeight: 100,\n\t\t\t\t\t\t\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tIgnoreReresolutionRequests: true,\n\t\t\t},\n\t\t},\n\t\tPriorities: []string{\"priority-0-0\"},\n\t}\n\n\tif err := edsLB.waitForClientConnStateChangeVerifyBalancerConfig(ctx, balancer.ClientConnState{\n\t\tBalancerConfig: pCfgWant,\n\t}); err != nil {\n\t\tt.Fatalf(\"EDS impl got unexpected update: %v\", err)\n\t}\n}", "func HandleLoadbalancerListSuccessfully(t *testing.T) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"GET\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tr.ParseForm()\n\t\tmarker := r.Form.Get(\"marker\")\n\t\tswitch marker {\n\t\tcase \"\":\n\t\t\tfmt.Fprintf(w, LoadbalancersListBody)\n\t\tcase 
\"45e08a3e-a78f-4b40-a229-1e7e23eee1ab\":\n\t\t\tfmt.Fprintf(w, `{ \"loadbalancers\": [] }`)\n\t\tdefault:\n\t\t\tt.Fatalf(\"/v2.0/lbaas/loadbalancers invoked with unexpected marker=[%s]\", marker)\n\t\t}\n\t})\n}", "func TestServiceDelete(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tvar svcIP string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tsvcIP = svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status to be updated\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been allocated\")\n\t}\n\n\terr := fixture.svcClient.Services(\"default\").Delete(context.Background(), \"service-a\", 
meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been released\")\n\t}\n}", "func (s) TestBalancer_TwoAddresses_ErrorPenalty(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\tsrv1 := startServer(t, reportOOB)\n\tsrv2 := startServer(t, reportOOB)\n\n\t// srv1 starts loaded and srv2 starts without load; ensure RPCs are routed\n\t// disproportionately to srv2 (10:1). EPS values are set (but ignored\n\t// initially due to ErrorUtilizationPenalty=0). Later EUP will be updated\n\t// to 0.9 which will cause the weights to be equal and RPCs to be routed\n\t// 50/50.\n\tsrv1.oobMetrics.SetQPS(10.0)\n\tsrv1.oobMetrics.SetApplicationUtilization(1.0)\n\tsrv1.oobMetrics.SetEPS(0)\n\t// srv1 weight before: 10.0 / 1.0 = 10.0\n\t// srv1 weight after: 10.0 / 1.0 = 10.0\n\n\tsrv2.oobMetrics.SetQPS(10.0)\n\tsrv2.oobMetrics.SetApplicationUtilization(.1)\n\tsrv2.oobMetrics.SetEPS(10.0)\n\t// srv2 weight before: 10.0 / 0.1 = 100.0\n\t// srv2 weight after: 10.0 / 1.0 = 10.0\n\n\tsc := svcConfig(t, oobConfig)\n\tif err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil {\n\t\tt.Fatalf(\"Error starting client: %v\", err)\n\t}\n\taddrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}}\n\tsrv1.R.UpdateState(resolver.State{Addresses: addrs})\n\n\t// Call each backend once to ensure the weights have been received.\n\tensureReached(ctx, t, srv1.Client, 2)\n\n\t// Wait for the weight update period to allow the new weights to be processed.\n\ttime.Sleep(weightUpdatePeriod)\n\tcheckWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10})\n\n\t// Update to include an error penalty in the weights.\n\tnewCfg := oobConfig\n\tnewCfg.ErrorUtilizationPenalty = float64p(0.9)\n\tc := svcConfig(t, newCfg)\n\tparsedCfg := 
srv1.R.CC.ParseServiceConfig(c)\n\tif parsedCfg.Err != nil {\n\t\tpanic(fmt.Sprintf(\"Error parsing config %q: %v\", c, parsedCfg.Err))\n\t}\n\tsrv1.R.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parsedCfg})\n\n\t// Wait for the weight update period to allow the new weights to be processed.\n\ttime.Sleep(weightUpdatePeriod + oobReportingInterval)\n\tcheckWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 1})\n}", "func TestDeleteAllBackendServers(t *testing.T) {\n\tcloud, _, resp, err := beforeTestBlb()\n\tif err != nil {\n\t\tt.Errorf(\"beforeTestBlb err, err: %v\", err)\n\t}\n\tctx := context.Background()\n\tlb := &blb.LoadBalancer{\n\t\tBlbId: resp.LoadBalancerId,\n\t}\n\t// bs is nil\n\terr = cloud.deleteAllBackendServers(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"deleteAllBackendServers err, err : %v\", err)\n\t}\n\t// add bs\n\tbsAdd := []blb.BackendServer{\n\t\t{\n\t\t\tInstanceId: \"1\",\n\t\t},\n\t\t{\n\t\t\tInstanceId: \"2\",\n\t\t},\n\t}\n\targs := blb.AddBackendServersArgs{\n\t\tLoadBalancerId: lb.BlbId,\n\t\tBackendServerList: bsAdd,\n\t}\n\terr = cloud.clientSet.BLBClient.AddBackendServers(ctx, &args, &bce.SignOption{\n\t\tCustomSignFunc: CCEServiceSign,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"AddBackendServers err, err: %v\", err)\n\t}\n\terr = cloud.deleteAllBackendServers(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"deleteAllBackendServers err, err : %v\", err)\n\t}\n\tbs, err := cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 0 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n}", "func printEndpointLabels(lbls *labels.OpLabels) {\n\tlog.WithField(logfields.Labels, logfields.Repr(*lbls)).Debug(\"All Labels\")\n\tw := tabwriter.NewWriter(os.Stdout, 2, 0, 3, ' ', 0)\n\n\tfor _, v := range lbls.IdentityLabels() {\n\t\tfmt.Fprintf(w, \"%s\\t%s\\n\", v, \"Enabled\")\n\t}\n\n\tfor _, v := range lbls.Disabled 
{\n\t\tfmt.Fprintf(w, \"%s\\t%s\\n\", v, \"Disabled\")\n\t}\n\tw.Flush()\n}", "func (mr *MockClientMockRecorder) GetRepoLabels(org, repo interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetRepoLabels\", reflect.TypeOf((*MockClient)(nil).GetRepoLabels), org, repo)\n}", "func TestPoolSelectorNamespace(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tselector := slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"io.kubernetes.service.namespace\": \"tenant-one\",\n\t\t},\n\t}\n\tpoolA.Spec.ServiceSelector = &selector\n\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"red-service\" {\n\t\t\tt.Error(\"Expected update from 'red-service'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn 
true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tmatchingService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"red-service\",\n\t\t\tNamespace: \"tenant-one\",\n\t\t\tUID: serviceAUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilyPolicy: &policy,\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"tenant-one\").Create(context.Background(), matchingService, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"blue-service\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to not receive any ingress IPs\")\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tnonMatchingService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"blue-service\",\n\t\t\tNamespace: \"tenant-two\",\n\t\t\tUID: serviceBUID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t// Setting the same label in an attempt to escalate privileges doesn't work\n\t\t\t\t\"io.kubernetes.service.namespace\": 
\"tenant-one\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tIPFamilyPolicy: &policy,\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"tenant-two\").Create(context.Background(), nonMatchingService, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (vs *VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) {\n\treturn nil, false\n}", "func getGatewayLoadBalancers(gatewayRouter string) (string, string, string, error) {\n\tlbTCP, stderr, err := util.RunOVNNbctl(\"--data=bare\", \"--no-heading\",\n\t\t\"--columns=_uuid\", \"find\", \"load_balancer\",\n\t\t\"external_ids:TCP_lb_gateway_router=\"+gatewayRouter)\n\tif err != nil {\n\t\treturn \"\", \"\", \"\", fmt.Errorf(\"failed to get gateway router %q TCP \"+\n\t\t\t\"load balancer, stderr: %q, error: %v\", gatewayRouter, stderr, err)\n\t}\n\n\tlbUDP, stderr, err := util.RunOVNNbctl(\"--data=bare\", \"--no-heading\",\n\t\t\"--columns=_uuid\", \"find\", \"load_balancer\",\n\t\t\"external_ids:UDP_lb_gateway_router=\"+gatewayRouter)\n\tif err != nil {\n\t\treturn \"\", \"\", \"\", fmt.Errorf(\"failed to get gateway router %q UDP \"+\n\t\t\t\"load balancer, stderr: %q, error: %v\", gatewayRouter, stderr, err)\n\t}\n\n\tlbSCTP, stderr, err := util.RunOVNNbctl(\"--data=bare\", \"--no-heading\",\n\t\t\"--columns=_uuid\", \"find\", \"load_balancer\",\n\t\t\"external_ids:SCTP_lb_gateway_router=\"+gatewayRouter)\n\tif err != nil {\n\t\treturn \"\", \"\", \"\", fmt.Errorf(\"failed to get gateway router %q SCTP \"+\n\t\t\t\"load balancer, stderr: %q, error: %v\", gatewayRouter, stderr, err)\n\t}\n\treturn lbTCP, lbUDP, lbSCTP, nil\n}", "func desiredLoadBalancerService(ci *operatorv1.IngressController, deploymentRef metav1.OwnerReference, platform *configv1.PlatformStatus) (bool, *corev1.Service, error) {\n\tif 
ci.Status.EndpointPublishingStrategy.Type != operatorv1.LoadBalancerServiceStrategyType {\n\t\treturn false, nil, nil\n\t}\n\tservice := manifests.LoadBalancerService()\n\n\tname := controller.LoadBalancerServiceName(ci)\n\n\tservice.Namespace = name.Namespace\n\tservice.Name = name.Name\n\n\tif service.Labels == nil {\n\t\tservice.Labels = map[string]string{}\n\t}\n\tservice.Labels[\"router\"] = name.Name\n\tservice.Labels[manifests.OwningIngressControllerLabel] = ci.Name\n\n\tservice.Spec.Selector = controller.IngressControllerDeploymentPodSelector(ci).MatchLabels\n\n\tlb := ci.Status.EndpointPublishingStrategy.LoadBalancer\n\tisInternal := lb != nil && lb.Scope == operatorv1.InternalLoadBalancer\n\n\tif service.Annotations == nil {\n\t\tservice.Annotations = map[string]string{}\n\t}\n\n\tproxyNeeded, err := IsProxyProtocolNeeded(ci, platform)\n\tif err != nil {\n\t\treturn false, nil, fmt.Errorf(\"failed to determine if proxy protocol is proxyNeeded for ingresscontroller %q: %v\", ci.Name, err)\n\t}\n\n\tif platform != nil {\n\t\tif isInternal {\n\t\t\tannotation := InternalLBAnnotations[platform.Type]\n\t\t\tfor name, value := range annotation {\n\t\t\t\tservice.Annotations[name] = value\n\t\t\t}\n\n\t\t\t// Set the GCP Global Access annotation for internal load balancers on GCP only\n\t\t\tif platform.Type == configv1.GCPPlatformType {\n\t\t\t\tif lb != nil && lb.ProviderParameters != nil &&\n\t\t\t\t\tlb.ProviderParameters.Type == operatorv1.GCPLoadBalancerProvider &&\n\t\t\t\t\tlb.ProviderParameters.GCP != nil {\n\t\t\t\t\tglobalAccessEnabled := lb.ProviderParameters.GCP.ClientAccess == operatorv1.GCPGlobalAccess\n\t\t\t\t\tservice.Annotations[GCPGlobalAccessAnnotation] = strconv.FormatBool(globalAccessEnabled)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tannotation := externalLBAnnotations[platform.Type]\n\t\t\tfor name, value := range annotation {\n\t\t\t\tservice.Annotations[name] = value\n\t\t\t}\n\t\t}\n\t\tswitch platform.Type {\n\t\tcase 
configv1.AWSPlatformType:\n\t\t\tservice.Annotations[awsLBHealthCheckIntervalAnnotation] = awsLBHealthCheckIntervalDefault\n\t\t\tif proxyNeeded {\n\t\t\t\tservice.Annotations[awsLBProxyProtocolAnnotation] = \"*\"\n\t\t\t}\n\t\t\tif lb != nil && lb.ProviderParameters != nil {\n\t\t\t\tif aws := lb.ProviderParameters.AWS; aws != nil && lb.ProviderParameters.Type == operatorv1.AWSLoadBalancerProvider {\n\t\t\t\t\tswitch aws.Type {\n\t\t\t\t\tcase operatorv1.AWSNetworkLoadBalancer:\n\t\t\t\t\t\tservice.Annotations[AWSLBTypeAnnotation] = AWSNLBAnnotation\n\t\t\t\t\t\t// NLBs require a different health check interval than CLBs.\n\t\t\t\t\t\t// See <https://bugzilla.redhat.com/show_bug.cgi?id=1908758>.\n\t\t\t\t\t\tservice.Annotations[awsLBHealthCheckIntervalAnnotation] = awsLBHealthCheckIntervalNLB\n\t\t\t\t\tcase operatorv1.AWSClassicLoadBalancer:\n\t\t\t\t\t\tif aws.ClassicLoadBalancerParameters != nil {\n\t\t\t\t\t\t\tif v := aws.ClassicLoadBalancerParameters.ConnectionIdleTimeout; v.Duration > 0 {\n\t\t\t\t\t\t\t\tservice.Annotations[awsELBConnectionIdleTimeoutAnnotation] = strconv.FormatUint(uint64(v.Round(time.Second).Seconds()), 10)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif platform.AWS != nil && len(platform.AWS.ResourceTags) > 0 {\n\t\t\t\tvar additionalTags []string\n\t\t\t\tfor _, userTag := range platform.AWS.ResourceTags {\n\t\t\t\t\tif len(userTag.Key) > 0 {\n\t\t\t\t\t\tadditionalTags = append(additionalTags, userTag.Key+\"=\"+userTag.Value)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(additionalTags) > 0 {\n\t\t\t\t\tservice.Annotations[awsLBAdditionalResourceTags] = strings.Join(additionalTags, \",\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Set the load balancer for AWS to be as aggressive as Azure (2 fail @ 5s interval, 2 healthy)\n\t\t\tservice.Annotations[awsLBHealthCheckTimeoutAnnotation] = awsLBHealthCheckTimeoutDefault\n\t\t\tservice.Annotations[awsLBHealthCheckUnhealthyThresholdAnnotation] = 
awsLBHealthCheckUnhealthyThresholdDefault\n\t\t\tservice.Annotations[awsLBHealthCheckHealthyThresholdAnnotation] = awsLBHealthCheckHealthyThresholdDefault\n\t\tcase configv1.IBMCloudPlatformType, configv1.PowerVSPlatformType:\n\t\t\t// Set ExternalTrafficPolicy to type Cluster - IBM's LoadBalancer impl is created within the cluster.\n\t\t\t// LB places VIP on one of the worker nodes, using keepalived to maintain the VIP and ensuring redundancy\n\t\t\t// LB relies on iptable rules kube-proxy puts in to send traffic from the VIP node to the cluster\n\t\t\t// If policy is local, traffic is only sent to pods on the local node, as such Cluster enables traffic to flow to all the pods in the cluster\n\t\t\tservice.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeCluster\n\t\t\tif proxyNeeded {\n\t\t\t\tservice.Annotations[iksLBEnableFeaturesAnnotation] = iksLBEnableFeaturesProxyProtocol\n\t\t\t}\n\n\t\tcase configv1.AlibabaCloudPlatformType:\n\t\t\tif !isInternal {\n\t\t\t\tservice.Annotations[alibabaCloudLBAddressTypeAnnotation] = alibabaCloudLBAddressTypeInternet\n\t\t\t}\n\t\t}\n\t\t// Azure load balancers are not customizable and are set to (2 fail @ 5s interval, 2 healthy)\n\t\t// GCP load balancers are not customizable and are set to (3 fail @ 8s interval, 1 healthy)\n\n\t\tif v, err := shouldUseLocalWithFallback(ci, service); err != nil {\n\t\t\treturn true, service, err\n\t\t} else if v {\n\t\t\tservice.Annotations[localWithFallbackAnnotation] = \"\"\n\t\t}\n\t}\n\n\tif ci.Spec.EndpointPublishingStrategy != nil {\n\t\tlb := ci.Spec.EndpointPublishingStrategy.LoadBalancer\n\t\tif lb != nil && len(lb.AllowedSourceRanges) > 0 {\n\t\t\tcidrs := make([]string, len(lb.AllowedSourceRanges))\n\t\t\tfor i, cidr := range lb.AllowedSourceRanges {\n\t\t\t\tcidrs[i] = string(cidr)\n\t\t\t}\n\t\t\tservice.Spec.LoadBalancerSourceRanges = cidrs\n\t\t}\n\t}\n\n\tservice.SetOwnerReferences([]metav1.OwnerReference{deploymentRef})\n\treturn true, service, nil\n}", 
"func (mr *MockRepositoryClientMockRecorder) GetRepoLabels(org, repo interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetRepoLabels\", reflect.TypeOf((*MockRepositoryClient)(nil).GetRepoLabels), org, repo)\n}", "func (mr *MockRDSAPIMockRecorder) DescribeDBSubnetGroups(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeDBSubnetGroups\", reflect.TypeOf((*MockRDSAPI)(nil).DescribeDBSubnetGroups), arg0)\n}", "func (az *Cloud) ListManagedLBs(service *v1.Service, nodes []*v1.Node, clusterName string) (*[]network.LoadBalancer, error) {\n\tallLBs, err := az.ListLB(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif allLBs == nil {\n\t\tklog.Warningf(\"ListManagedLBs: no LBs found\")\n\t\treturn nil, nil\n\t}\n\n\tmanagedLBNames := sets.New[string](strings.ToLower(clusterName))\n\tmanagedLBs := make([]network.LoadBalancer, 0)\n\tif strings.EqualFold(az.LoadBalancerSku, consts.LoadBalancerSkuBasic) {\n\t\t// return early if wantLb=false\n\t\tif nodes == nil {\n\t\t\tklog.V(4).Infof(\"ListManagedLBs: return all LBs in the resource group %s, including unmanaged LBs\", az.getLoadBalancerResourceGroup())\n\t\t\treturn &allLBs, nil\n\t\t}\n\n\t\tagentPoolVMSetNamesMap := make(map[string]bool)\n\t\tagentPoolVMSetNames, err := az.VMSet.GetAgentPoolVMSetNames(nodes)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ListManagedLBs: failed to get agent pool vmSet names: %w\", err)\n\t\t}\n\n\t\tif agentPoolVMSetNames != nil && len(*agentPoolVMSetNames) > 0 {\n\t\t\tfor _, vmSetName := range *agentPoolVMSetNames {\n\t\t\t\tklog.V(6).Infof(\"ListManagedLBs: found agent pool vmSet name %s\", vmSetName)\n\t\t\t\tagentPoolVMSetNamesMap[strings.ToLower(vmSetName)] = true\n\t\t\t}\n\t\t}\n\n\t\tfor agentPoolVMSetName := range agentPoolVMSetNamesMap {\n\t\t\tmanagedLBNames.Insert(az.mapVMSetNameToLoadBalancerName(agentPoolVMSetName, clusterName))\n\t\t}\n\t}\n\n\tif 
az.useMultipleStandardLoadBalancers() {\n\t\tfor _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations {\n\t\t\tmanagedLBNames.Insert(multiSLBConfig.Name, fmt.Sprintf(\"%s%s\", multiSLBConfig.Name, consts.InternalLoadBalancerNameSuffix))\n\t\t}\n\t}\n\n\tfor _, lb := range allLBs {\n\t\tif managedLBNames.Has(strings.ToLower(strings.TrimSuffix(pointer.StringDeref(lb.Name, \"\"), consts.InternalLoadBalancerNameSuffix))) {\n\t\t\tmanagedLBs = append(managedLBs, lb)\n\t\t\tklog.V(4).Infof(\"ListManagedLBs: found managed LB %s\", pointer.StringDeref(lb.Name, \"\"))\n\t\t}\n\t}\n\n\treturn &managedLBs, nil\n}", "func TestLBIPAM_serviceIPFamilyRequest(t *testing.T) {\n\ttype test struct {\n\t\tname string\n\t\tIPv4Enabled bool\n\t\tIPv6Enabled bool\n\t\tsvc *slim_core_v1.Service\n\t\twantIPv4Requested bool\n\t\twantIPv6Requested bool\n\t}\n\n\tsingleStack := slim_core_v1.IPFamilyPolicySingleStack\n\tpreferDual := slim_core_v1.IPFamilyPolicyPreferDualStack\n\trequireDual := slim_core_v1.IPFamilyPolicyRequireDualStack\n\n\ttests := []test{\n\t\t{\n\t\t\t// If no policy is set, fall back to single stack. Only IPv4 enabled\n\t\t\tname: \"No policy, No families, IPv4\",\n\t\t\tsvc: &slim_core_v1.Service{\n\t\t\t\tSpec: slim_core_v1.ServiceSpec{},\n\t\t\t},\n\t\t\tIPv4Enabled: true,\n\t\t\twantIPv4Requested: true,\n\t\t},\n\t\t{\n\t\t\t// If no policy is set, fall back to single stack. Only IPv6 enabled\n\t\t\tname: \"No policy, No families, IPv6\",\n\t\t\tsvc: &slim_core_v1.Service{\n\t\t\t\tSpec: slim_core_v1.ServiceSpec{},\n\t\t\t},\n\t\t\tIPv6Enabled: true,\n\t\t\twantIPv6Requested: true,\n\t\t},\n\t\t{\n\t\t\t// If no policy is set, fall back to single stack. 
Prefer IPv4 over IPv6 in single stack\n\t\t\tname: \"No policy, No families, IPv4/IPv6\",\n\t\t\tsvc: &slim_core_v1.Service{\n\t\t\t\tSpec: slim_core_v1.ServiceSpec{},\n\t\t\t},\n\t\t\tIPv4Enabled: true,\n\t\t\tIPv6Enabled: true,\n\t\t\twantIPv4Requested: true,\n\t\t\twantIPv6Requested: false,\n\t\t},\n\t\t{\n\t\t\t// If no policy is set, fall back to single stack. Request IPv6, even if it is disabled.\n\t\t\tname: \"No policy, IPv6 family, IPv4\",\n\t\t\tsvc: &slim_core_v1.Service{\n\t\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\t\tslim_core_v1.IPv6Protocol,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tIPv4Enabled: true,\n\t\t\twantIPv6Requested: true,\n\t\t},\n\t\t{\n\t\t\t// If no policy is set, fall back to single stack. Request IPv4, even if it is disabled.\n\t\t\tname: \"No policy, IPv4 family, IPv6\",\n\t\t\tsvc: &slim_core_v1.Service{\n\t\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tIPv6Enabled: true,\n\t\t\twantIPv4Requested: true,\n\t\t},\n\t\t{\n\t\t\t// If no policy is set, fall back to single stack. Request the first family\n\t\t\tname: \"No policy, IPv4/IPv6 family, No enabled\",\n\t\t\tsvc: &slim_core_v1.Service{\n\t\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t\t\tslim_core_v1.IPv6Protocol,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantIPv4Requested: true,\n\t\t},\n\t\t{\n\t\t\t// If no policy is set, fall back to single stack. 
Request the first family\n\t\t\tname: \"No policy, IPv4/IPv6 family, No enabled\",\n\t\t\tsvc: &slim_core_v1.Service{\n\t\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\t\tslim_core_v1.IPv6Protocol,\n\t\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantIPv6Requested: true,\n\t\t},\n\t\t{\n\t\t\t// If single stack is explicitly set, and both are available, prefer IPv4\n\t\t\tname: \"Single stack, No families, IPv6/IPv4\",\n\t\t\tsvc: &slim_core_v1.Service{\n\t\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\t\tIPFamilyPolicy: &singleStack,\n\t\t\t\t},\n\t\t\t},\n\t\t\tIPv4Enabled: true,\n\t\t\tIPv6Enabled: true,\n\t\t\twantIPv4Requested: true,\n\t\t},\n\t\t{\n\t\t\t// If dual stack is requested, and available, request both\n\t\t\tname: \"PreferDual, No families, IPv6/IPv4\",\n\t\t\tsvc: &slim_core_v1.Service{\n\t\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\t\tIPFamilyPolicy: &preferDual,\n\t\t\t\t},\n\t\t\t},\n\t\t\tIPv4Enabled: true,\n\t\t\tIPv6Enabled: true,\n\t\t\twantIPv4Requested: true,\n\t\t\twantIPv6Requested: true,\n\t\t},\n\t\t{\n\t\t\t// If dual stack is requested, and available, request family\n\t\t\tname: \"PreferDual, IPv4 family, IPv6\",\n\t\t\tsvc: &slim_core_v1.Service{\n\t\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\t\tIPFamilyPolicy: &preferDual,\n\t\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tIPv4Enabled: false,\n\t\t\tIPv6Enabled: true,\n\t\t\twantIPv4Requested: false,\n\t\t\twantIPv6Requested: false,\n\t\t},\n\t\t{\n\t\t\t// If dual stack is required, and available, request both\n\t\t\tname: \"RequireDual, IPv4 family, IPv6\",\n\t\t\tsvc: &slim_core_v1.Service{\n\t\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\t\tIPFamilyPolicy: &requireDual,\n\t\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tIPv4Enabled: 
false,\n\t\t\tIPv6Enabled: true,\n\t\t\twantIPv4Requested: false,\n\t\t\twantIPv6Requested: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tipam := &LBIPAM{\n\t\t\t\tipv4Enabled: tt.IPv4Enabled,\n\t\t\t\tipv6Enabled: tt.IPv6Enabled,\n\t\t\t}\n\t\t\tgotIPv4Requested, gotIPv6Requested := ipam.serviceIPFamilyRequest(tt.svc)\n\t\t\tif gotIPv4Requested != tt.wantIPv4Requested {\n\t\t\t\tt.Errorf(\"LBIPAM.serviceIPFamilyRequest() gotIPv4Requested = %v, want %v\", gotIPv4Requested, tt.wantIPv4Requested)\n\t\t\t}\n\t\t\tif gotIPv6Requested != tt.wantIPv6Requested {\n\t\t\t\tt.Errorf(\"LBIPAM.serviceIPFamilyRequest() gotIPv6Requested = %v, want %v\", gotIPv6Requested, tt.wantIPv6Requested)\n\t\t\t}\n\t\t})\n\t}\n}", "func generateLoadBalancers(cr *svcapitypes.Service) []*svcsdk.LoadBalancer {\n\tloadBalancers := []*svcsdk.LoadBalancer{}\n\n\tif cr.Spec.ForProvider.LoadBalancers == nil {\n\t\treturn loadBalancers\n\t}\n\n\tfor _, loadBalancer := range cr.Spec.ForProvider.LoadBalancers {\n\t\tconvertedLB := &svcsdk.LoadBalancer{}\n\t\tconvertedLB.ContainerName = loadBalancer.ContainerName\n\t\tconvertedLB.ContainerPort = loadBalancer.ContainerPort\n\t\tconvertedLB.LoadBalancerName = loadBalancer.LoadBalancerName\n\t\tconvertedLB.TargetGroupArn = loadBalancer.TargetGroupARN\n\n\t\tloadBalancers = append(loadBalancers, convertedLB)\n\t}\n\treturn loadBalancers\n}", "func NewLoadBalancerCollector(logger log.Logger, client *hcloud.Client, failures *prometheus.CounterVec, duration *prometheus.HistogramVec, cfg config.Target) *LoadBalancerCollector {\n\tif failures != nil {\n\t\tfailures.WithLabelValues(\"load_balancer\").Add(0)\n\t}\n\n\tlabels := []string{\"id\", \"name\", \"datacenter\"}\n\treturn &LoadBalancerCollector{\n\t\tclient: client,\n\t\tlogger: log.With(logger, \"collector\", \"load-balancer\"),\n\t\tfailures: failures,\n\t\tduration: duration,\n\t\tconfig: cfg,\n\n\t\tCreated: 
prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_created_timestamp\",\n\t\t\t\"Timestamp when the load balancer have been created\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tServices: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_services\",\n\t\t\t\"The number of configured services\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxServices: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_services\",\n\t\t\t\"The maximum number of services that can be configured\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargets: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets\",\n\t\t\t\"The number of targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxTargets: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_targets\",\n\t\t\t\"The maximum number of targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsHealthy: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_healthy\",\n\t\t\t\"The number of healthy targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsUnhealthy: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_unhealthy\",\n\t\t\t\"The number of unhealthy targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tTargetsUnknown: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_targets_unknown\",\n\t\t\t\"The number of unknown targets\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tAssignedCertificates: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_assigned_certificates\",\n\t\t\t\"The number of assigned certificates\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxAssignedCertificates: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_assigned_certificates\",\n\t\t\t\"The maximum number of certificates that can be assigned\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIngoingTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_ingoing_traffic\",\n\t\t\t\"The total amount of ingoing traffic in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tOutgoingTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_outgoing_traffic\",\n\t\t\t\"The total 
amount of outgoing traffic in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIncludedTraffic: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_included_traffic\",\n\t\t\t\"The amount of traffic that is included for the load balancer type in bytes\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tConnections: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections\",\n\t\t\t\"The number of open connections\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tMaxConnections: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_max_open_connections\",\n\t\t\t\"The maximum number of open connections\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tConnectionsPerSecond: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_connections_per_second\",\n\t\t\t\"The number of new connections per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tRequestsPerSecond: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_requests_per_second\",\n\t\t\t\"The number of requests per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tIncomingBandwidth: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections_bandwidth_in\",\n\t\t\t\"The incoming bandwidth in bytes per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tOutgoingBandwidth: prometheus.NewDesc(\n\t\t\t\"hcloud_loadbalancer_open_connections_bandwidth_out\",\n\t\t\t\"The outgoing bandwidth in bytes per second\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t}\n}", "func TestReallocOnInit(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\t// Initially request only an IPv4\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: 
slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"192.168.1.12\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP == \"192.168.1.12\" {\n\t\t\tt.Error(\"Expected ingress IP to not be the initial, bad IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service to be updated\")\n\t}\n}", "func TestLifecycleExternalLB(t *testing.T) {\n\trunLifecycleTestAWS(&LifecycleTestOptions{\n\t\tt: t,\n\t\tSrcDir: 
\"externallb\",\n\t})\n}", "func (service *HTTPRestService) getUnhealthyIPAddresses(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"[Azure CNS] getUnhealthyIPAddresses\")\n\tlog.Request(service.Name, \"getUnhealthyIPAddresses\", nil)\n\n\treturnMessage := \"\"\n\treturnCode := 0\n\tcapacity := 0\n\tavailable := 0\n\tvar unhealthyAddrs []string\n\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tic := service.ipamClient\n\n\t\tifInfo, err := service.imdsClient.GetPrimaryInterfaceInfoFromMemory()\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. GetPrimaryIfaceInfo failed %v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\n\t\tasID, err := ic.GetAddressSpace()\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. GetAddressSpace failed %v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\n\t\tpoolID, err := ic.GetPoolID(asID, ifInfo.Subnet)\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. GetPoolID failed %v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\n\t\tcapacity, available, unhealthyAddrs, err = ic.GetIPAddressUtilization(poolID)\n\t\tif err != nil {\n\t\t\treturnMessage = fmt.Sprintf(\"[Azure CNS] Error. GetIPUtilization failed %v\", err.Error())\n\t\t\treturnCode = UnexpectedError\n\t\t\tbreak\n\t\t}\n\t\tlog.Printf(\"[Azure CNS] Capacity %v Available %v UnhealthyAddrs %v\", capacity, available, unhealthyAddrs)\n\n\tdefault:\n\t\treturnMessage = \"[Azure CNS] Error. 
GetUnhealthyIP did not receive a POST.\"\n\t\treturnCode = InvalidParameter\n\t}\n\n\tresp := cns.Response{\n\t\tReturnCode: returnCode,\n\t\tMessage: returnMessage,\n\t}\n\n\tipResp := &cns.GetIPAddressesResponse{\n\t\tResponse: resp,\n\t\tIPAddresses: unhealthyAddrs,\n\t}\n\n\terr := service.Listener.Encode(w, &ipResp)\n\tlog.Response(service.Name, ipResp, resp.ReturnCode, ReturnCodeToString(resp.ReturnCode), err)\n}", "func TestGetNetworks(t *testing.T) {\n\trecord(t, \"getnetworks\", func(t *testing.T, svc *Service) {\n\t\t_, err := createServer(svc, \"TestGetNetworks\")\n\t\trequire.NoError(t, err)\n\n\t\tnetworks, err := svc.GetNetworks()\n\t\trequire.NoError(t, err)\n\n\t\tassert.NotEmpty(t, networks.Networks)\n\n\t\tassert.NotEmpty(t, networks.Networks[0].IPNetworks)\n\t\tassert.NotEmpty(t, networks.Networks[0].Name)\n\t\tassert.NotEmpty(t, networks.Networks[0].Type)\n\t\tassert.NotEmpty(t, networks.Networks[0].UUID)\n\t\tassert.NotEmpty(t, networks.Networks[0].Zone)\n\n\t\t// Find a network with a server\n\t\tvar found bool\n\t\tfor _, n := range networks.Networks {\n\t\t\tif len(n.Servers) > 0 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tassert.True(t, found)\n\t})\n}", "func HandleFullyPopulatedLoadbalancerCreationSuccessfully(t *testing.T, response string) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestJSONRequest(t, r, `{\n\t\t\t\"loadbalancer\": {\n\t\t\t\t\"admin_state_up\": true,\n\t\t\t\t\"flavor_id\": \"bba40eb2-ee8c-11e9-81b4-2a2ae2dbcce4\",\n\t\t\t\t\"listeners\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"default_pool\": {\n\t\t\t\t\t\t\t\"healthmonitor\": {\n\t\t\t\t\t\t\t\t\"delay\": 3,\n\t\t\t\t\t\t\t\t\"expected_codes\": \"200\",\n\t\t\t\t\t\t\t\t\"http_method\": \"GET\",\n\t\t\t\t\t\t\t\t\"max_retries\": 2,\n\t\t\t\t\t\t\t\t\"max_retries_down\": 3,\n\t\t\t\t\t\t\t\t\"name\": 
\"db\",\n\t\t\t\t\t\t\t\t\"timeout\": 1,\n\t\t\t\t\t\t\t\t\"type\": \"HTTP\",\n\t\t\t\t\t\t\t\t\"url_path\": \"/index.html\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"lb_algorithm\": \"ROUND_ROBIN\",\n\t\t\t\t\t\t\t\"members\": [\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"address\": \"192.0.2.51\",\n\t\t\t\t\t\t\t\t\t\"protocol_port\": 80\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"address\": \"192.0.2.52\",\n\t\t\t\t\t\t\t\t\t\"protocol_port\": 80\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\"name\": \"Example pool\",\n\t\t\t\t\t\t\t\"protocol\": \"HTTP\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"l7policies\": [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"action\": \"REDIRECT_TO_URL\",\n\t\t\t\t\t\t\t\t\"name\": \"redirect-example.com\",\n\t\t\t\t\t\t\t\t\"redirect_url\": \"http://www.example.com\",\n\t\t\t\t\t\t\t\t\"rules\": [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\"compare_type\": \"REGEX\",\n\t\t\t\t\t\t\t\t\t\t\"type\": \"PATH\",\n\t\t\t\t\t\t\t\t\t\t\"value\": \"/images*\"\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t],\n\t\t\t\t\t\t\"name\": \"redirect_listener\",\n\t\t\t\t\t\t\"protocol\": \"HTTP\",\n\t\t\t\t\t\t\"protocol_port\": 8080\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\t\"name\": \"db_lb\",\n\t\t\t\t\"provider\": \"octavia\",\n\t\t\t\t\"tags\": [\n\t\t\t\t\t\"test\",\n\t\t\t\t\t\"stage\"\n\t\t\t\t],\n\t\t\t\t\"vip_address\": \"10.30.176.48\",\n\t\t\t\t\"vip_port_id\": \"2bf413c8-41a9-4477-b505-333d5cbe8b55\",\n\t\t\t\t\"vip_subnet_id\": \"9cedb85d-0759-4898-8a4b-fa5a5ea10086\"\n\t\t\t}\n\t\t}`)\n\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tfmt.Fprintf(w, response)\n\t})\n}", "func (nat *NATCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {\n\tnatProvider, err := nat.getNATClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnatGatewayId := service.ObjectMeta.Annotations[AnnotationsNATID]\n\tif natGatewayId == 
\"\" {\n\t\treturn fmt.Errorf(\"The id of natGateway should be set by %v in annotations \", AnnotationsNATID)\n\t}\n\n\tnatGateway, err := natProvider.GetNATGateway(natGatewayId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif natGateway.RouterId != nat.cloudConfig.VpcOpts.ID {\n\t\treturn fmt.Errorf(\"The natGateway is not in the same VPC with cluster. \")\n\t}\n\n\t//get floatingip id by floatingip address and check if it can be used\n\tdnatRuleList, err := listDnatRule(natProvider, natGatewayId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfloatingIp, err := nat.getFloatingIpInfoByIp(natProvider, service.Spec.LoadBalancerIP)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallDnatRuleInFloatIP, err := listAllDnatRuleByFloatIP(natProvider, service.Spec.LoadBalancerIP)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !nat.checkFloatingIp(allDnatRuleInFloatIP, floatingIp, natGatewayId) {\n\t\treturn fmt.Errorf(\"The floating ip %v is binding to port,and its not DNAT rule in natGateway %s \", floatingIp.FloatingIpAddress, natGateway.Name)\n\t}\n\n\tpodList, err := nat.getPods(service.Name, service.Namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar runningPod v1.Pod\n\tfor _, pod := range podList.Items {\n\t\tif podutil.IsPodReady(&pod) {\n\t\t\trunningPod = pod\n\t\t\tbreak\n\t\t}\n\t}\n\tvar errs []error\n\tif len(runningPod.Status.HostIP) == 0 {\n\t\tklog.V(4).Infof(\"Delete all DNAT Rule if there is no available endpoint for service %s\", service.Name)\n\t\tfor _, servicePort := range service.Spec.Ports {\n\t\t\tdnatRule := nat.getDNATRule(dnatRuleList, &servicePort)\n\t\t\tif dnatRule != nil {\n\t\t\t\tif err = nat.ensureDeleteDNATRule(natProvider, dnatRule, natGatewayId); err != nil {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\"UpdateDNATRule Failed: %v\", err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(errs) != 0 {\n\t\t\treturn utilerrors.NewAggregate(errs)\n\t\t}\n\t\treturn nil\n\t}\n\n\tsubnetId := nat.getSubnetIdForPod(runningPod, 
nodes)\n\tnetPort, err := nat.getPortByFixedIp(natProvider, subnetId, runningPod.Status.HostIP)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, servicePort := range service.Spec.Ports {\n\t\tdnatRule := nat.getDNATRule(dnatRuleList, &servicePort)\n\t\tif dnatRule != nil {\n\t\t\tnetworkPort, err := natProvider.GetPort(dnatRule.PortId)\n\t\t\tif err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(networkPort.FixedIps) == 0 {\n\t\t\t\terrs = append(errs, fmt.Errorf(\"The port has no ipAddress binded \"))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnode, err := nat.kubeClient.Nodes().Get(context.TODO(), networkPort.FixedIps[0].IpAddress, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"Get node(%s) error: %v\", networkPort.FixedIps[0].IpAddress, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstatus, err := CheckNodeHealth(node)\n\t\t\tif !status || err != nil {\n\t\t\t\tklog.Warningf(\"The node %v is not ready. %v\", node.Name, err)\n\t\t\t\tif err = nat.ensureDeleteDNATRule(natProvider, dnatRule, natGatewayId); err != nil {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\"UpdateDNATRule Failed: %v\", err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif status {\n\t\t\t\tklog.V(4).Infof(\"The status of node %s is normal,no need to update DnatRule\", node.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif err = nat.ensureCreateDNATRule(natProvider, &servicePort, netPort, floatingIp, natGateway.Id); err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"UpdateDNATRule Failed: %v\", err))\n\t\t\tcontinue\n\t\t}\n\n\t}\n\n\tif len(errs) != 0 {\n\t\treturn utilerrors.NewAggregate(errs)\n\t}\n\treturn nil\n}", "func TestRemoveRequestedIP(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tsvc1 := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: 
\"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"io.cilium/lb-ipam-ips\": \"10.0.10.124,10.0.10.125\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.123\",\n\t\t},\n\t}\n\n\tfixture.coreCS.Tracker().Add(\n\t\tsvc1,\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 3 {\n\t\t\tt.Error(\"Expected service to receive exactly three ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 2 {\n\t\t\tt.Error(\"Expected service to receive exactly two ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tsvc1 = svc1.DeepCopy()\n\tsvc1.Annotations = map[string]string{\n\t\t\"io.cilium/lb-ipam-ips\": \"10.0.10.124\",\n\t}\n\n\t_, err := fixture.svcClient.Services(svc1.Namespace).Update(context.Background(), svc1, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif 
!fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.123\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.123' to be allocated\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.124\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.124' to be allocated\")\n\t}\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.125\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.125' to be released\")\n\t}\n}" ]
[ "0.66308504", "0.57295954", "0.53362983", "0.5327012", "0.5271462", "0.5241041", "0.52133095", "0.5211201", "0.51604736", "0.5054553", "0.49382243", "0.49274433", "0.4927198", "0.4926151", "0.48993278", "0.48872074", "0.48288015", "0.48288015", "0.48190293", "0.4803712", "0.47732303", "0.47438958", "0.4738468", "0.47056618", "0.4700377", "0.46983904", "0.46945104", "0.46810734", "0.46764493", "0.46681765", "0.4654498", "0.4648989", "0.4640643", "0.46386844", "0.46296638", "0.46238163", "0.46159893", "0.45882177", "0.4582044", "0.4573473", "0.45696837", "0.45693797", "0.45511672", "0.45502636", "0.4548985", "0.45363477", "0.45151448", "0.44983605", "0.44924578", "0.44733667", "0.44642153", "0.44634262", "0.44553998", "0.4448704", "0.44453532", "0.4445345", "0.44434232", "0.44314486", "0.44289985", "0.44082084", "0.4406803", "0.4400064", "0.43946618", "0.43905857", "0.43892395", "0.4385324", "0.43719885", "0.43640006", "0.4363736", "0.43538088", "0.43492812", "0.43464428", "0.433327", "0.43232286", "0.43126675", "0.43102345", "0.43031466", "0.4298551", "0.42879868", "0.42799997", "0.42776066", "0.42642877", "0.42635933", "0.4252867", "0.42511404", "0.4250754", "0.42457104", "0.4240931", "0.42383307", "0.42358238", "0.4234019", "0.4229859", "0.4229074", "0.42279196", "0.42270386", "0.42261127", "0.4220747", "0.42179093", "0.421654", "0.4209168" ]
0.7242526
0
IsNamespaced mocks base method
func (m *MockLoadBalance) IsNamespaced() bool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "IsNamespaced") ret0, _ := ret[0].(bool) return ret0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func isNamespaced(r corev3.Resource) bool {\n\tgr, ok := r.(corev3.GlobalResource)\n\tif !ok {\n\t\treturn true\n\t}\n\treturn !gr.IsGlobalResource()\n}", "func IsNamespaced(u *unstructured.Unstructured) bool {\n\tif u == nil {\n\t\treturn false\n\t}\n\treturn u.GetNamespace() != \"\"\n}", "func TestNamespacedCommands(t *testing.T) {\n\tconst contextNS = \"from-context\"\n\tconst flagNS = \"from-flag\"\n\tconst allNS = \"\"\n\n\ttestcases := []struct {\n\t\tname string\n\t\tcmd string\n\t\twantNS string\n\t}{\n\t\t{name: \"get instances with flag namespace\", cmd: \"get instances --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"get instances with context namespace\", cmd: \"get instances\", wantNS: contextNS},\n\t\t{name: \"get all instances\", cmd: \"get instances --all-namespaces\", wantNS: allNS},\n\n\t\t{name: \"describe instance with flag namespace\", cmd: \"describe instance NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"describe instance with context namespace\", cmd: \"describe instances NAME\", wantNS: contextNS},\n\n\t\t{name: \"provision with flag namespace\", cmd: \"provision --class CLASS --plan PLAN NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"provision with context namespace\", cmd: \"provision --class CLASS --plan PLAN NAME\", wantNS: contextNS},\n\n\t\t{name: \"deprovision with flag namespace\", cmd: \"deprovision NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"deprovision with context namespace\", cmd: \"deprovision NAME\", wantNS: contextNS},\n\n\t\t{name: \"bind with flag namespace\", cmd: \"bind NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"bind with context namespace\", cmd: \"bind NAME\", wantNS: contextNS},\n\n\t\t{name: \"unbind with flag namespace\", cmd: \"unbind NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"unbind with context namespace\", cmd: \"unbind NAME\", wantNS: contextNS},\n\n\t\t{name: \"get bindings with flag namespace\", cmd: \"get bindings 
--namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"get bindings with context namespace\", cmd: \"get bindings\", wantNS: contextNS},\n\t\t{name: \"get all bindings\", cmd: \"get bindings --all-namespaces\", wantNS: allNS},\n\n\t\t{name: \"describe binding with flag namespace\", cmd: \"describe binding NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"describe binding with context namespace\", cmd: \"describe binding NAME\", wantNS: contextNS},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tfakeClient := fake.NewSimpleClientset()\n\n\t\t\tcxt := newContext()\n\t\t\tcxt.App = &svcat.App{\n\t\t\t\tCurrentNamespace: contextNS,\n\t\t\t\tSDK: &servicecatalog.SDK{ServiceCatalogClient: fakeClient},\n\t\t\t}\n\t\t\tcxt.Output = ioutil.Discard\n\n\t\t\texecuteFakeCommand(t, tc.cmd, cxt, true)\n\n\t\t\tgotNamespace := fakeClient.Actions()[0].GetNamespace()\n\t\t\tif tc.wantNS != gotNamespace {\n\t\t\t\tt.Fatalf(\"the wrong namespace was used. 
WANT: %q, GOT: %q\", tc.wantNS, gotNamespace)\n\t\t\t}\n\t\t})\n\t}\n}", "func isKubeNamespace(ns string) bool {\n\treturn ns == metav1.NamespacePublic || ns == metav1.NamespaceSystem\n}", "func TestHasNamespace_NamespaceExisting(t *testing.T) {\n\tconst (\n\t\tprojectId = \"test-project-id\"\n\t\tprojectName = \"test-project-name\"\n\t\tclusterId = \"test-cluster-id\"\n\t\tnamespaceName = \"test-namespace\"\n\t)\n\tvar (\n\t\tactualListOpts *types.ListOpts\n\t\tclientConfig = ClientConfig{}\n\t\tnamespace = projectModel.Namespace{\n\t\t\tName: namespaceName,\n\t\t}\n\t\ttestClients = stubs.CreateBackendStubs(t)\n\t)\n\n\tnamespaceOperationsStub := stubs.CreateNamespaceOperationsStub(t)\n\tnamespaceOperationsStub.DoList = func(opts *types.ListOpts) (*clusterClient.NamespaceCollection, error) {\n\t\tactualListOpts = opts\n\t\treturn &clusterClient.NamespaceCollection{\n\t\t\tData: []clusterClient.Namespace{\n\t\t\t\tclusterClient.Namespace{\n\t\t\t\t\tName: namespaceName,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\n\t}\n\ttestClients.ClusterClient.Namespace = namespaceOperationsStub\n\n\tclient := rancherClient{\n\t\tclientConfig: clientConfig,\n\t\tprojectId: projectId,\n\t\tclusterClient: testClients.ClusterClient,\n\t\tnamespaceCache: make(map[string]clusterClient.Namespace),\n\t\tlogger: logrus.WithField(\"test\", true),\n\t}\n\t//Act\n\tresult, err := client.HasNamespace(namespace)\n\n\t//Assert\n\tassert.Ok(t, err)\n\tassert.Equals(t, true, result)\n\tassert.Equals(t, &types.ListOpts{Filters: map[string]interface{}{\"system\": \"false\", \"name\": \"test-namespace\"}}, actualListOpts)\n}", "func isKubeNamespace(ns string) bool {\n\treturn ns == v1.NamespacePublic || ns == v1.NamespaceSystem\n}", "func (_m *MockSeriesIterator) Namespace() ident.ID {\n\tret := _m.ctrl.Call(_m, \"Namespace\")\n\tret0, _ := ret[0].(ident.ID)\n\treturn ret0\n}", "func (m *MockWebhookResourceClient) GetSuperglooNamespace() (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, 
\"GetSuperglooNamespace\")\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLocalConfigProvider) GetNamespace() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetNamespace\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func TestHasNamespace_NamespaceNotExisting(t *testing.T) {\n\tconst (\n\t\tprojectId = \"test-project-id\"\n\t\tprojectName = \"test-project-name\"\n\t\tclusterId = \"test-cluster-id\"\n\t\tnamespaceName = \"test-namespace\"\n\t)\n\tvar (\n\t\tactualListOpts *types.ListOpts\n\t\tclientConfig = ClientConfig{}\n\t\tnamespace = projectModel.Namespace{\n\t\t\tName: namespaceName,\n\t\t}\n\t\ttestClients = stubs.CreateBackendStubs(t)\n\t)\n\n\tnamespaceOperationsStub := stubs.CreateNamespaceOperationsStub(t)\n\tnamespaceOperationsStub.DoList = func(opts *types.ListOpts) (*clusterClient.NamespaceCollection, error) {\n\t\tactualListOpts = opts\n\t\treturn &clusterClient.NamespaceCollection{\n\t\t\tData: []clusterClient.Namespace{},\n\t\t}, nil\n\n\t}\n\ttestClients.ClusterClient.Namespace = namespaceOperationsStub\n\n\tclient := rancherClient{\n\t\tclientConfig: clientConfig,\n\t\tprojectId: projectId,\n\t\tclusterClient: testClients.ClusterClient,\n\t\tlogger: logrus.WithField(\"test\", true),\n\t}\n\t//Act\n\tresult, err := client.HasNamespace(namespace)\n\n\t//Assert\n\tassert.Ok(t, err)\n\tassert.Equals(t, false, result)\n\tassert.Equals(t, &types.ListOpts{Filters: map[string]interface{}{\"system\": \"false\", \"name\": \"test-namespace\"}}, actualListOpts)\n}", "func TestGetNamespaceAllNamespacesNotDefined(t *testing.T) {\n\ttestCmd := testCommandGenerator(false)\n\texpectedNamespace := \"test1\"\n\ttestCmd.SetArgs([]string{\"--namespace\", expectedNamespace})\n\ttestCmd.Execute()\n\tkp := &KnParams{fixedCurrentNamespace: FakeNamespace}\n\tactualNamespace, err := kp.GetNamespace(testCmd)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif actualNamespace != expectedNamespace 
{\n\t\tt.Fatalf(\"Incorrect namespace retrieved: %v, expected: %v\", actualNamespace, expectedNamespace)\n\t}\n}", "func (mr *MockLoadBalanceMockRecorder) IsNamespaced() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IsNamespaced\", reflect.TypeOf((*MockLoadBalance)(nil).IsNamespaced))\n}", "func isSystemNamespace(fedNamespace, namespace string) bool {\n\tswitch namespace {\n\tcase \"kube-system\", \"kube-public\", \"default\", fedNamespace:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (m *MockManager) SerializeNamespace(arg0 string) error {\n\tret := m.ctrl.Call(m, \"SerializeNamespace\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestGetNamespaceSample(t *testing.T) {\n\ttestCmd := testCommandGenerator(true)\n\texpectedNamespace := \"test1\"\n\ttestCmd.SetArgs([]string{\"--namespace\", expectedNamespace})\n\ttestCmd.Execute()\n\tkp := &KnParams{fixedCurrentNamespace: FakeNamespace}\n\tactualNamespace, err := kp.GetNamespace(testCmd)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif actualNamespace != expectedNamespace {\n\t\tt.Fatalf(\"Incorrect namespace retrieved: %v, expected: %v\", actualNamespace, expectedNamespace)\n\t}\n\tkp = &KnParams{}\n\t// Mock ClientConfig to avoid clash with real kubeconfig on host OS\n\tkp.ClientConfig, _ = clientcmd.NewClientConfigFromBytes([]byte(BASIC_KUBECONFIG))\n\ttestCmd = testCommandGenerator(false)\n\tactualNamespace, err = kp.GetNamespace(testCmd)\n\tassert.NilError(t, err)\n\tassert.Equal(t, \"default\", actualNamespace)\n}", "func (m *MockLogic) NamespaceKeeper() core.NamespaceKeeper {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NamespaceKeeper\")\n\tret0, _ := ret[0].(core.NamespaceKeeper)\n\treturn ret0\n}", "func MockNamespace(ctrlr *Controller) Namespace {\n\tn := MockNamespacePB()\n\treturn Namespace{\n\t\tID: n.Id,\n\t\tSize: n.Capacity,\n\t\tCtrlrPciAddr: ctrlr.PCIAddr,\n\t}\n}", "func MockScmNamespace(varIdx ...int32) 
*ctlpb.ScmNamespace {\n\tnative := storage.MockScmNamespace(varIdx...)\n\tpb := new(ScmNamespace)\n\n\tif err := pb.FromNative(native); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn pb.AsProto()\n}", "func TestGetNamespace(t *testing.T) {\n\n\t// Test Data\n\tnamespaceName1 := \"TestNamespaceName1\"\n\tnamespaceName2 := \"TestNamespaceName2\"\n\tnamespaceSecret1 := \"TestNamespaceSecret1\"\n\tnamespaceSecret2 := \"TestNamespaceSecret2\"\n\n\t// Create A Mock HubManager\n\tmockHubManager := &MockHubManager{}\n\n\t// Replace The NewHubManagerFromConnectionString Wrapper To Provide Mock Implementation & Defer Reset\n\tnewHubManagerFromConnectionStringWrapperPlaceholder := NewHubManagerFromConnectionStringWrapper\n\tNewHubManagerFromConnectionStringWrapper = func(connectionString string) (managerInterface HubManagerInterface, e error) {\n\t\treturn mockHubManager, nil\n\t}\n\tdefer func() { NewHubManagerFromConnectionStringWrapper = newHubManagerFromConnectionStringWrapperPlaceholder }()\n\n\t// Create A Test Logger\n\tlogger := logtesting.TestLogger(t).Desugar()\n\n\t// Create The Cache's EventHub Map\n\tvar err error\n\teventHubMap := make(map[string]*Namespace)\n\teventHubMap[namespaceName1], err = createTestNamespaceWithSecret(logger, namespaceName1, namespaceSecret1)\n\tassert.NotNil(t, eventHubMap[namespaceName1])\n\tassert.Nil(t, err)\n\teventHubMap[namespaceName2], err = createTestNamespaceWithSecret(logger, namespaceName2, namespaceSecret2)\n\tassert.NotNil(t, eventHubMap[namespaceName2])\n\tassert.Nil(t, err)\n\n\t// Create A Cache To Test\n\tcache := &Cache{\n\t\tlogger: logger,\n\t\teventhubMap: eventHubMap,\n\t}\n\n\t// Perform The Test\n\tnamespace := cache.GetNamespace(namespaceName2)\n\n\t// Verify The Results\n\tassert.NotNil(t, namespace)\n\tassert.Equal(t, namespaceName2, namespace.Name)\n\tassert.Equal(t, namespaceSecret2, namespace.Secret)\n}", "func (m *MockAtomicLogic) NamespaceKeeper() core.NamespaceKeeper {\n\tm.ctrl.T.Helper()\n\tret := 
m.ctrl.Call(m, \"NamespaceKeeper\")\n\tret0, _ := ret[0].(core.NamespaceKeeper)\n\treturn ret0\n}", "func (m *MockKubeCoreCache) NamespaceLister() v1.NamespaceLister {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NamespaceLister\")\n\tret0, _ := ret[0].(v1.NamespaceLister)\n\treturn ret0\n}", "func assertContainsNamespace(organizations []*server.Organization, expectedOrganization string) {\n\n\tfor _, organziation := range organizations {\n\t\tif organziation.Name == expectedOrganization {\n\t\t\treturn\n\t\t}\n\t}\n\n\tFail(fmt.Sprintf(\"Could not find organziation %s in list returned\", expectedOrganization))\n}", "func (m *MockStore) WriteNamespaces(arg0 *Namespaces) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WriteNamespaces\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func IsResourceNamespaced(kind string) bool {\n\tswitch kind {\n\tcase \"Namespace\",\n\t\t\"Node\",\n\t\t\"PersistentVolume\",\n\t\t\"PodSecurityPolicy\",\n\t\t\"CertificateSigningRequest\",\n\t\t\"ClusterRoleBinding\",\n\t\t\"ClusterRole\",\n\t\t\"VolumeAttachment\",\n\t\t\"StorageClass\",\n\t\t\"CSIDriver\",\n\t\t\"CSINode\",\n\t\t\"ValidatingWebhookConfiguration\",\n\t\t\"MutatingWebhookConfiguration\",\n\t\t\"CustomResourceDefinition\",\n\t\t\"PriorityClass\",\n\t\t\"RuntimeClass\":\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func TestNamespace(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tnode Node\n\t\twant *metadata.NodeMetadata\n\t}{\n\t\t{\n\t\t\tname: \"datasource\",\n\t\t\tnode: &DataSourceBuilder{\n\t\t\t\tName: \"source\",\n\t\t\t\tAlias: \"x\",\n\t\t\t\tPartition: 0,\n\t\t\t\tCardinality: metadata.BoundedFitsInLocalStorage,\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.BoundedFitsInLocalStorage,\n\t\t\t\toctosql.NewVariableName(\"\"),\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"x\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"distinct\",\n\t\t\tnode: &Distinct{\n\t\t\t\tSource: 
&StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"a.event_time\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"a\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"a.event_time\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"a\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"orderby\",\n\t\t\tnode: &OrderBy{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"a.event_time\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"a\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"a.event_time\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"a\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"filter\",\n\t\t\tnode: &Filter{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\t\t\t\"event_time\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\tnil,\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\t\"event_time\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\tnil,\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"groupby - unbounded with event time\",\n\t\t\tnode: &GroupBy{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"a.event_time\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\tnil,\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tFields: []octosql.VariableName{\"a.event_time\", \"a.id\"},\n\t\t\t\tKey: []Expression{NewVariable(\"a.event_time\"), NewVariable(\"a.id\")},\n\t\t\t\tAggregates: []Aggregate{Key, Key},\n\t\t\t\tAs: 
[]octosql.VariableName{\"out_event_time\", \"\"},\n\t\t\t},\n\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"out_event_time\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\tnil,\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"groupby - bounded doesnt fit, no event time\",\n\t\t\tnode: &GroupBy{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\t\t\t\"a.event_time\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\tnil,\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tFields: []octosql.VariableName{\"b.event_time\", \"a.age\"},\n\t\t\t\tKey: []Expression{NewVariable(\"a.event_time\"), NewVariable(\"b.age\")},\n\t\t\t\tAggregates: []Aggregate{Key, Sum},\n\t\t\t\tAs: []octosql.VariableName{\"out_field\", \"\"},\n\t\t\t},\n\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.BoundedFitsInLocalStorage,\n\t\t\t\t\"\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\tnil,\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"requalifier\",\n\t\t\tnode: &Requalifier{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\toctosql.NewVariableName(\"event_field\"),\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"a\", \"b\", \"c\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tQualifier: \"q\",\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\toctosql.NewVariableName(\"q.event_field\"),\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"q\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"stream join\",\n\t\t\tnode: &StreamJoin{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\t\t\toctosql.NewVariableName(\"source_event_time\"),\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"a\", \"b\", 
\"c\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tJoined: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedFitsInLocalStorage,\n\t\t\t\t\t\toctosql.NewVariableName(\"joined_event_time\"),\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"c\", \"d\", \"e\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tEventTimeField: \"source_event_time\",\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\toctosql.NewVariableName(\"source_event_time\"),\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"a\", \"b\", \"c\", \"d\", \"e\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"lookup join\",\n\t\t\tnode: &LookupJoin{\n\t\t\t\tsource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\t\t\toctosql.NewVariableName(\"source_event_time\"),\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"a\", \"b\", \"c\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tjoined: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedFitsInLocalStorage,\n\t\t\t\t\t\toctosql.NewVariableName(\"joined_event_time\"),\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"c\", \"d\", \"e\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\toctosql.NewVariableName(\"source_event_time\"),\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"a\", \"b\", \"c\", \"d\", \"e\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"map test 1 - unqualified star\",\n\t\t\tnode: &Map{\n\t\t\t\tExpressions: []NamedExpression{NewStarExpression(\"\")},\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"x\", \"y\", 
\"z\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tKeep: false,\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"x\", \"y\", \"z\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"map test 2 - keep = true and qualified star no match\",\n\t\t\tnode: &Map{\n\t\t\t\tExpressions: []NamedExpression{NewStarExpression(\"q\")},\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"x\", \"y\", \"z\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tKeep: true,\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"x\", \"y\", \"z\"}, // don't add q because it's not in source metadata\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"map test 3 - keep = false and qualified star match\",\n\t\t\tnode: &Map{\n\t\t\t\tExpressions: []NamedExpression{NewStarExpression(\"x\"), NewStarExpression(\"y\")},\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"x\", \"y\", \"z\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tKeep: false,\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"x\", \"y\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tt.Run(fmt.Sprint(i), func(t *testing.T) {\n\t\t\tgot := tt.node.Metadata()\n\n\t\t\tareNamespacesEqual := got.Namespace().Equal(tt.want.Namespace())\n\n\t\t\tif got.EventTimeField() != tt.want.EventTimeField() || got.Cardinality() != tt.want.Cardinality() || !areNamespacesEqual {\n\t\t\t\tt.Errorf(\"Metadata() = %v, want %v\", got, 
tt.want)\n\t\t\t}\n\t\t})\n\t}\n}", "func (c *clusterCache) IsNamespaced(gk schema.GroupKind) (bool, error) {\n\tif isNamespaced, ok := c.namespacedResources[gk]; ok {\n\t\treturn isNamespaced, nil\n\t}\n\treturn false, errors.NewNotFound(schema.GroupResource{Group: gk.Group}, \"\")\n}", "func (ck *clusterKinds) namespaced(kind string) bool {\n\tck.update()\n\treturn ck.isNamespaced[kind]\n}", "func TestNamespace2(t *testing.T) {\n\ttestName := \"TestNamespace2\"\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{\n\t\t\"Application\": true,\n\t\t\"Deployment\": true,\n\t\t\"Service\": true,\n\t}\n\n\t// starting resources to pre-populate\n\tvar files = []string{\n\t\tnsListkAppNavConfigMap,\n\t\tCrdApplication,\n\t\tns1AnnoApplication,\n\t\tns1Service,\n\t\tns1Deployment,\n\t\tns2Service,\n\t\tns2Deployment,\n\t\tns3Service,\n\t\tns3Deployment,\n\t\tns4Service,\n\t\tns4Deployment,\n\t}\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: only ns1 and ns2 resources should have status */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\n\titeration0IDs[7].expectedStatus = NoStatus // ns3 Service\n\titeration0IDs[8].expectedStatus = NoStatus\n\titeration0IDs[9].expectedStatus = NoStatus // ns4 Service\n\titeration0IDs[10].expectedStatus = NoStatus\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t/* iteration 1: clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// run all the test actions\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (m *MockKubeCoreCache) NamespacedServiceLister(arg0 string) cache.ServiceLister {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, 
\"NamespacedServiceLister\", arg0)\n\tret0, _ := ret[0].(cache.ServiceLister)\n\treturn ret0\n}", "func (m *MockKeepers) NamespaceKeeper() core.NamespaceKeeper {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NamespaceKeeper\")\n\tret0, _ := ret[0].(core.NamespaceKeeper)\n\treturn ret0\n}", "func TestComponentNamespaces(t *testing.T) {\n\ttestName := \"TestComponentNamespaces\"\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{\n\t\tAPPLICATION: true,\n\t\t\"Deployment\": true,\n\t\t\"Service\": true,\n\t}\n\n\t// starting resources to pre-populate\n\tvar files = []string{\n\t\t/* 0 */ KappnavConfigFile,\n\t\t/* 1 */ CrdApplication,\n\t\t/* 2 */ ns1Service,\n\t\t/* 3 */ ns1Deployment,\n\t\t/* 4 */ ns1App,\n\t\t/* 5 */ ns2Service,\n\t\t/* 6 */ ns2Deployment,\n\t}\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: only namespace 1 resources should have status */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\titeration0IDs[5].expectedStatus = NoStatus // don't expect status to be checked for namespace 2\n\titeration0IDs[6].expectedStatus = NoStatus // don't expect status to be checked for namespace 2\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t// iteration 1: add annotation \"kappnav.components.namespaces\" : \"ns2\"\n\tarrayLength := len(iteration0IDs)\n\tvar iteration1IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration1IDs, iteration0IDs)\n\titeration1IDs[4].fileName = ns1aApp\n\titeration1IDs[4].expectedStatus = Normal\n\titeration1IDs[5].expectedStatus = Normal\n\titeration1IDs[6].expectedStatus = Normal\n\ttestActions.addIteration(iteration1IDs, emptyIDs)\n\n\t// iteration 2: Change the other namespace resource to Warning\n\tvar iteration2IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration2IDs, iteration1IDs)\n\titeration2IDs[4].expectedStatus = 
warning\n\titeration2IDs[5].expectedStatus = warning\n\ttestActions.addIteration(iteration2IDs, emptyIDs)\n\n\t// iteration 3: Change ns1 namespace resource to Problem\n\tvar iteration3IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration3IDs, iteration2IDs)\n\titeration3IDs[2].expectedStatus = problem\n\titeration3IDs[4].expectedStatus = problem\n\ttestActions.addIteration(iteration3IDs, emptyIDs)\n\n\t// iteration 4: Remove problem. Back to Warning.\n\tvar iteration4IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration4IDs, iteration3IDs)\n\titeration4IDs[2].expectedStatus = Normal\n\titeration4IDs[4].expectedStatus = warning\n\ttestActions.addIteration(iteration4IDs, emptyIDs)\n\n\t// iteration 5: Remove Warning . Back to Normal\n\tvar iteration5IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration5IDs, iteration4IDs)\n\titeration4IDs[4].expectedStatus = Normal\n\titeration4IDs[5].expectedStatus = Normal\n\ttestActions.addIteration(iteration5IDs, emptyIDs)\n\n\t/* clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// make all trasition of testAction\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (c *client) IsObjectNamespaced(obj runtime.Object) (bool, error) {\n\treturn apiutil.IsObjectNamespaced(obj, c.scheme, c.mapper)\n}", "func (m *MockqueueTaskInfo) GetNamespaceId() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetNamespaceId\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func getNamespace(deploymentID string, objectMeta metav1.ObjectMeta) (string, bool) {\n\tvar isProvided bool\n\tvar namespace string\n\tvar providedNamespace string\n\tif &objectMeta != nil {\n\t\tprovidedNamespace = objectMeta.Namespace\n\t}\n\n\tif providedNamespace != \"\" {\n\t\tnamespace = 
providedNamespace\n\t\tisProvided = true\n\t} else {\n\t\tnamespace, _ = defaultNamespace(deploymentID)\n\t}\n\n\treturn namespace, isProvided\n}", "func (_m *TaskExecutionMetadata) GetNamespace() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (m *MockStore) ReadNamespaces() (*Namespaces, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ReadNamespaces\")\n\tret0, _ := ret[0].(*Namespaces)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockCacheOptions) NamespaceDetails() []NamespaceDetails {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NamespaceDetails\")\n\tret0, _ := ret[0].([]NamespaceDetails)\n\treturn ret0\n}", "func (m *MockqueueTask) GetNamespaceId() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetNamespaceId\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m Metadata) HasNamespace() bool {\n\treturn m2o(m).Get(\"namespace\").IsStr()\n}", "func createTestNamespaceWithSecret(logger *zap.Logger, name string, secret string) (*Namespace, error) {\n\treturn NewNamespace(logger, name, name, name, secret, 0)\n}", "func (_m *Factory) Namespace() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func NamespaceForTest(t *testing.T, kubectl *kubectlContext, rl corev1.ResourceList) (string, func()) {\n\ttestName := t.Name()\n\tname := testName\n\tname = strings.ReplaceAll(name, \"_\", \"-\")\n\tname = strings.ReplaceAll(name, \"/\", \"-\")\n\tname = strings.ToLower(name)\n\tlabel := fmt.Sprintf(\"%s=%s\", testNameLabelKey, name)\n\trq := &corev1.ResourceQuota{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"ResourceQuota\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"compute-resources\",\n\t\t\tNamespace: 
name,\n\t\t},\n\t\tSpec: corev1.ResourceQuotaSpec{\n\t\t\tHard: rl.DeepCopy(),\n\t\t},\n\t}\n\tcleanup := func() {\n\t\tkubectl := kubectl.WithDefaultNamespace(name)\n\t\tt.Logf(\"Saving logs for namespace: %s\", name)\n\t\tkubectl.ClusterInfoDump(path.Join(*fOutputDirectory, name), name)\n\t\tt.Logf(\"Evicting all pods from namespace: %s\", name)\n\t\trq.Spec.Hard[corev1.ResourcePods] = resource.MustParse(\"0\")\n\t\terr := kubectl.ApplyObject(rq)\n\t\trequire.NoError(t, err)\n\t\terr = kubectl.Delete(\"pods\", \"--all\", \"--wait=false\")\n\t\trequire.NoError(t, err)\n\t}\n\tt.Log(\"Creating new namespace for test\")\n\tout, err := kubectl.Create(\"namespace\", name)\n\trequire.NoError(t, err, out)\n\n\tt.Log(\"Labelling namespace\")\n\tout, err = kubectl.Label(\"namespace\", name, label)\n\trequire.NoError(t, err, out)\n\n\tt.Log(\"Adding resource quota\")\n\tkubectl = kubectl.WithDefaultNamespace(name)\n\terr = kubectl.ApplyObject(rq)\n\trequire.NoError(t, err)\n\n\treturn name, cleanup\n}", "func (m *MockConfig) Namespace() string {\n\targs := m.Called()\n\treturn args.String(0)\n}", "func TestDefaultNamespace(t *testing.T) {\n\tcommon.CreateFile(tmpProp)\n\tcommon.WriteFile(tmpProp, []string{\"APIHOST=xyz\"})\n\n\tos.Setenv(\"WSK_CONFIG_FILE\", tmpProp)\n\tassert.Equal(t, os.Getenv(\"WSK_CONFIG_FILE\"), tmpProp, \"The environment variable WSK_CONFIG_FILE has not been set.\")\n\n\tstdout, err := wsk.RunCommand(\"property\", \"get\", \"-i\", \"--namespace\")\n\tassert.Equal(t, nil, err, \"The command property get -i --namespace failed to run.\")\n\tassert.Contains(t, common.RemoveRedundantSpaces(string(stdout)), common.PropDisplayNamespace+\" _\",\n\t\t\"The output of the command does not contain \"+common.PropDisplayCLIVersion+\" _\")\n\tcommon.DeleteFile(tmpProp)\n}", "func TestGetNamespaceAllNamespacesSet(t *testing.T) {\n\ttestCmd := testCommandGenerator(true)\n\texpectedNamespace := \"\"\n\tsampleNamespace := \"test1\"\n\n\t// Test both variants of the 
\"all namespaces\" flag\n\tfor _, arg := range []string{\"--all-namespaces\", \"-A\"} {\n\t\ttestCmd.SetArgs([]string{\"--namespace\", sampleNamespace, arg})\n\t\ttestCmd.Execute()\n\t\tkp := &KnParams{fixedCurrentNamespace: FakeNamespace}\n\t\tactualNamespace, err := kp.GetNamespace(testCmd)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif actualNamespace != expectedNamespace {\n\t\t\tt.Fatalf(\"Incorrect namespace retrieved: %v, expected: %v\", actualNamespace, expectedNamespace)\n\t\t}\n\t}\n\n}", "func (m *MockKubeCoreCache) NamespacedSecretLister(arg0 string) cache.SecretLister {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NamespacedSecretLister\", arg0)\n\tret0, _ := ret[0].(cache.SecretLister)\n\treturn ret0\n}", "func TestMakeNamespaceDependency(t *testing.T) {\n\tfoo := Foo{}\n\tt.Run(`Foo.Bare`, func(t *testing.T) {\n\t\td, err := makeDependency(foo.Bare)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfd, ok := d.(targetDep)\n\t\tif !ok {\n\t\t\tt.Errorf(`expected makeDependency to produce a targetDep`)\n\t\t}\n\t\tif string(fd) != name(foo.Bare) {\n\t\t\tt.Errorf(`expected name to match target`)\n\t\t}\n\t\tif fd.getRun().fn == nil {\n\t\t\tt.Errorf(`expected makeDependency to specify fn`)\n\t\t}\n\t})\n\tt.Run(`Foo.BareCtx`, func(t *testing.T) {\n\t\td, err := makeDependency(foo.BareCtx)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfd, ok := d.(targetDep)\n\t\tif !ok {\n\t\t\tt.Errorf(`expected makeDependency to produce a targetDep`)\n\t\t}\n\t\tif string(fd) != name(foo.BareCtx) {\n\t\t\tt.Errorf(`expected name to match target`)\n\t\t}\n\t\tif fd.getRun().fn == nil {\n\t\t\tt.Errorf(`expected makeDependency to specify fn`)\n\t\t}\n\t})\n\tt.Run(`Foo.Error`, func(t *testing.T) {\n\t\td, err := makeDependency(foo.Error)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfd, ok := d.(targetDep)\n\t\tif !ok {\n\t\t\tt.Errorf(`expected makeDependency to produce a targetDep`)\n\t\t}\n\t\tif string(fd) != name(foo.Error) 
{\n\t\t\tt.Errorf(`expected name to match target`)\n\t\t}\n\t\tif fd.getRun().fn == nil {\n\t\t\tt.Errorf(`expected makeDependency to specify fn`)\n\t\t}\n\t})\n\tt.Run(`Foo.CtxError`, func(t *testing.T) {\n\t\td, err := makeDependency(foo.CtxError)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfd, ok := d.(targetDep)\n\t\tif !ok {\n\t\t\tt.Errorf(`expected makeDependency to produce a targetDep`)\n\t\t}\n\t\tif string(fd) != name(foo.CtxError) {\n\t\t\tt.Errorf(`expected name to match target`)\n\t\t}\n\t})\n}", "func TestToUnstructWithNamespaceAddsNamespace(t *testing.T) {\n\tfor _, example := range []string{exampleResourceWithLabels, exampleResource} {\n\t\tb := []byte(example)\n\t\tnamespace := \"foo\"\n\t\tgot := ToUnstructWithNamespace(t, b, namespace)\n\t\tif got.GetNamespace() != namespace {\n\t\t\tt.Errorf(\"ToUnstructWithNamespace(%v) namespace not set: got '%v', want '%v'\", example, got.GetNamespace(), namespace)\n\t\t}\n\t\tlabels := got.GetLabels()\n\t\tif labels[\"cnrm-test\"] != \"true\" {\n\t\t\tt.Errorf(\"ToUnstructWithNamespace(%v) label cnrm-test not set: got '%v', want '%v'\", example, labels[\"cnrm-test\"], \"true\")\n\t\t}\n\t}\n}", "func createTestingNS(t *testing.T, baseName string, c clientset.Interface) (*v1.Namespace, error) {\n\t// We don't use ObjectMeta.GenerateName feature, as in case of API call\n\t// failure we don't know whether the namespace was created and what is its\n\t// name.\n\tname := fmt.Sprintf(\"%v-%v\", baseName, strconv.Itoa(rand.Intn(10000)))\n\n\tnamespaceObj := &v1.Namespace{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: \"\",\n\t\t},\n\t\tStatus: v1.NamespaceStatus{},\n\t}\n\t// Be robust about making the namespace creation call.\n\tvar got *v1.Namespace\n\tif err := wait.PollImmediate(nsCreationPollInterval, 30*time.Second, func() (bool, error) {\n\t\tvar err error\n\t\tgot, err = c.CoreV1().Namespaces().Create(context.TODO(), namespaceObj, metav1.CreateOptions{})\n\t\tif err != nil 
{\n\t\t\tif apierrors.IsAlreadyExists(err) {\n\t\t\t\t// regenerate on conflict\n\t\t\t\tt.Logf(\"Namespace name %q was already taken, generate a new name and retry\", namespaceObj.Name)\n\t\t\t\tnamespaceObj.Name = fmt.Sprintf(\"%v-%v\", baseName, strconv.Itoa(rand.Intn(10000)))\n\t\t\t} else {\n\t\t\t\tt.Logf(\"Unexpected error while creating namespace: %v\", err)\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn got, nil\n}", "func (s *server) namespaceExists(namespace string) error {\n\ts.logger.WithField(\"namespace\", namespace).Info(\"Checking existence of namespace.\")\n\n\tif _, err := s.kubeClient.CoreV1().Namespaces().Get(s.ctx, namespace, metav1.GetOptions{}); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\ts.logger.WithField(\"namespace\", namespace).Info(\"Namespace exists\")\n\treturn nil\n}", "func (m *MockQueue) FailoverNamespace(namespaceID string) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"FailoverNamespace\", namespaceID)\n}", "func (m *MockDocumentHandler) Namespace() string {\n\treturn m.namespace\n}", "func createTestNamespace(clientset kubernetes.Interface, prefix string) (string, error) {\n\ttemplate := &v1.Namespace{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: prefix,\n\t\t},\n\t}\n\tnamespace, err := clientset.CoreV1().Namespaces().Create(template)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create a namespace (error: %v)\", err)\n\t}\n\n\tlog.Infof(\"namespace %v is created\", namespace.GetName())\n\n\t// Create Role\n\terr = createIstioCARole(clientset, namespace.GetName())\n\tif err != nil {\n\t\t_ = deleteTestNamespace(clientset, namespace.GetName())\n\t\treturn \"\", fmt.Errorf(\"failed to create a role (error: %v)\", err)\n\t}\n\n\t// Create RoleBinding\n\terr = createIstioCARoleBinding(clientset, namespace.GetName())\n\tif err != nil {\n\t\t_ = deleteTestNamespace(clientset, namespace.GetName())\n\t\treturn \"\", 
fmt.Errorf(\"failed to create a rolebinding (error: %v)\", err)\n\t}\n\n\treturn namespace.GetName(), nil\n}", "func (m *MockCache) InfoFilesForNamespace(ns namespace.Metadata) (InfoFileResultsPerShard, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InfoFilesForNamespace\", ns)\n\tret0, _ := ret[0].(InfoFileResultsPerShard)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestGetNamespaceDefault(t *testing.T) {\n\ttestCmd := testCommandGenerator(true)\n\texpectedNamespace := \"current\"\n\ttestCmd.Execute()\n\tkp := &KnParams{fixedCurrentNamespace: FakeNamespace}\n\tactualNamespace, err := kp.GetNamespace(testCmd)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif actualNamespace != expectedNamespace {\n\t\tt.Fatalf(\"Incorrect namespace retrieved: %v, expected: %v\", actualNamespace, expectedNamespace)\n\t}\n}", "func (n *Name) IsFullyQualified() bool {\n\treturn strings.HasPrefix(n.Value, `\\`)\n}", "func (m *MockContext) GetNamespaceCache() cache.NamespaceCache {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetNamespaceCache\")\n\tret0, _ := ret[0].(cache.NamespaceCache)\n\treturn ret0\n}", "func Test_ServiceInstancePath_HasDirectParent_fail2(t *testing.T) {\n\n\t// Setup the DB for the UT environment\n\tdir, db, err := utsetup()\n\tif err != nil {\n\t\tt.Errorf(\"Error setting up UT DB: %v\", err)\n\t}\n\n\tdefer cleanTestDir(dir)\n\n\t// Setup initial variable values\n\tparentURL := \"url1\"\n\tparentOrg := \"myorg\"\n\tparentVersion := \"1.0.0\"\n\tchildURL := \"url2\"\n\tchildOrg := \"childorg\"\n\tchildVersion := \"2.0.0\"\n\tchild2URL := \"url3\"\n\tchild2Org := \"child2org\"\n\tchild2Version := \"3.0.0\"\n\n\t// Establish the dependency path objects\n\tparent := NewServiceInstancePathElement(parentURL, parentOrg, parentVersion)\n\tchild := NewServiceInstancePathElement(childURL, childOrg, childVersion)\n\tchild2 := NewServiceInstancePathElement(child2URL, child2Org, child2Version)\n\n\tdepPath := []ServiceInstancePathElement{*parent, 
*child, *child2}\n\t// Create the test microservice instance to represent the child\n\tif msi, err := NewMicroserviceInstance(db, child2URL, child2Org, child2Version, \"1234\", depPath, false); err != nil {\n\t\tt.Errorf(\"Error creating instance: %v\", err)\n\t} else if msi.HasDirectParent(parent) {\n\t\tt.Errorf(\"Child %v does not have direct parent: %v\", child2, depPath)\n\t}\n}", "func doIt(id resid.ResId, fs *config.FieldSpec) bool {\n\treturn fs.Path != metaNamespace ||\n\t\t(fs.Path == metaNamespace && id.IsNamespaceableKind())\n}", "func ValidNamespace(ctx Context, resource *ObjectMeta) bool {\n\tns, ok := NamespaceFrom(ctx)\n\tif len(resource.Namespace) == 0 {\n\t\tresource.Namespace = ns\n\t}\n\treturn ns == resource.Namespace && ok\n}", "func ServiceName(n echo.NamespacedName) Matcher {\n\treturn func(i echo.Instance) bool {\n\t\treturn n == i.NamespacedName()\n\t}\n}", "func skipTestNamespaceCustomization() bool {\n\ttestName := ginkgo.CurrentSpecReport().FullText()\n\treturn strings.Contains(testName, \"should always delete fast\") || strings.Contains(testName, \"should delete fast enough\")\n}", "func Test_ServiceInstancePath_HasDirectParent_simple(t *testing.T) {\n\n\t// Setup the DB for the UT environment\n\tdir, db, err := utsetup()\n\tif err != nil {\n\t\tt.Errorf(\"Error setting up UT DB: %v\", err)\n\t}\n\n\tdefer cleanTestDir(dir)\n\n\t// Setup initial variable values\n\tparentURL := \"url1\"\n\tparentVersion := \"1.0.0\"\n\tparentOrg := \"myorg\"\n\tchildURL := \"url2\"\n\tchildOrg := \"childorg\"\n\tchildVersion := \"2.0.0\"\n\n\t// Establish the dependency path objects\n\tparent := NewServiceInstancePathElement(parentURL, parentOrg, parentVersion)\n\tchild := NewServiceInstancePathElement(childURL, childOrg, childVersion)\n\n\tdepPath := []ServiceInstancePathElement{*parent, *child}\n\t// Create the test microservice instance to represent the child\n\tif msi, err := NewMicroserviceInstance(db, childURL, childOrg, childVersion, \"1234\", 
depPath, false); err != nil {\n\t\tt.Errorf(\"Error creating instance: %v\", err)\n\t} else if !msi.HasDirectParent(parent) {\n\t\tt.Errorf(\"Child %v has direct parent: %v\", child, depPath)\n\t}\n}", "func TestGetLeastPopulatedNamespace(t *testing.T) {\n\n\t// Test Data\n\tnamespaceName1 := \"TestNamespaceName1\"\n\tnamespaceName2 := \"TestNamespaceName2\"\n\tnamespaceName3 := \"TestNamespaceName3\"\n\tnamespaceName4 := \"TestNamespaceName4\"\n\tnamespaceName5 := \"TestNamespaceName5\"\n\tnamespaceCount1 := 4\n\tnamespaceCount2 := 2\n\tnamespaceCount3 := 0\n\tnamespaceCount4 := 2\n\tnamespaceCount5 := 1\n\n\t// Create A Mock HubManager\n\tmockHubManager := &MockHubManager{}\n\n\t// Replace The NewHubManagerFromConnectionString Wrapper To Provide Mock Implementation & Defer Reset\n\tnewHubManagerFromConnectionStringWrapperPlaceholder := NewHubManagerFromConnectionStringWrapper\n\tNewHubManagerFromConnectionStringWrapper = func(connectionString string) (managerInterface HubManagerInterface, e error) {\n\t\treturn mockHubManager, nil\n\t}\n\tdefer func() { NewHubManagerFromConnectionStringWrapper = newHubManagerFromConnectionStringWrapperPlaceholder }()\n\n\t// Create A Test Logger\n\tlogger := logtesting.TestLogger(t).Desugar()\n\n\t// Create The Cache's Namespace Map\n\tnamespaceMap := make(map[string]*Namespace)\n\tnamespaceMap[namespaceName1], _ = createTestNamespaceWithCount(logger, namespaceName1, namespaceCount1)\n\tnamespaceMap[namespaceName2], _ = createTestNamespaceWithCount(logger, namespaceName2, namespaceCount2)\n\tnamespaceMap[namespaceName3], _ = createTestNamespaceWithCount(logger, namespaceName3, namespaceCount3)\n\tnamespaceMap[namespaceName4], _ = createTestNamespaceWithCount(logger, namespaceName4, namespaceCount4)\n\tnamespaceMap[namespaceName5], _ = createTestNamespaceWithCount(logger, namespaceName5, namespaceCount5)\n\n\t// Create A Cache To Test\n\tcache := &Cache{\n\t\tlogger: logger,\n\t\tnamespaceMap: namespaceMap,\n\t}\n\n\t// Perform 
The Test\n\tnamespace := cache.GetLeastPopulatedNamespace()\n\n\t// Verify Results\n\tassert.NotNil(t, namespace)\n\tassert.Equal(t, namespaceName3, namespace.Name)\n\tassert.Equal(t, namespaceCount3, namespace.Count)\n}", "func (r Resource) namespaceExists(namespace string, response *restful.Response) bool {\n\t_, err := r.K8sClient.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})\n\tif err != nil {\n\t\terrorMessage := fmt.Sprintf(\"error: namespace does not exist: '%s'.\", namespace)\n\t\tutils.RespondMessageAndLogError(response, err, errorMessage, http.StatusBadRequest)\n\t\treturn false\n\t}\n\treturn true\n}", "func TestImplementationsExist(srcPrefix, dstPrefix string) error {\n\tswitch srcPrefix {\n\tcase \"k8s\":\n\tcase \"s3\":\n\tcase \"abs\":\n\tcase \"gcs\":\n\tdefault:\n\t\treturn fmt.Errorf(srcPrefix + \" not implemented\")\n\t}\n\n\tswitch dstPrefix {\n\tcase \"k8s\":\n\tcase \"s3\":\n\tcase \"abs\":\n\tcase \"gcs\":\n\tdefault:\n\t\treturn fmt.Errorf(dstPrefix + \" not implemented\")\n\t}\n\n\treturn nil\n}", "func Namespace(featureName FeatureName, componentName ComponentName, controlPlaneSpec *v1alpha2.IstioControlPlaneSpec) (string, error) {\n\tdefaultNamespaceI, found, err := tpath.GetFromStructPath(controlPlaneSpec, \"DefaultNamespace\")\n\tif !found {\n\t\treturn \"\", fmt.Errorf(\"can't find any setting for defaultNamespace for feature=%s, component=%s\", featureName, componentName)\n\t}\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error in Namepsace for feature=%s, component=%s: %s\", featureName, componentName, err)\n\n\t}\n\tdefaultNamespace, ok := defaultNamespaceI.(string)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"defaultNamespace has bad type %T, expect string\", defaultNamespaceI)\n\t}\n\tif defaultNamespace == \"\" {\n\t\treturn \"\", fmt.Errorf(\"defaultNamespace must be set\")\n\t}\n\n\tfeatureNamespace := defaultNamespace\n\tfeatureNodeI, found, err := tpath.GetFromStructPath(controlPlaneSpec, 
string(featureName)+\".Components.Namespace\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error in Namepsace GetFromStructPath featureNamespace for feature=%s, component=%s: %s\", featureName, componentName, err)\n\t}\n\tif found && featureNodeI != nil {\n\t\tfeatureNamespace, ok = featureNodeI.(string)\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"feature %s namespace has bad type %T, expect string\", featureName, featureNodeI)\n\t\t}\n\t\tif featureNamespace == \"\" {\n\t\t\tfeatureNamespace = defaultNamespace\n\t\t}\n\t}\n\n\tcomponentNodeI, found, err := tpath.GetFromStructPath(controlPlaneSpec, string(featureName)+\".Components.\"+string(componentName)+\".Namespace\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error in Namepsace GetFromStructPath componentNamespace for feature=%s, component=%s: %s\", featureName, componentName, err)\n\t}\n\tif !found {\n\t\treturn featureNamespace, nil\n\t}\n\tif componentNodeI == nil {\n\t\treturn featureNamespace, nil\n\t}\n\tcomponentNamespace, ok := componentNodeI.(string)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"component %s enabled has bad type %T, expect string\", componentName, componentNodeI)\n\t}\n\tif componentNamespace == \"\" {\n\t\treturn featureNamespace, nil\n\t}\n\treturn componentNamespace, nil\n}", "func (m *MockIDistributedEnforcer) HasNamedGroupingPolicy(arg0 string, arg1 ...interface{}) bool {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0}\n\tfor _, a := range arg1 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"HasNamedGroupingPolicy\", varargs...)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func IsMock(name string) bool {\n\treturn name == Mock\n}", "func IsObjectInOtherNamespace(namespace string) (bool, error) {\n\twatchNamespace, err := k8sutil.GetWatchNamespace()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif namespace != watchNamespace {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func (o *InstanceStatusKubernetes) HasNamespace() 
bool {\n\tif o != nil && o.Namespace != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *MockKubeCoreCache) NamespacedPodLister(arg0 string) cache.PodLister {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NamespacedPodLister\", arg0)\n\tret0, _ := ret[0].(cache.PodLister)\n\treturn ret0\n}", "func namespace(t *testing.T) (*corev1.Namespace, func()) {\n\tnamespace, ok := namespaces[t.Name()]\n\tif !ok {\n\t\tfmt.Fprintf(os.Stderr, \"Error: test case %s did not have a namespace set up\\n\", t.Name())\n\t\tos.Exit(ExitCodeCantCreateCluster)\n\t}\n\n\tcleanup := func() {\n\t\tassert.NoError(t, clusters.CleanupGeneratedResources(ctx, env.Cluster(), t.Name()))\n\t}\n\n\treturn namespace, cleanup\n}", "func (strategy) NamespaceScoped() bool {\n\treturn true\n}", "func Test_HasPrefix(t *testing.T) {\n\tvalue := \"main.go\"\n\tprefix := \"main\"\n\tif !strings.HasPrefix(value, prefix) {\n\t\tt.Fatalf(\"expected %s to have prefix %s\", value, prefix)\n\t}\n}", "func checkNamespaces(podSpec PodSpecV1, result *Result) {\n\tif labelExists, reason := getPodOverrideLabelReason(result, \"allow-namespace-host-network\"); labelExists {\n\t\tif podSpec.HostNetwork {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorNamespaceHostNetworkTrueAllowed,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostNetwork to true\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t} else {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorMisconfiguredKubeauditAllow,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostNetwork to true, but it is set to false\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t}\n\t} else if podSpec.HostNetwork {\n\t\tocc := Occurrence{\n\t\t\tpodHost: podSpec.Hostname,\n\t\t\tid: 
ErrorNamespaceHostNetworkTrue,\n\t\t\tkind: Error,\n\t\t\tmessage: \"hostNetwork is set to true in podSpec, please set to false!\",\n\t\t}\n\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t}\n\tif labelExists, reason := getPodOverrideLabelReason(result, \"allow-namespace-host-IPC\"); labelExists {\n\t\tif podSpec.HostIPC {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorNamespaceHostIPCTrueAllowed,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostIPC to true\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t} else {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorMisconfiguredKubeauditAllow,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostIPC to true, but it is set to false\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t}\n\t} else if podSpec.HostIPC {\n\t\tocc := Occurrence{\n\t\t\tpodHost: podSpec.Hostname,\n\t\t\tid: ErrorNamespaceHostIPCTrue,\n\t\t\tkind: Error,\n\t\t\tmessage: \"hostIPC is set to true in podSpec, please set to false!\",\n\t\t}\n\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t}\n\tif labelExists, reason := getPodOverrideLabelReason(result, \"allow-namespace-host-PID\"); labelExists {\n\t\tif podSpec.HostPID {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorNamespaceHostPIDTrueAllowed,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostPID to true\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t} else {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorMisconfiguredKubeauditAllow,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostPID to true, but it is set to 
false\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t}\n\t} else if podSpec.HostPID {\n\t\tocc := Occurrence{\n\t\t\tpodHost: podSpec.Hostname,\n\t\t\tid: ErrorNamespaceHostPIDTrue,\n\t\t\tkind: Error,\n\t\t\tmessage: \"hostPID is set to true in podSpec, please set to false!\",\n\t\t}\n\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t}\n\treturn\n}", "func Namespaced(t Type, ns string) Type {\n\treturn namespacedWrapper{\n\t\tns: ns,\n\t\tt: t,\n\t}\n}", "func (m *MockClusterScoper) ExtendedLocationName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ExtendedLocationName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func TestNamespace4(t *testing.T) {\n\ttestName := \"TestNamespace4\"\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{\n\t\t\"Application\": true,\n\t\t\"Deployment\": true,\n\t\t\"Service\": true,\n\t}\n\n\t// starting resources to pre-populate\n\tvar files = []string{\n\t\tnsNolistkAppNavConfigMap,\n\t\tCrdApplication,\n\t\tns1AnnoApplication,\n\t\tns1Service,\n\t\tns1Deployment,\n\t\tns2Service,\n\t\tns2Deployment,\n\t\tns3Service,\n\t\tns3Deployment,\n\t\tns4Service,\n\t\tns4Deployment,\n\t}\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: only ns1, ns2, ns3 resources should have status */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\n\titeration0IDs[9].expectedStatus = NoStatus // ns4 Service\n\titeration0IDs[10].expectedStatus = NoStatus\n\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t/* iteration 1: clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// run all the test 
actions\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func Test_ServiceInstancePath_HasDirectParent_fail1(t *testing.T) {\n\n\t// Setup the DB for the UT environment\n\tdir, db, err := utsetup()\n\tif err != nil {\n\t\tt.Errorf(\"Error setting up UT DB: %v\", err)\n\t}\n\n\tdefer cleanTestDir(dir)\n\n\t// Setup initial variable values\n\tparentURL := \"url1\"\n\tparentOrg := \"myorg\"\n\tparentVersion := \"1.0.0\"\n\tchildURL := \"url2\"\n\tchildOrg := \"childorg\"\n\tchildVersion := \"2.0.0\"\n\n\t// Establish the dependency path objects\n\tparent := NewServiceInstancePathElement(parentURL, parentOrg, parentVersion)\n\tchild := NewServiceInstancePathElement(childURL, childOrg, childVersion)\n\n\tnotParent := NewServiceInstancePathElement(\"other\", parentOrg, parentVersion)\n\n\tdepPath := []ServiceInstancePathElement{*parent, *child}\n\t// Create the test microservice instance to represent the child\n\tif msi, err := NewMicroserviceInstance(db, childURL, childOrg, childVersion, \"1234\", depPath, false); err != nil {\n\t\tt.Errorf(\"Error creating instance: %v\", err)\n\t} else if msi.HasDirectParent(notParent) {\n\t\tt.Errorf(\"Child %v does not have direct parent: %v\", child, depPath)\n\t}\n}", "func (mock *HarborRepositoryInterfaceMock) GetNamespacedCalls() []struct {\n\tNamespace string\n\tName string\n\tOpts v1.GetOptions\n} {\n\tvar calls []struct {\n\t\tNamespace string\n\t\tName string\n\t\tOpts v1.GetOptions\n\t}\n\tlockHarborRepositoryInterfaceMockGetNamespaced.RLock()\n\tcalls = mock.calls.GetNamespaced\n\tlockHarborRepositoryInterfaceMockGetNamespaced.RUnlock()\n\treturn calls\n}", "func createNamespace(t *testing.T, name string) *corev1.Namespace {\n\tt.Helper()\n\n\tt.Logf(\"Creating namespace %q...\", name)\n\tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}\n\tif err := kclient.Create(context.TODO(), ns); err != nil {\n\t\tt.Fatalf(\"failed to create namespace: %v\", 
err)\n\t}\n\tt.Cleanup(func() {\n\t\tt.Logf(\"Dumping events in namespace %q...\", name)\n\t\tif t.Failed() {\n\t\t\tdumpEventsInNamespace(t, name)\n\t\t}\n\t\tt.Logf(\"Deleting namespace %q...\", name)\n\t\tif err := kclient.Delete(context.TODO(), ns); err != nil {\n\t\t\tt.Errorf(\"failed to delete namespace %s: %v\", ns.Name, err)\n\t\t}\n\t})\n\n\tsaName := types.NamespacedName{\n\t\tNamespace: name,\n\t\tName: \"default\",\n\t}\n\tt.Logf(\"Waiting for ServiceAccount %s to be provisioned...\", saName)\n\tif err := wait.PollImmediate(1*time.Second, 3*time.Minute, func() (bool, error) {\n\t\tvar sa corev1.ServiceAccount\n\t\tif err := kclient.Get(context.TODO(), saName, &sa); err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn false, err\n\t\t}\n\t\tfor _, s := range sa.Secrets {\n\t\t\tif strings.Contains(s.Name, \"dockercfg\") {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\treturn false, nil\n\t}); err != nil {\n\t\tt.Fatalf(`Timed out waiting for ServiceAccount %s to be provisioned: %v`, saName, err)\n\t}\n\n\trbName := types.NamespacedName{\n\t\tNamespace: name,\n\t\tName: \"system:image-pullers\",\n\t}\n\tt.Logf(\"Waiting for RoleBinding %s to be created...\", rbName)\n\tif err := wait.PollImmediate(1*time.Second, 3*time.Minute, func() (bool, error) {\n\t\tvar rb rbacv1.RoleBinding\n\t\tif err := kclient.Get(context.TODO(), rbName, &rb); err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn false, err\n\t\t}\n\t\treturn true, nil\n\t}); err != nil {\n\t\tt.Fatalf(`Timed out waiting for RoleBinding \"default\" to be provisioned: %v`, err)\n\t}\n\n\treturn ns\n}", "func createNamespaceSpec(nsName string) *v1.Namespace {\n\tvar namespace = &v1.Namespace{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Namespace\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: nsName,\n\t\t},\n\t}\n\treturn namespace\n}", "func TestPathEmpty(linuxSpec *specs.LinuxSpec, 
linuxRuntimeSpec *specs.LinuxRuntimeSpec, hostNamespacePath string) (string, error) {\n\t//1. output json file for runc\n\tconfigFile := \"./config.json\"\n\truntimeFile := \"./runtime.json\"\n\terr := configconvert.LinuxSpecToConfig(configFile, linuxSpec)\n\terr = configconvert.LinuxRuntimeToConfig(runtimeFile, linuxRuntimeSpec)\n\tif err != nil {\n\t\tlog.Fatalf(\"write config error, %v\", err)\n\t}\n\n\t//2. get container's pid namespace after executing runc\n\tout, err := adaptor.StartRunc(configFile, runtimeFile)\n\tif err != nil {\n\t\treturn manager.UNSPPORTED, errors.New(string(out) + err.Error())\n\t}\n\tcontainerNs := strings.TrimSuffix(string(out), \"\\n\")\n\tcontainerNs = strings.TrimSpace(containerNs)\n\tif containerNs == \"\" {\n\t\tlog.Fatalf(\"can not find namespace in container.\")\n\t}\n\n\t//3. get host's all pid namespace\n\tcmd := \"readlink \" + hostNamespacePath + \"|sort -u\"\n\thostNs, err := getHostNs(cmd)\n\tif err != nil {\n\t\tlog.Fatalf(\"get host namespace error,%v\\n\", err)\n\t}\n\n\t//4. 
juge if the container's pid namespace is not in host namespaces\n\tvar result string\n\tif strings.Contains(hostNs, containerNs) {\n\t\tresult = manager.FAILED\n\t} else {\n\t\tresult = manager.PASSED\n\t}\n\n\treturn result, nil\n}", "func (m *MockInterface) ClusterGenevaLoggingNamespace() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ClusterGenevaLoggingNamespace\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func Test_ServiceInstancePath_HasDirectParent_fail3(t *testing.T) {\n\n\t// Setup the DB for the UT environment\n\tdir, db, err := utsetup()\n\tif err != nil {\n\t\tt.Errorf(\"Error setting up UT DB: %v\", err)\n\t}\n\n\tdefer cleanTestDir(dir)\n\n\t// Setup initial variable values\n\tparentURL := \"url1\"\n\tparentOrg := \"myorg\"\n\tparentVersion := \"1.0.0\"\n\tchildURL := \"url2\"\n\tchildOrg := \"childorg2\"\n\tchildVersion := \"2.0.0\"\n\tchild2URL := \"url3\"\n\tchild2Org := \"child2Org\"\n\tchild2Version := \"3.0.0\"\n\n\t// Establish the dependency path objects\n\tparent := NewServiceInstancePathElement(parentURL, parentOrg, parentVersion)\n\tchild := NewServiceInstancePathElement(childURL, childOrg, childVersion)\n\tchild2 := NewServiceInstancePathElement(child2URL, child2Org, child2Version)\n\n\tnotParent := NewServiceInstancePathElement(\"other\", parentOrg, parentVersion)\n\n\tdepPath := []ServiceInstancePathElement{*parent, *child, *child2}\n\tdp2 := []ServiceInstancePathElement{*parent, *child2}\n\n\t// Create the test microservice instance to represent the child\n\tif msi, err := NewMicroserviceInstance(db, child2URL, child2Org, child2Version, \"1234\", depPath, false); err != nil {\n\t\tt.Errorf(\"Error creating instance: %v\", err)\n\t} else if _, err := UpdateMSInstanceAddDependencyPath(db, msi.GetKey(), &dp2); err != nil {\n\t\tt.Errorf(\"Error updating instance: %v\", err)\n\t} else if msi.HasDirectParent(notParent) {\n\t\tt.Errorf(\"Child %v does not have direct parent: %v %v\", child2, depPath, dp2)\n\t}\n}", "func 
TestEncodeDecodeKey(t *testing.T) {\n\tt.Parallel()\n\n\tns := \"some-namespace\"\n\tdomain := \"example.google.com\"\n\tnn := types.NamespacedName{\n\t\tNamespace: ns,\n\t\tName: domain,\n\t}\n\n\tactualNs, actualDomain, err := cache.SplitMetaNamespaceKey(nn.String())\n\ttestutil.AssertNil(t, \"err\", err)\n\ttestutil.AssertEqual(t, \"namespace\", ns, actualNs)\n\ttestutil.AssertEqual(t, \"domain\", domain, actualDomain)\n}", "func (r Resource) Namespaced() bool {\n\treturn r.namespaced\n}", "func Namespaced(prefix string) Namespace {\n\treturn Namespace{\n\t\tprefix: prefix,\n\t}\n}", "func createNS(ctx context.Context, prefix string) string {\n\tnm := createNSName(prefix)\n\n\t// Create the namespace\n\tns := &corev1.Namespace{}\n\tns.Name = nm\n\tExpect(k8sClient.Create(ctx, ns)).Should(Succeed())\n\n\t// Wait for the Hierarchy singleton to be created\n\tsnm := types.NamespacedName{Namespace: nm, Name: tenancy.Singleton}\n\thier := &tenancy.Hierarchy{}\n\tEventually(func() error {\n\t\treturn k8sClient.Get(ctx, snm, hier)\n\t}).Should(Succeed())\n\n\treturn nm\n}", "func (mock *PodSecurityPolicyTemplateInterfaceMock) GetNamespacedCalls() []struct {\n\tNamespace string\n\tName string\n\tOpts metav1.GetOptions\n} {\n\tvar calls []struct {\n\t\tNamespace string\n\t\tName string\n\t\tOpts metav1.GetOptions\n\t}\n\tlockPodSecurityPolicyTemplateInterfaceMockGetNamespaced.RLock()\n\tcalls = mock.calls.GetNamespaced\n\tlockPodSecurityPolicyTemplateInterfaceMockGetNamespaced.RUnlock()\n\treturn calls\n}", "func AnyServiceName(expected echo.NamespacedNames) Matcher {\n\treturn func(instance echo.Instance) bool {\n\t\tserviceName := instance.NamespacedName()\n\t\tfor _, expectedName := range expected {\n\t\t\tif serviceName == expectedName {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n}", "func (c *clientWrapper) lookupNamespace(ns string) string {\n\tif c.isNamespaceAll {\n\t\treturn metav1.NamespaceAll\n\t}\n\treturn ns\n}", "func TestNamespace1(t 
*testing.T) {\n\ttestName := \"TestNamespace1\"\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{\n\t\t\"Application\": true,\n\t\t\"Deployment\": true,\n\t\t\"Service\": true,\n\t}\n\n\t// starting resources to pre-populate\n\tvar files = []string{\n\t\tnsListkAppNavConfigMap,\n\t\tCrdApplication,\n\t\tns4NoAnnoApplication,\n\t\tns1Service,\n\t\tns1Deployment,\n\t\tns2Service,\n\t\tns2Deployment,\n\t\tns3Service,\n\t\tns3Deployment,\n\t\tns4Service,\n\t\tns4Deployment,\n\t}\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: no resources should have status */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\n\titeration0IDs[2].expectedStatus = NoStatus // ns4 Application\n\titeration0IDs[3].expectedStatus = NoStatus // ns1 Service\n\titeration0IDs[4].expectedStatus = NoStatus\n\titeration0IDs[5].expectedStatus = NoStatus // ns2 Service\n\titeration0IDs[6].expectedStatus = NoStatus\n\titeration0IDs[7].expectedStatus = NoStatus // ns3 Service\n\titeration0IDs[8].expectedStatus = NoStatus\n\titeration0IDs[9].expectedStatus = NoStatus // ns4 Service\n\titeration0IDs[10].expectedStatus = NoStatus\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t/* iteration 1: clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// run all the test actions\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func Test_ServiceInstancePath_HasDirectParent_second(t *testing.T) {\n\n\t// Setup the DB for the UT environment\n\tdir, db, err := utsetup()\n\tif err != nil {\n\t\tt.Errorf(\"Error setting up UT DB: %v\", err)\n\t}\n\n\tdefer cleanTestDir(dir)\n\n\t// Setup initial variable values\n\tparentURL := 
\"url1\"\n\tparentOrg := \"myorg\"\n\tparentVersion := \"1.0.0\"\n\tchildURL := \"url2\"\n\tchildOrg := \"childorg\"\n\tchildVersion := \"2.0.0\"\n\tchild2URL := \"url3\"\n\tchild2Org := \"childorg3\"\n\tchild2Version := \"3.0.0\"\n\n\t// Establish the dependency path objects\n\tparent := NewServiceInstancePathElement(parentURL, parentOrg, parentVersion)\n\tchild := NewServiceInstancePathElement(childURL, childOrg, childVersion)\n\tchild2 := NewServiceInstancePathElement(child2URL, child2Org, child2Version)\n\n\tdepPath := []ServiceInstancePathElement{*parent, *child, *child2}\n\tdp2 := []ServiceInstancePathElement{*parent, *child2}\n\n\t// Create the test microservice instance to represent the child\n\tif msi, err := NewMicroserviceInstance(db, child2URL, child2Org, child2Version, \"1234\", depPath, false); err != nil {\n\t\tt.Errorf(\"Error creating instance: %v\", err)\n\t} else if newmsi, err := UpdateMSInstanceAddDependencyPath(db, msi.GetKey(), &dp2); err != nil {\n\t\tt.Errorf(\"Error updating instance: %v\", err)\n\t} else if !newmsi.HasDirectParent(parent) {\n\t\tt.Errorf(\"Child %v does have direct parent: %v\", child2, dp2)\n\t}\n}", "func (n *nsSvc) NoHostPathInNamespace(hostName, pathName, namespace string) bool {\n\tres := 0\n\tn.mux.RLock()\n\tsvcPathsPtr := n.NsToSvc[namespace]\n\tn.mux.RUnlock()\n\tsvcPathsPtr.rLock()\n\tfor _, pathSet := range svcPathsPtr.StoPs {\n\t\tfor path := range pathSet.Iter() {\n\t\t\tpathPtr := path.(*Path)\n\t\t\tif pathPtr.GetHostName() == hostName &&\n\t\t\t\tpathPtr.GetPathName() == pathName {\n\t\t\t\tres++\n\t\t\t}\n\t\t}\n\t}\n\tsvcPathsPtr.rUnlock()\n\treturn res == 0\n}", "func (m *MockNamespaceKeeper) Update(name string, upd *state.Namespace) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Update\", name, upd)\n}", "func (m *MockManagedClusterScoper) ExtendedLocationName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ExtendedLocationName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (mock 
*PersistentVolumeClaimInterfaceMock) GetNamespacedCalls() []struct {\n\tNamespace string\n\tName string\n\tOpts metav1.GetOptions\n} {\n\tvar calls []struct {\n\t\tNamespace string\n\t\tName string\n\t\tOpts metav1.GetOptions\n\t}\n\tlockPersistentVolumeClaimInterfaceMockGetNamespaced.RLock()\n\tcalls = mock.calls.GetNamespaced\n\tlockPersistentVolumeClaimInterfaceMockGetNamespaced.RUnlock()\n\treturn calls\n}", "func (endpointSliceStrategy) NamespaceScoped() bool {\n\treturn true\n}" ]
[ "0.6514077", "0.6317604", "0.6231154", "0.6049468", "0.6036219", "0.59960115", "0.5976806", "0.5969981", "0.59484136", "0.5919721", "0.59055024", "0.5841225", "0.56644887", "0.5634663", "0.55910814", "0.55664366", "0.55596614", "0.55540234", "0.5547801", "0.5525685", "0.5478042", "0.54612863", "0.5458303", "0.5448701", "0.54464835", "0.5408549", "0.5375892", "0.53754115", "0.5353735", "0.5339835", "0.53256226", "0.5322609", "0.5317833", "0.5315759", "0.5301241", "0.5299276", "0.52933794", "0.5269008", "0.5260621", "0.52595186", "0.52589446", "0.5241145", "0.52172935", "0.52015656", "0.52006716", "0.51842415", "0.5163282", "0.51621735", "0.5157037", "0.51549834", "0.5094084", "0.5091907", "0.5071707", "0.5065172", "0.506123", "0.50592184", "0.50565046", "0.5039712", "0.5022447", "0.5019131", "0.5006694", "0.5003681", "0.5003292", "0.5002298", "0.49886057", "0.4987404", "0.4978005", "0.49603254", "0.49494705", "0.49369922", "0.49276617", "0.4922773", "0.492137", "0.49144378", "0.49133477", "0.49096817", "0.4909581", "0.49005485", "0.4899213", "0.4897605", "0.48975736", "0.48974243", "0.48931617", "0.48844838", "0.48831284", "0.48765445", "0.48764697", "0.48717797", "0.48713538", "0.48659486", "0.48603854", "0.48534274", "0.4852818", "0.48489404", "0.48472908", "0.4810335", "0.4810152", "0.48097187", "0.48082075", "0.4801633" ]
0.79193115
0
IsNamespaced indicates an expected call of IsNamespaced
func (mr *MockLoadBalanceMockRecorder) IsNamespaced() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsNamespaced", reflect.TypeOf((*MockLoadBalance)(nil).IsNamespaced)) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsNamespaced(u *unstructured.Unstructured) bool {\n\tif u == nil {\n\t\treturn false\n\t}\n\treturn u.GetNamespace() != \"\"\n}", "func isNamespaced(r corev3.Resource) bool {\n\tgr, ok := r.(corev3.GlobalResource)\n\tif !ok {\n\t\treturn true\n\t}\n\treturn !gr.IsGlobalResource()\n}", "func (m *MockLoadBalance) IsNamespaced() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsNamespaced\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (c *clusterCache) IsNamespaced(gk schema.GroupKind) (bool, error) {\n\tif isNamespaced, ok := c.namespacedResources[gk]; ok {\n\t\treturn isNamespaced, nil\n\t}\n\treturn false, errors.NewNotFound(schema.GroupResource{Group: gk.Group}, \"\")\n}", "func (ck *clusterKinds) namespaced(kind string) bool {\n\tck.update()\n\treturn ck.isNamespaced[kind]\n}", "func IsResourceNamespaced(kind string) bool {\n\tswitch kind {\n\tcase \"Namespace\",\n\t\t\"Node\",\n\t\t\"PersistentVolume\",\n\t\t\"PodSecurityPolicy\",\n\t\t\"CertificateSigningRequest\",\n\t\t\"ClusterRoleBinding\",\n\t\t\"ClusterRole\",\n\t\t\"VolumeAttachment\",\n\t\t\"StorageClass\",\n\t\t\"CSIDriver\",\n\t\t\"CSINode\",\n\t\t\"ValidatingWebhookConfiguration\",\n\t\t\"MutatingWebhookConfiguration\",\n\t\t\"CustomResourceDefinition\",\n\t\t\"PriorityClass\",\n\t\t\"RuntimeClass\":\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func isKubeNamespace(ns string) bool {\n\treturn ns == metav1.NamespacePublic || ns == metav1.NamespaceSystem\n}", "func isKubeNamespace(ns string) bool {\n\treturn ns == v1.NamespacePublic || ns == v1.NamespaceSystem\n}", "func (c *client) IsObjectNamespaced(obj runtime.Object) (bool, error) {\n\treturn apiutil.IsObjectNamespaced(obj, c.scheme, c.mapper)\n}", "func isSystemNamespace(fedNamespace, namespace string) bool {\n\tswitch namespace {\n\tcase \"kube-system\", \"kube-public\", \"default\", fedNamespace:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func IsNS(s string) bool {\n\tparts := strings.SplitN(s, 
\":\", 2)\n\treturn len(parts) > 1 && parts[0] == \"ns\"\n}", "func IsKindNamespace(u *unstructured.Unstructured) bool {\n\tif u == nil {\n\t\treturn false\n\t}\n\tgvk := u.GroupVersionKind()\n\treturn CoreNamespace == gvk.GroupKind()\n}", "func (m Metadata) HasNamespace() bool {\n\treturn m2o(m).Get(\"namespace\").IsStr()\n}", "func (r Resource) Namespaced() bool {\n\treturn r.namespaced\n}", "func ValidNamespace(ctx Context, resource *ObjectMeta) bool {\n\tns, ok := NamespaceFrom(ctx)\n\tif len(resource.Namespace) == 0 {\n\t\tresource.Namespace = ns\n\t}\n\treturn ns == resource.Namespace && ok\n}", "func ValidNamespace(ctx genericapirequest.Context, resource metav1.Object) bool {\n\tns, ok := genericapirequest.NamespaceFrom(ctx)\n\tif len(resource.GetNamespace()) == 0 {\n\t\tresource.SetNamespace(ns)\n\t}\n\treturn ns == resource.GetNamespace() && ok\n}", "func (o *InstanceStatusKubernetes) HasNamespace() bool {\n\tif o != nil && o.Namespace != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsObjectInOtherNamespace(namespace string) (bool, error) {\n\twatchNamespace, err := k8sutil.GetWatchNamespace()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif namespace != watchNamespace {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func (a *MeshCheckAppender) namespaceOK(namespace string, namespaceInfo *graph.AppenderNamespaceInfo) bool {\n\tif namespace == namespaceInfo.Namespace {\n\t\treturn true\n\t}\n\tfor _, ns := range a.AccessibleNamespaces {\n\t\tif namespace == ns.Name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TestGetNamespaceAllNamespacesNotDefined(t *testing.T) {\n\ttestCmd := testCommandGenerator(false)\n\texpectedNamespace := \"test1\"\n\ttestCmd.SetArgs([]string{\"--namespace\", expectedNamespace})\n\ttestCmd.Execute()\n\tkp := &KnParams{fixedCurrentNamespace: FakeNamespace}\n\tactualNamespace, err := kp.GetNamespace(testCmd)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif actualNamespace != expectedNamespace 
{\n\t\tt.Fatalf(\"Incorrect namespace retrieved: %v, expected: %v\", actualNamespace, expectedNamespace)\n\t}\n}", "func (n *Name) IsFullyQualified() bool {\n\treturn strings.HasPrefix(n.Value, `\\`)\n}", "func IsIgnoredNamespace(ns string) bool {\n\tfor _, ins := range IgnoredNamespaces {\n\t\tif ns == ins {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsValidNamespace(s string) bool {\n\treturn validNamespace.MatchString(s)\n}", "func IsReservedNamespace(namespace string) bool {\n\treturn namespace == NamespaceKarmadaSystem ||\n\t\tnamespace == NamespaceKarmadaCluster ||\n\t\tstrings.HasPrefix(namespace, ExecutionSpacePrefix)\n}", "func TestNamespacedCommands(t *testing.T) {\n\tconst contextNS = \"from-context\"\n\tconst flagNS = \"from-flag\"\n\tconst allNS = \"\"\n\n\ttestcases := []struct {\n\t\tname string\n\t\tcmd string\n\t\twantNS string\n\t}{\n\t\t{name: \"get instances with flag namespace\", cmd: \"get instances --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"get instances with context namespace\", cmd: \"get instances\", wantNS: contextNS},\n\t\t{name: \"get all instances\", cmd: \"get instances --all-namespaces\", wantNS: allNS},\n\n\t\t{name: \"describe instance with flag namespace\", cmd: \"describe instance NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"describe instance with context namespace\", cmd: \"describe instances NAME\", wantNS: contextNS},\n\n\t\t{name: \"provision with flag namespace\", cmd: \"provision --class CLASS --plan PLAN NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"provision with context namespace\", cmd: \"provision --class CLASS --plan PLAN NAME\", wantNS: contextNS},\n\n\t\t{name: \"deprovision with flag namespace\", cmd: \"deprovision NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"deprovision with context namespace\", cmd: \"deprovision NAME\", wantNS: contextNS},\n\n\t\t{name: \"bind with flag namespace\", cmd: \"bind NAME --namespace \" + flagNS, wantNS: 
flagNS},\n\t\t{name: \"bind with context namespace\", cmd: \"bind NAME\", wantNS: contextNS},\n\n\t\t{name: \"unbind with flag namespace\", cmd: \"unbind NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"unbind with context namespace\", cmd: \"unbind NAME\", wantNS: contextNS},\n\n\t\t{name: \"get bindings with flag namespace\", cmd: \"get bindings --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"get bindings with context namespace\", cmd: \"get bindings\", wantNS: contextNS},\n\t\t{name: \"get all bindings\", cmd: \"get bindings --all-namespaces\", wantNS: allNS},\n\n\t\t{name: \"describe binding with flag namespace\", cmd: \"describe binding NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"describe binding with context namespace\", cmd: \"describe binding NAME\", wantNS: contextNS},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tfakeClient := fake.NewSimpleClientset()\n\n\t\t\tcxt := newContext()\n\t\t\tcxt.App = &svcat.App{\n\t\t\t\tCurrentNamespace: contextNS,\n\t\t\t\tSDK: &servicecatalog.SDK{ServiceCatalogClient: fakeClient},\n\t\t\t}\n\t\t\tcxt.Output = ioutil.Discard\n\n\t\t\texecuteFakeCommand(t, tc.cmd, cxt, true)\n\n\t\t\tgotNamespace := fakeClient.Actions()[0].GetNamespace()\n\t\t\tif tc.wantNS != gotNamespace {\n\t\t\t\tt.Fatalf(\"the wrong namespace was used. 
WANT: %q, GOT: %q\", tc.wantNS, gotNamespace)\n\t\t\t}\n\t\t})\n\t}\n}", "func doIt(id resid.ResId, fs *config.FieldSpec) bool {\n\treturn fs.Path != metaNamespace ||\n\t\t(fs.Path == metaNamespace && id.IsNamespaceableKind())\n}", "func TestHasNamespace_NamespaceNotExisting(t *testing.T) {\n\tconst (\n\t\tprojectId = \"test-project-id\"\n\t\tprojectName = \"test-project-name\"\n\t\tclusterId = \"test-cluster-id\"\n\t\tnamespaceName = \"test-namespace\"\n\t)\n\tvar (\n\t\tactualListOpts *types.ListOpts\n\t\tclientConfig = ClientConfig{}\n\t\tnamespace = projectModel.Namespace{\n\t\t\tName: namespaceName,\n\t\t}\n\t\ttestClients = stubs.CreateBackendStubs(t)\n\t)\n\n\tnamespaceOperationsStub := stubs.CreateNamespaceOperationsStub(t)\n\tnamespaceOperationsStub.DoList = func(opts *types.ListOpts) (*clusterClient.NamespaceCollection, error) {\n\t\tactualListOpts = opts\n\t\treturn &clusterClient.NamespaceCollection{\n\t\t\tData: []clusterClient.Namespace{},\n\t\t}, nil\n\n\t}\n\ttestClients.ClusterClient.Namespace = namespaceOperationsStub\n\n\tclient := rancherClient{\n\t\tclientConfig: clientConfig,\n\t\tprojectId: projectId,\n\t\tclusterClient: testClients.ClusterClient,\n\t\tlogger: logrus.WithField(\"test\", true),\n\t}\n\t//Act\n\tresult, err := client.HasNamespace(namespace)\n\n\t//Assert\n\tassert.Ok(t, err)\n\tassert.Equals(t, false, result)\n\tassert.Equals(t, &types.ListOpts{Filters: map[string]interface{}{\"system\": \"false\", \"name\": \"test-namespace\"}}, actualListOpts)\n}", "func (strategy) NamespaceScoped() bool {\n\treturn true\n}", "func namespaceRefersToCluster(namespace string) bool {\n\treturn len(namespace) == 0\n}", "func (m *podMetrics) NamespaceScoped() bool {\n\treturn true\n}", "func (endpointSliceStrategy) NamespaceScoped() bool {\n\treturn true\n}", "func TestHasNamespace_NamespaceExisting(t *testing.T) {\n\tconst (\n\t\tprojectId = \"test-project-id\"\n\t\tprojectName = \"test-project-name\"\n\t\tclusterId = 
\"test-cluster-id\"\n\t\tnamespaceName = \"test-namespace\"\n\t)\n\tvar (\n\t\tactualListOpts *types.ListOpts\n\t\tclientConfig = ClientConfig{}\n\t\tnamespace = projectModel.Namespace{\n\t\t\tName: namespaceName,\n\t\t}\n\t\ttestClients = stubs.CreateBackendStubs(t)\n\t)\n\n\tnamespaceOperationsStub := stubs.CreateNamespaceOperationsStub(t)\n\tnamespaceOperationsStub.DoList = func(opts *types.ListOpts) (*clusterClient.NamespaceCollection, error) {\n\t\tactualListOpts = opts\n\t\treturn &clusterClient.NamespaceCollection{\n\t\t\tData: []clusterClient.Namespace{\n\t\t\t\tclusterClient.Namespace{\n\t\t\t\t\tName: namespaceName,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\n\t}\n\ttestClients.ClusterClient.Namespace = namespaceOperationsStub\n\n\tclient := rancherClient{\n\t\tclientConfig: clientConfig,\n\t\tprojectId: projectId,\n\t\tclusterClient: testClients.ClusterClient,\n\t\tnamespaceCache: make(map[string]clusterClient.Namespace),\n\t\tlogger: logrus.WithField(\"test\", true),\n\t}\n\t//Act\n\tresult, err := client.HasNamespace(namespace)\n\n\t//Assert\n\tassert.Ok(t, err)\n\tassert.Equals(t, true, result)\n\tassert.Equals(t, &types.ListOpts{Filters: map[string]interface{}{\"system\": \"false\", \"name\": \"test-namespace\"}}, actualListOpts)\n}", "func checkNamespaces(podSpec PodSpecV1, result *Result) {\n\tif labelExists, reason := getPodOverrideLabelReason(result, \"allow-namespace-host-network\"); labelExists {\n\t\tif podSpec.HostNetwork {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorNamespaceHostNetworkTrueAllowed,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostNetwork to true\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t} else {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorMisconfiguredKubeauditAllow,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostNetwork to true, but it 
is set to false\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t}\n\t} else if podSpec.HostNetwork {\n\t\tocc := Occurrence{\n\t\t\tpodHost: podSpec.Hostname,\n\t\t\tid: ErrorNamespaceHostNetworkTrue,\n\t\t\tkind: Error,\n\t\t\tmessage: \"hostNetwork is set to true in podSpec, please set to false!\",\n\t\t}\n\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t}\n\tif labelExists, reason := getPodOverrideLabelReason(result, \"allow-namespace-host-IPC\"); labelExists {\n\t\tif podSpec.HostIPC {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorNamespaceHostIPCTrueAllowed,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostIPC to true\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t} else {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorMisconfiguredKubeauditAllow,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostIPC to true, but it is set to false\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t}\n\t} else if podSpec.HostIPC {\n\t\tocc := Occurrence{\n\t\t\tpodHost: podSpec.Hostname,\n\t\t\tid: ErrorNamespaceHostIPCTrue,\n\t\t\tkind: Error,\n\t\t\tmessage: \"hostIPC is set to true in podSpec, please set to false!\",\n\t\t}\n\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t}\n\tif labelExists, reason := getPodOverrideLabelReason(result, \"allow-namespace-host-PID\"); labelExists {\n\t\tif podSpec.HostPID {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorNamespaceHostPIDTrueAllowed,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostPID to true\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = 
append(result.Occurrences, occ)\n\t\t} else {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorMisconfiguredKubeauditAllow,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostPID to true, but it is set to false\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t}\n\t} else if podSpec.HostPID {\n\t\tocc := Occurrence{\n\t\t\tpodHost: podSpec.Hostname,\n\t\t\tid: ErrorNamespaceHostPIDTrue,\n\t\t\tkind: Error,\n\t\t\tmessage: \"hostPID is set to true in podSpec, please set to false!\",\n\t\t}\n\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t}\n\treturn\n}", "func isNamespaceCapabilityValid(cap string) bool {\n\tswitch cap {\n\tcase NamespaceCapabilityDeny, NamespaceCapabilityParseJob, NamespaceCapabilityListJobs, NamespaceCapabilityReadJob,\n\t\tNamespaceCapabilitySubmitJob, NamespaceCapabilityDispatchJob, NamespaceCapabilityReadLogs,\n\t\tNamespaceCapabilityReadFS, NamespaceCapabilityAllocLifecycle,\n\t\tNamespaceCapabilityAllocExec, NamespaceCapabilityAllocNodeExec,\n\t\tNamespaceCapabilityCSIReadVolume, NamespaceCapabilityCSIWriteVolume, NamespaceCapabilityCSIListVolume, NamespaceCapabilityCSIMountVolume, NamespaceCapabilityCSIRegisterPlugin,\n\t\tNamespaceCapabilityListScalingPolicies, NamespaceCapabilityReadScalingPolicy, NamespaceCapabilityReadJobScaling, NamespaceCapabilityScaleJob:\n\t\treturn true\n\t// Separate the enterprise-only capabilities\n\tcase NamespaceCapabilitySentinelOverride, NamespaceCapabilitySubmitRecommendation:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (a *Container) IsSameNamespace(b *Container) bool {\n\treturn a.Name.IsEqualNs(b.Name)\n}", "func (podPresetStrategy) NamespaceScoped() bool {\n\treturn true\n}", "func (r *ProxyingREST) NamespaceScoped() bool {\n\treturn false\n}", "func (c *clientWrapper) isWatchedNamespace(ns string) bool {\n\tif c.isNamespaceAll {\n\t\treturn 
true\n\t}\n\tfor _, watchedNamespace := range c.watchedNamespaces {\n\t\tif watchedNamespace == ns {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (r Resource) namespaceExists(namespace string, response *restful.Response) bool {\n\t_, err := r.K8sClient.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})\n\tif err != nil {\n\t\terrorMessage := fmt.Sprintf(\"error: namespace does not exist: '%s'.\", namespace)\n\t\tutils.RespondMessageAndLogError(response, err, errorMessage, http.StatusBadRequest)\n\t\treturn false\n\t}\n\treturn true\n}", "func getNamespace(deploymentID string, objectMeta metav1.ObjectMeta) (string, bool) {\n\tvar isProvided bool\n\tvar namespace string\n\tvar providedNamespace string\n\tif &objectMeta != nil {\n\t\tprovidedNamespace = objectMeta.Namespace\n\t}\n\n\tif providedNamespace != \"\" {\n\t\tnamespace = providedNamespace\n\t\tisProvided = true\n\t} else {\n\t\tnamespace, _ = defaultNamespace(deploymentID)\n\t}\n\n\treturn namespace, isProvided\n}", "func (mcd *metricsConfig) CheckNamespace(namespace string) bool {\n\tmcd.mux.RLock()\n\tdefer mcd.mux.RUnlock()\n\t// TODO(eddycharly): check we actually need `\"-\"`\n\tif namespace == \"\" || namespace == \"-\" {\n\t\treturn true\n\t}\n\tif slices.Contains(mcd.namespaces.ExcludeNamespaces, namespace) {\n\t\treturn false\n\t}\n\tif len(mcd.namespaces.IncludeNamespaces) == 0 {\n\t\treturn true\n\t}\n\treturn slices.Contains(mcd.namespaces.IncludeNamespaces, namespace)\n}", "func Namespaced(prefix string) Namespace {\n\treturn Namespace{\n\t\tprefix: prefix,\n\t}\n}", "func (s *server) namespaceExists(namespace string) error {\n\ts.logger.WithField(\"namespace\", namespace).Info(\"Checking existence of namespace.\")\n\n\tif _, err := s.kubeClient.CoreV1().Namespaces().Get(s.ctx, namespace, metav1.GetOptions{}); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\ts.logger.WithField(\"namespace\", namespace).Info(\"Namespace exists\")\n\treturn nil\n}", "func 
TestNamespace2(t *testing.T) {\n\ttestName := \"TestNamespace2\"\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{\n\t\t\"Application\": true,\n\t\t\"Deployment\": true,\n\t\t\"Service\": true,\n\t}\n\n\t// starting resources to pre-populate\n\tvar files = []string{\n\t\tnsListkAppNavConfigMap,\n\t\tCrdApplication,\n\t\tns1AnnoApplication,\n\t\tns1Service,\n\t\tns1Deployment,\n\t\tns2Service,\n\t\tns2Deployment,\n\t\tns3Service,\n\t\tns3Deployment,\n\t\tns4Service,\n\t\tns4Deployment,\n\t}\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: only ns1 and ns2 resources should have status */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\n\titeration0IDs[7].expectedStatus = NoStatus // ns3 Service\n\titeration0IDs[8].expectedStatus = NoStatus\n\titeration0IDs[9].expectedStatus = NoStatus // ns4 Service\n\titeration0IDs[10].expectedStatus = NoStatus\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t/* iteration 1: clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// run all the test actions\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func Namespaced(t Type, ns string) Type {\n\treturn namespacedWrapper{\n\t\tns: ns,\n\t\tt: t,\n\t}\n}", "func (n *nsSvc) NoHostPathInNamespace(hostName, pathName, namespace string) bool {\n\tres := 0\n\tn.mux.RLock()\n\tsvcPathsPtr := n.NsToSvc[namespace]\n\tn.mux.RUnlock()\n\tsvcPathsPtr.rLock()\n\tfor _, pathSet := range svcPathsPtr.StoPs {\n\t\tfor path := range pathSet.Iter() {\n\t\t\tpathPtr := path.(*Path)\n\t\t\tif pathPtr.GetHostName() == hostName &&\n\t\t\t\tpathPtr.GetPathName() == pathName 
{\n\t\t\t\tres++\n\t\t\t}\n\t\t}\n\t}\n\tsvcPathsPtr.rUnlock()\n\treturn res == 0\n}", "func IsNamespaceNotFoundErr(err error) bool {\n\tse, isStatusError := err.(*errors.StatusError)\n\tif !isStatusError {\n\t\treturn false\n\t}\n\n\treturn errors.IsNotFound(err) && se.Status().Details.Kind == \"namespaces\"\n}", "func (s imageStreamMappingStrategy) NamespaceScoped() bool {\n\treturn true\n}", "func (Strategy) NamespaceScoped() bool {\n\treturn false\n}", "func namespaceIsMonitored(ns string) bool {\n\tfor _, monitorNs := range config.MonitorNamespaces {\n\t\tif strings.HasPrefix(ns, monitorNs) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (r *SearchREST) NamespaceScoped() bool {\n\treturn false\n}", "func assertContainsNamespace(organizations []*server.Organization, expectedOrganization string) {\n\n\tfor _, organziation := range organizations {\n\t\tif organziation.Name == expectedOrganization {\n\t\t\treturn\n\t\t}\n\t}\n\n\tFail(fmt.Sprintf(\"Could not find organziation %s in list returned\", expectedOrganization))\n}", "func TestToUnstructWithNamespaceAddsNamespace(t *testing.T) {\n\tfor _, example := range []string{exampleResourceWithLabels, exampleResource} {\n\t\tb := []byte(example)\n\t\tnamespace := \"foo\"\n\t\tgot := ToUnstructWithNamespace(t, b, namespace)\n\t\tif got.GetNamespace() != namespace {\n\t\t\tt.Errorf(\"ToUnstructWithNamespace(%v) namespace not set: got '%v', want '%v'\", example, got.GetNamespace(), namespace)\n\t\t}\n\t\tlabels := got.GetLabels()\n\t\tif labels[\"cnrm-test\"] != \"true\" {\n\t\t\tt.Errorf(\"ToUnstructWithNamespace(%v) label cnrm-test not set: got '%v', want '%v'\", example, labels[\"cnrm-test\"], \"true\")\n\t\t}\n\t}\n}", "func checkNamespaceExists(namespace item) typeCheck {\n\treturn func(p *parser) {\n\t\tif _, ok := namespaceQuery(p.namespaces).find(namespace.Val); ok {\n\t\t\treturn\n\t\t}\n\t\tp.addErr(namespace, \"namespace %q was not declared\", namespace.Val)\n\t}\n}", "func (accountStrategy) 
NamespaceScoped() bool {\n return false\n}", "func (l *Logging) QualifiedNamespacedName(name string) string {\n\treturn fmt.Sprintf(\"%s-%s-%s\", l.Spec.ControlNamespace, l.Name, name)\n}", "func (me TxsdTaxType) IsNs() bool { return me.String() == \"NS\" }", "func (a *LogOptions) ValidNamespace() bool {\n\tif len(a.Namespace) > 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func TestNamespace(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tnode Node\n\t\twant *metadata.NodeMetadata\n\t}{\n\t\t{\n\t\t\tname: \"datasource\",\n\t\t\tnode: &DataSourceBuilder{\n\t\t\t\tName: \"source\",\n\t\t\t\tAlias: \"x\",\n\t\t\t\tPartition: 0,\n\t\t\t\tCardinality: metadata.BoundedFitsInLocalStorage,\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.BoundedFitsInLocalStorage,\n\t\t\t\toctosql.NewVariableName(\"\"),\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"x\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"distinct\",\n\t\t\tnode: &Distinct{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"a.event_time\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"a\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"a.event_time\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"a\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"orderby\",\n\t\t\tnode: &OrderBy{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"a.event_time\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"a\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"a.event_time\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"a\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"filter\",\n\t\t\tnode: 
&Filter{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\t\t\t\"event_time\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\tnil,\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\t\"event_time\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\tnil,\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"groupby - unbounded with event time\",\n\t\t\tnode: &GroupBy{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"a.event_time\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\tnil,\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tFields: []octosql.VariableName{\"a.event_time\", \"a.id\"},\n\t\t\t\tKey: []Expression{NewVariable(\"a.event_time\"), NewVariable(\"a.id\")},\n\t\t\t\tAggregates: []Aggregate{Key, Key},\n\t\t\t\tAs: []octosql.VariableName{\"out_event_time\", \"\"},\n\t\t\t},\n\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"out_event_time\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\tnil,\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"groupby - bounded doesnt fit, no event time\",\n\t\t\tnode: &GroupBy{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\t\t\t\"a.event_time\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\tnil,\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tFields: []octosql.VariableName{\"b.event_time\", \"a.age\"},\n\t\t\t\tKey: []Expression{NewVariable(\"a.event_time\"), NewVariable(\"b.age\")},\n\t\t\t\tAggregates: []Aggregate{Key, Sum},\n\t\t\t\tAs: []octosql.VariableName{\"out_field\", \"\"},\n\t\t\t},\n\n\t\t\twant: 
metadata.NewNodeMetadata(\n\t\t\t\tmetadata.BoundedFitsInLocalStorage,\n\t\t\t\t\"\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\tnil,\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"requalifier\",\n\t\t\tnode: &Requalifier{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\toctosql.NewVariableName(\"event_field\"),\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"a\", \"b\", \"c\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tQualifier: \"q\",\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\toctosql.NewVariableName(\"q.event_field\"),\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"q\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"stream join\",\n\t\t\tnode: &StreamJoin{\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\t\t\toctosql.NewVariableName(\"source_event_time\"),\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"a\", \"b\", \"c\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tJoined: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedFitsInLocalStorage,\n\t\t\t\t\t\toctosql.NewVariableName(\"joined_event_time\"),\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"c\", \"d\", \"e\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tEventTimeField: \"source_event_time\",\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\toctosql.NewVariableName(\"source_event_time\"),\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"a\", \"b\", \"c\", \"d\", \"e\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"lookup join\",\n\t\t\tnode: &LookupJoin{\n\t\t\t\tsource: &StubNode{\n\t\t\t\t\tNodeMetadata: 
metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\t\t\toctosql.NewVariableName(\"source_event_time\"),\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"a\", \"b\", \"c\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tjoined: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.BoundedFitsInLocalStorage,\n\t\t\t\t\t\toctosql.NewVariableName(\"joined_event_time\"),\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"c\", \"d\", \"e\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.BoundedDoesntFitInLocalStorage,\n\t\t\t\toctosql.NewVariableName(\"source_event_time\"),\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"a\", \"b\", \"c\", \"d\", \"e\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"map test 1 - unqualified star\",\n\t\t\tnode: &Map{\n\t\t\t\tExpressions: []NamedExpression{NewStarExpression(\"\")},\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"x\", \"y\", \"z\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tKeep: false,\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"x\", \"y\", \"z\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"map test 2 - keep = true and qualified star no match\",\n\t\t\tnode: &Map{\n\t\t\t\tExpressions: []NamedExpression{NewStarExpression(\"q\")},\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"x\", \"y\", \"z\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tKeep: true,\n\t\t\t},\n\t\t\twant: 
metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"x\", \"y\", \"z\"}, // don't add q because it's not in source metadata\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"map test 3 - keep = false and qualified star match\",\n\t\t\tnode: &Map{\n\t\t\t\tExpressions: []NamedExpression{NewStarExpression(\"x\"), NewStarExpression(\"y\")},\n\t\t\t\tSource: &StubNode{\n\t\t\t\t\tNodeMetadata: metadata.NewNodeMetadata(\n\t\t\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\t\t\"\",\n\t\t\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t\t\t[]string{\"x\", \"y\", \"z\"},\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tKeep: false,\n\t\t\t},\n\t\t\twant: metadata.NewNodeMetadata(\n\t\t\t\tmetadata.Unbounded,\n\t\t\t\t\"\",\n\t\t\t\tmetadata.NewNamespace(\n\t\t\t\t\t[]string{\"x\", \"y\"},\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tt.Run(fmt.Sprint(i), func(t *testing.T) {\n\t\t\tgot := tt.node.Metadata()\n\n\t\t\tareNamespacesEqual := got.Namespace().Equal(tt.want.Namespace())\n\n\t\t\tif got.EventTimeField() != tt.want.EventTimeField() || got.Cardinality() != tt.want.Cardinality() || !areNamespacesEqual {\n\t\t\t\tt.Errorf(\"Metadata() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}", "func splitNamespacedName(nameStr string) types.NamespacedName {\n\tsplitPoint := strings.IndexRune(nameStr, types.Separator)\n\tif splitPoint == -1 {\n\t\treturn types.NamespacedName{Name: nameStr}\n\t}\n\treturn types.NamespacedName{Namespace: nameStr[:splitPoint], Name: nameStr[splitPoint+1:]}\n}", "func ObjectsAreInTheSameNamespace(current, desired client.Object) error {\n\t// NOTE: this should never happen (webhooks prevent it), but checking for extra safety.\n\tif current.GetNamespace() != desired.GetNamespace() {\n\t\treturn errors.Errorf(\"invalid operation: it is not possible to change the namespace of %s/%s from %s to %s\",\n\t\t\tcurrent.GetObjectKind().GroupVersionKind(), 
current.GetName(), current.GetNamespace(), desired.GetNamespace())\n\t}\n\treturn nil\n}", "func (o *InstanceStatusKubernetes) GetNamespaceOk() (*string, bool) {\n\tif o == nil || o.Namespace == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Namespace, true\n}", "func Namespace(featureName FeatureName, componentName ComponentName, controlPlaneSpec *v1alpha2.IstioControlPlaneSpec) (string, error) {\n\tdefaultNamespaceI, found, err := tpath.GetFromStructPath(controlPlaneSpec, \"DefaultNamespace\")\n\tif !found {\n\t\treturn \"\", fmt.Errorf(\"can't find any setting for defaultNamespace for feature=%s, component=%s\", featureName, componentName)\n\t}\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error in Namepsace for feature=%s, component=%s: %s\", featureName, componentName, err)\n\n\t}\n\tdefaultNamespace, ok := defaultNamespaceI.(string)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"defaultNamespace has bad type %T, expect string\", defaultNamespaceI)\n\t}\n\tif defaultNamespace == \"\" {\n\t\treturn \"\", fmt.Errorf(\"defaultNamespace must be set\")\n\t}\n\n\tfeatureNamespace := defaultNamespace\n\tfeatureNodeI, found, err := tpath.GetFromStructPath(controlPlaneSpec, string(featureName)+\".Components.Namespace\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error in Namepsace GetFromStructPath featureNamespace for feature=%s, component=%s: %s\", featureName, componentName, err)\n\t}\n\tif found && featureNodeI != nil {\n\t\tfeatureNamespace, ok = featureNodeI.(string)\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"feature %s namespace has bad type %T, expect string\", featureName, featureNodeI)\n\t\t}\n\t\tif featureNamespace == \"\" {\n\t\t\tfeatureNamespace = defaultNamespace\n\t\t}\n\t}\n\n\tcomponentNodeI, found, err := tpath.GetFromStructPath(controlPlaneSpec, string(featureName)+\".Components.\"+string(componentName)+\".Namespace\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error in Namepsace GetFromStructPath componentNamespace for feature=%s, 
component=%s: %s\", featureName, componentName, err)\n\t}\n\tif !found {\n\t\treturn featureNamespace, nil\n\t}\n\tif componentNodeI == nil {\n\t\treturn featureNamespace, nil\n\t}\n\tcomponentNamespace, ok := componentNodeI.(string)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"component %s enabled has bad type %T, expect string\", componentName, componentNodeI)\n\t}\n\tif componentNamespace == \"\" {\n\t\treturn featureNamespace, nil\n\t}\n\treturn componentNamespace, nil\n}", "func doesNamespaceContainProtectedLabels(ns *corev1.Namespace) []string {\n\tfoundLabelNames := make([]string, 0)\n\tfor _, label := range protectedLabels {\n\t\tif _, found := ns.ObjectMeta.Labels[label]; found {\n\t\t\tfoundLabelNames = append(foundLabelNames, label)\n\t\t}\n\t}\n\treturn foundLabelNames\n}", "func (sar *ServiceAccountReflector) NewNamespaced(opts *options.NamespacedOpts) manager.NamespacedReflector {\n\tremoteSecrets := opts.RemoteFactory.Core().V1().Secrets()\n\n\t// Regardless of the type of the event, we always enqueue the key corresponding to the pod.\n\t_, err := remoteSecrets.Informer().AddEventHandler(opts.HandlerFactory(RemoteSASecretNamespacedKeyer(opts.LocalNamespace, forge.LiqoNodeName)))\n\tutilruntime.Must(err)\n\n\treturn &NamespacedServiceAccountReflector{\n\t\tNamespacedReflector: generic.NewNamespacedReflector(opts, ServiceAccountReflectorName),\n\n\t\tlocalPods: sar.localPods.Pods(opts.LocalNamespace),\n\t\tremoteSecrets: remoteSecrets.Lister().Secrets(opts.RemoteNamespace),\n\n\t\tlocalSAsClient: opts.LocalClient.CoreV1().ServiceAccounts(opts.LocalNamespace),\n\t\tremoteSecretsClient: opts.RemoteClient.CoreV1().Secrets(opts.RemoteNamespace),\n\t}\n}", "func TestComponentNamespaces(t *testing.T) {\n\ttestName := \"TestComponentNamespaces\"\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{\n\t\tAPPLICATION: true,\n\t\t\"Deployment\": true,\n\t\t\"Service\": true,\n\t}\n\n\t// starting resources to 
pre-populate\n\tvar files = []string{\n\t\t/* 0 */ KappnavConfigFile,\n\t\t/* 1 */ CrdApplication,\n\t\t/* 2 */ ns1Service,\n\t\t/* 3 */ ns1Deployment,\n\t\t/* 4 */ ns1App,\n\t\t/* 5 */ ns2Service,\n\t\t/* 6 */ ns2Deployment,\n\t}\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: only namespace 1 resources should have status */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\titeration0IDs[5].expectedStatus = NoStatus // don't expect status to be checked for namespace 2\n\titeration0IDs[6].expectedStatus = NoStatus // don't expect status to be checked for namespace 2\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t// iteration 1: add annotation \"kappnav.components.namespaces\" : \"ns2\"\n\tarrayLength := len(iteration0IDs)\n\tvar iteration1IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration1IDs, iteration0IDs)\n\titeration1IDs[4].fileName = ns1aApp\n\titeration1IDs[4].expectedStatus = Normal\n\titeration1IDs[5].expectedStatus = Normal\n\titeration1IDs[6].expectedStatus = Normal\n\ttestActions.addIteration(iteration1IDs, emptyIDs)\n\n\t// iteration 2: Change the other namespace resource to Warning\n\tvar iteration2IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration2IDs, iteration1IDs)\n\titeration2IDs[4].expectedStatus = warning\n\titeration2IDs[5].expectedStatus = warning\n\ttestActions.addIteration(iteration2IDs, emptyIDs)\n\n\t// iteration 3: Change ns1 namespace resource to Problem\n\tvar iteration3IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration3IDs, iteration2IDs)\n\titeration3IDs[2].expectedStatus = problem\n\titeration3IDs[4].expectedStatus = problem\n\ttestActions.addIteration(iteration3IDs, emptyIDs)\n\n\t// iteration 4: Remove problem. 
Back to Warning.\n\tvar iteration4IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration4IDs, iteration3IDs)\n\titeration4IDs[2].expectedStatus = Normal\n\titeration4IDs[4].expectedStatus = warning\n\ttestActions.addIteration(iteration4IDs, emptyIDs)\n\n\t// iteration 5: Remove Warning . Back to Normal\n\tvar iteration5IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration5IDs, iteration4IDs)\n\titeration4IDs[4].expectedStatus = Normal\n\titeration4IDs[5].expectedStatus = Normal\n\ttestActions.addIteration(iteration5IDs, emptyIDs)\n\n\t/* clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// make all trasition of testAction\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func isPrivate(ns specs.LinuxNamespaceType, mode string) bool {\n\tswitch ns {\n\tcase specs.IPCNamespace:\n\t\treturn mode == \"private\"\n\tcase specs.NetworkNamespace, specs.PIDNamespace:\n\t\treturn !(isHost(mode) || isContainer(mode))\n\tcase specs.UserNamespace, specs.UTSNamespace:\n\t\treturn !(isHost(mode))\n\t}\n\treturn false\n}", "func isLabelUnique(ctx context.Context, ss do.ServerlessService, label string) (bool, error) {\n\tresp, err := ss.ListNamespaces(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor _, ns := range resp.Namespaces {\n\t\tif label == ns.Label {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\treturn true, nil\n}", "func Is(name string) bool {\n\treturn strings.HasPrefix(name, Prefix)\n}", "func (_m *MockSeriesIterator) Namespace() ident.ID {\n\tret := _m.ctrl.Call(_m, \"Namespace\")\n\tret0, _ := ret[0].(ident.ID)\n\treturn ret0\n}", "func (s imageRepositoryStrategy) NamespaceScoped() bool {\n\treturn true\n}", "func (me TSAFTPTMovementTaxType) IsNs() bool { return me.String() == \"NS\" }", "func (c *Inventory) 
ensureNamespace(namespace string, objects []runtime.Object) error {\n\tfor i := range objects {\n\t\tobjMeta, err := meta.Accessor(objects[i])\n\t\tif err != nil {\n\t\t\treturn errors.WrapIfWithDetails(err, \"couldn't get meta data access for object\", \"gvk\", objects[i].GetObjectKind().GroupVersionKind().String())\n\t\t}\n\n\t\tisClusterScoped, err := c.IsClusterScoped(objects[i])\n\t\tif err != nil {\n\t\t\tc.log.Error(err, \"scope check failed\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif !isClusterScoped && objMeta.GetNamespace() == \"\" {\n\t\t\tc.log.V(2).Info(\"setting namespace field for namespace scoped object\", \"gvk\", objects[i].GetObjectKind().GroupVersionKind().String(), \"name\", objMeta.GetName())\n\t\t\tobjMeta.SetNamespace(namespace)\n\t\t}\n\t}\n\treturn nil\n}", "func (self *EasyHandler) Namespaces() []string {\n\treturn []string{\"000001\",\"000002\"};\n}", "func NamespaceIsReady(resource common.ComponentResource) (bool, error) {\n\tvar namespace v1.Namespace\n\tif err := getObject(resource, &namespace, true); err != nil {\n\t\treturn false, err\n\t}\n\n\t// if we have a name that is empty, we know we did not find the object\n\tif namespace.Name == \"\" {\n\t\treturn false, nil\n\t}\n\n\t// if the namespace is terminating, it is not considered ready\n\tif namespace.Status.Phase == v1.NamespaceTerminating {\n\t\treturn false, nil\n\t}\n\n\t// finally, rely on the active field to determine if this namespace is ready\n\treturn namespace.Status.Phase == v1.NamespaceActive, nil\n}", "func IsPod(s string) bool {\n\treturn s == Pod\n}", "func (o *KubernetesAddonDefinitionAllOf) HasDefaultNamespace() bool {\n\tif o != nil && o.DefaultNamespace != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r *Roster) MatchesNamespace(namespace string, serverTarget bool) bool {\n\tif serverTarget {\n\t\treturn false\n\t}\n\treturn namespace == rosterNamespace\n}", "func isInteresting(request reconcile.Request) bool {\n\treturn request.Name == 
kedaControllerResourceName && request.Namespace == kedaControllerResourceNamespace\n}", "func (h *KubernetesHelper) CheckIfNamespaceExists(ctx context.Context, namespace string) error {\n\t_, err := h.clientset.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})\n\treturn err\n}", "func ensureNamespace(ctx context.Context, clientSet kubernetes.Interface, nsName string) error {\n\tns := corev1.Namespace{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: nsName,\n\t\t\tLabels: multiClusterLabels(),\n\t\t},\n\t}\n\t_, err := clientSet.CoreV1().Namespaces().Create(ctx, &ns, metav1.CreateOptions{})\n\tif !errors.IsAlreadyExists(err) && err != nil {\n\t\treturn xerrors.Errorf(\"failed to create namespace %s: %w\", ns.Name, err)\n\t}\n\n\treturn nil\n}", "func (m *Mgr) InterestedInNamespaces() []string {\n\treturn m.ccInfoProvider.Namespaces()\n}", "func (namespace *Namespace) IsValid() bool {\n\treturn (namespace != nil && namespace.ID > 0)\n}", "func (p *Pod) HasServiceName(n string) bool {\n\tfor _, name := range p.GetServiceNames() {\n\t\tif name == n {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isInternal(pkgName string) bool {\n\tcleanedPath := filepath.Clean(pkgName)\n\texplodedPath := strings.Split(cleanedPath, string(os.PathSeparator))\n\n\tfor _, pathPart := range explodedPath {\n\t\tif pathPart == internal {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (q *QuarksStatefulSet) GetNamespacedName() string {\n\treturn fmt.Sprintf(\"%s/%s\", q.Namespace, q.Name)\n}", "func (n *Namespace) Wildcard() bool {\n\treturn n.Scope == \"*\"\n}", "func (c *Client) Namespace(ns string, onlyLeaf bool) ([]string, error) {\n\tvar res []string\n\tfullURL := fmt.Sprintf(\"%s/router/ns?ns=%s&format=list\", c.Addr, ns)\n\tresp, err := http.Get(fullURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode/100 != 2 {\n\t\treturn nil, fmt.Errorf(\"HTTP status error: %d\", resp.Status)\n\t}\n\n\tbody, err 
:= ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data namespace\n\tif err := json.Unmarshal(body, &data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tres = data.Data\n\n\tif !onlyLeaf {\n\t\ttmp := make(map[string]bool)\n\t\tfor _, leaf := range data.Data {\n\t\t\tarr := strings.SplitAfterN(leaf, \".\", 2)\n\t\t\tif len(arr) > 1 {\n\t\t\t\ttmp[arr[1]] = true\n\t\t\t}\n\t\t}\n\n\t\tfor k := range tmp {\n\t\t\tres = append(res, k)\n\t\t}\n\t}\n\n\treturn res, nil\n}", "func skipTestNamespaceCustomization() bool {\n\ttestName := ginkgo.CurrentSpecReport().FullText()\n\treturn strings.Contains(testName, \"should always delete fast\") || strings.Contains(testName, \"should delete fast enough\")\n}", "func (st *SymTable) IsDeclared(symbol string) bool {\n\tif _, ok := st.Entries[symbol]; ok {\n\t\treturn true\n\t}\n\tif st.Parent == nil {\n\t\treturn false\n\t}\n\treturn st.Parent.IsDeclared(symbol)\n}", "func isAPI(path string) bool {\n\treturn strings.HasPrefix(path, \"/api\")\n}", "func requireNamespace() {\n\tif len(params.NameSpace) == 0 {\n\t\tif len(params.NameSpace) == 0 {\n\t\t\tlog.Error(StrMissingNamespace)\n\t\t\tos.Exit(-1)\n\t\t}\n\t}\n}", "func isConnReady(c kubernetes.Interface) error {\n\t_, err := c.CoreV1().Namespaces().Get(context.TODO(), \"kube-system\", metav1.GetOptions{})\n\treturn err\n}", "func InPodNamespace() (string, error) {\n\t// The path is available in Pods.\n\t// https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/#directly-accessing-the-rest-api\n\tns, err := ioutil.ReadFile(\"/var/run/secrets/kubernetes.io/serviceaccount/namespace\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get namespace in Pod: %w\", err)\n\t}\n\treturn string(ns), nil\n}", "func IsMetaPackage(name string) bool {\n\treturn name == \"std\" || name == \"cmd\" || name == \"all\"\n}", "func (h *hostNamespaces) hostOnlyInNamespace(hostName, namespace string) bool {\n\th.mux.RLock()\n\tdefer 
h.mux.RUnlock()\n\t_, found := h.HtoN[hostName]\n\tif !found {\n\t\t// returning false here because host might\n\t\t// very well be deleted by another thread before this function\n\t\t// got ran; in which case, return false so caller don't do\n\t\t// anything that might be dangerous\n\t\treturn false\n\t}\n\treturn h.HtoN[hostName].Cardinality() == 1 &&\n\t\th.HtoN[hostName].Contains(namespace)\n\n}", "func Valid(s string, ns LinuxNS) bool {\n\treturn IsPod(s) || IsNS(s) || ns.Valid()\n}", "func IsSpaceRoot(r *Node) bool {\n\tpath := r.InternalPath()\n\tif spaceNameBytes, err := xattr.Get(path, xattrs.SpaceNameAttr); err == nil {\n\t\tif string(spaceNameBytes) != \"\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func createTestNamespaceWithSecret(logger *zap.Logger, name string, secret string) (*Namespace, error) {\n\treturn NewNamespace(logger, name, name, name, secret, 0)\n}", "func hasNoNamespaceSelector(spec v1alpha1.SyncConfigSpec) bool {\n\tif spec.NamespaceSelector == nil {\n\t\treturn true\n\t}\n\treturn spec.NamespaceSelector.LabelSelector == nil && len(spec.NamespaceSelector.MatchNames) == 0\n}" ]
[ "0.8179649", "0.772474", "0.7090225", "0.6819809", "0.681966", "0.6794537", "0.66282797", "0.65289366", "0.6509803", "0.6343726", "0.619336", "0.6130071", "0.6124255", "0.6038894", "0.60016686", "0.58861005", "0.58305144", "0.5798163", "0.5788891", "0.5735441", "0.57147056", "0.5702245", "0.5679555", "0.5679095", "0.5671072", "0.5670683", "0.5657302", "0.56560564", "0.5615908", "0.5574599", "0.5566699", "0.5525336", "0.54986376", "0.5453613", "0.5447893", "0.5444899", "0.5443284", "0.5438371", "0.5425984", "0.54231614", "0.5408933", "0.54058236", "0.5399742", "0.5360341", "0.5351127", "0.53154296", "0.53042716", "0.5301749", "0.52923894", "0.5278628", "0.5271257", "0.5268633", "0.52611136", "0.5247799", "0.52270347", "0.52245635", "0.52234477", "0.52050585", "0.5202514", "0.51969606", "0.519133", "0.51667047", "0.5166311", "0.5138451", "0.50904125", "0.508682", "0.508457", "0.50751466", "0.503831", "0.50319403", "0.4985939", "0.49758187", "0.4958282", "0.4954234", "0.4953431", "0.4941876", "0.49321437", "0.4920594", "0.49164978", "0.49146482", "0.4913864", "0.4899296", "0.48860863", "0.48849428", "0.48807803", "0.4880692", "0.48801062", "0.4875401", "0.4870684", "0.48647052", "0.48625454", "0.48606223", "0.48516846", "0.4851141", "0.48496488", "0.48486874", "0.48435327", "0.4840146", "0.48388034", "0.4831545" ]
0.7514168
2
EnsureListener mocks base method
func (m *MockLoadBalance) EnsureListener(region string, listener *v1.Listener) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EnsureListener", region, listener) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockAll) Listener() Listener {\n\tret := m.ctrl.Call(m, \"Listener\")\n\tret0, _ := ret[0].(Listener)\n\treturn ret0\n}", "func (m *MockHealthCheck) SetListener(arg0 discovery.LegacyHealthCheckStatsListener, arg1 bool) {\n\tm.ctrl.Call(m, \"SetListener\", arg0, arg1)\n}", "func (m *MockListener) Get(listenerKey api.ListenerKey) (api.Listener, error) {\n\tret := m.ctrl.Call(m, \"Get\", listenerKey)\n\tret0, _ := ret[0].(api.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockDynamicCertPrivate) AddListener(arg0 dynamiccertificates.Listener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddListener\", arg0)\n}", "func (m *MockListener) Create(listener api.Listener) (api.Listener, error) {\n\tret := m.ctrl.Call(m, \"Create\", listener)\n\tret0, _ := ret[0].(api.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func newMockListener(endpoint net.Conn) *mockListener {\n \n c := make(chan net.Conn, 1)\n c <- endpoint\n listener := &mockListener{\n connChannel: c,\n serverEndpoint: endpoint,\n }\n return listener\n}", "func (m *MockListener) Modify(listener api.Listener) (api.Listener, error) {\n\tret := m.ctrl.Call(m, \"Modify\", listener)\n\tret0, _ := ret[0].(api.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoadBalance) EnsureSegmentListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureSegmentListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func MockListener(t *testing.T, address string) {\n\tladdr, err := net.ResolveUDPAddr(\"udp\", address)\n\tif err != nil {\n\t\tt.Fatal(\"Couldn't resolve address\", err)\n\t}\n\n\t_, err = net.ListenUDP(\"udp\", laddr)\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't listen to %s: %s\", address, err)\n\t}\n\n\ttime.Sleep(10 * time.Second)\n\treturn\n}", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar memLog 
base.InMemLogger\n\tmem := vfs.NewMem()\n\trequire.NoError(t, mem.MkdirAll(\"ext\", 0755))\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(t *testing.T, td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tmemLog.Reset()\n\t\t\tlel := MakeLoggingEventListener(&memLog)\n\t\t\tflushBegin, flushEnd := lel.FlushBegin, lel.FlushEnd\n\t\t\tlel.FlushBegin = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushBegin(info)\n\t\t\t}\n\t\t\tlel.FlushEnd = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushEnd(info)\n\t\t\t}\n\t\t\topts := &Options{\n\t\t\t\tFS: vfs.WithLogging(mem, memLog.Infof),\n\t\t\t\tFormatMajorVersion: internalFormatNewest,\n\t\t\t\tEventListener: &lel,\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tL0CompactionThreshold: 10,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t}\n\t\t\t// The table stats collector runs asynchronously and its\n\t\t\t// timing is less predictable. It increments nextJobID, which\n\t\t\t// can make these tests flaky. 
The TableStatsLoaded event is\n\t\t\t// tested separately in TestTableStats.\n\t\t\topts.private.disableTableStats = true\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", opts)\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tt := time.Now()\n\t\t\td.timeNow = func() time.Time {\n\t\t\t\tt = t.Add(time.Second)\n\t\t\t\treturn t\n\t\t\t}\n\t\t\td.testingAlwaysWaitForCleanup = true\n\t\t\treturn memLog.String()\n\n\t\tcase \"close\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"flush\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"compact\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\"), false); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"checkpoint\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Checkpoint(\"checkpoint\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"disable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\td.mu.Lock()\n\t\t\td.disableFileDeletions()\n\t\t\td.mu.Unlock()\n\t\t\treturn memLog.String()\n\n\t\tcase \"enable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\tfunc() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\tmemLog.Infof(\"%v\", r)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\td.mu.Lock()\n\t\t\t\tdefer d.mu.Unlock()\n\t\t\t\td.enableFileDeletions()\n\t\t\t}()\n\t\t\td.TestOnlyWaitForCleaning()\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest\":\n\t\t\tmemLog.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := 
sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest-flushable\":\n\t\t\tmemLog.Reset()\n\n\t\t\t// Prevent flushes during this test to ensure determinism.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = true\n\t\t\td.mu.Unlock()\n\n\t\t\tb := d.NewBatch()\n\t\t\tif err := b.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Apply(b, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\twriteTable := func(name string, key byte) error {\n\t\t\t\tf, err := mem.Create(name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tw := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t\t})\n\t\t\t\tif err := w.Add(base.MakeInternalKey([]byte{key}, 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.Close(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\ttableA, tableB := \"ext/a\", \"ext/b\"\n\t\t\tif err := writeTable(tableA, 'a'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := writeTable(tableB, 'b'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{tableA, tableB}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\n\t\t\t// Re-enable flushes, to allow the subsequent flush to proceed.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = false\n\t\t\td.mu.Unlock()\n\t\t\tif err := d.Flush(); err != nil 
{\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"metrics\":\n\t\t\t// The asynchronous loading of table stats can change metrics, so\n\t\t\t// wait for all the tables' stats to be loaded.\n\t\t\td.mu.Lock()\n\t\t\td.waitTableStats()\n\t\t\td.mu.Unlock()\n\n\t\t\treturn d.Metrics().String()\n\n\t\tcase \"sstables\":\n\t\t\tvar buf bytes.Buffer\n\t\t\ttableInfos, _ := d.SSTables()\n\t\t\tfor i, level := range tableInfos {\n\t\t\t\tif len(level) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(&buf, \"%d:\\n\", i)\n\t\t\t\tfor _, m := range level {\n\t\t\t\t\tfmt.Fprintf(&buf, \" %d:[%s-%s]\\n\",\n\t\t\t\t\t\tm.FileNum, m.Smallest.UserKey, m.Largest.UserKey)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func (m *MockStream) AddEventListener(streamEventListener types.StreamEventListener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddEventListener\", streamEventListener)\n}", "func TestIntegrationNewListener(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration test in short mode...\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*120)\n\tdefer cancel()\n\n\tlistener := NewAmqpConnection(ctx, config)\n\t// Remove topic to ensure each test has a clean topic to work with\n\tdefer deleteSubscription(listener, config)\n\n\tnonce := time.Now().String()\n\tsender, err := listener.CreateAmqpSender(config.SubscribesToEvent)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\terr = sender.Send(ctx, amqp.NewMessage([]byte(nonce)))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tstats, err := listener.GetQueueDepth()\n\tdepth := stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 1 {\n\t\tt.Errorf(\"Expected queue depth of 1 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n\n\tamqpMessage, err := 
listener.Receiver.Receive(ctx)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmessage := messaging.NewAmqpMessageWrapper(amqpMessage)\n\n\tgo func() {\n\t\ttime.Sleep(time.Duration(45) * time.Second)\n\t\terr := listener.RenewLocks(ctx, []*amqp.Message{\n\t\t\tamqpMessage,\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t//Added to ensure that locks are renewed\n\ttime.Sleep(time.Duration(75) * time.Second)\n\n\terr = message.Accept()\n\tif string(message.Body()) != nonce {\n\t\tt.Errorf(\"value not as expected in message Expected: %s Got: %s\", nonce, message.Body())\n\t}\n\n\tstats, err = listener.GetQueueDepth()\n\tdepth = stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 0 {\n\t\tt.Errorf(\"Expected queue depth of 0 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n}", "func TestCheckEvents(t *testing.T) {\n\ttestNamespace := \"test_namespace\"\n\tcha := make(chan *events.Envelope)\n\terrorsCh := make(chan error)\n\tme := &mockEvt{\n\t\tmockSubscribe: func(ctx context.Context, filter ...string) (ch <-chan *events.Envelope, errs <-chan error) {\n\t\t\treturn cha, errorsCh\n\t\t},\n\t}\n\titf := &fake.MockedContainerdClient{\n\t\tMockEvents: func() containerd.EventService {\n\t\t\treturn containerd.EventService(me)\n\t\t},\n\t\tMockNamespaces: func(ctx context.Context) ([]string, error) {\n\t\t\treturn []string{testNamespace}, nil\n\t\t},\n\t\tMockContainers: func(namespace string) ([]containerd.Container, error) {\n\t\t\treturn nil, nil\n\t\t},\n\t}\n\t// Test the basic listener\n\tsub := createEventSubscriber(\"subscriberTest1\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttp := &containerdevents.TaskPaused{\n\t\tContainerID: \"42\",\n\t}\n\n\tvp, err := typeurl.MarshalAny(tp)\n\tassert.NoError(t, err)\n\n\ten := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/paused\",\n\t\tEvent: vp,\n\t}\n\tcha <- &en\n\n\ttimeout := 
time.NewTimer(2 * time.Second)\n\tticker := time.NewTicker(5 * time.Millisecond)\n\tcondition := false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tev := sub.Flush(time.Now().Unix())\n\tassert.Len(t, ev, 1)\n\tassert.Equal(t, ev[0].Topic, \"/tasks/paused\")\n\terrorsCh <- fmt.Errorf(\"chan breaker\")\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting for error\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Test the multiple events one unsupported\n\tsub = createEventSubscriber(\"subscriberTest2\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttk := &containerdevents.TaskOOM{\n\t\tContainerID: \"42\",\n\t}\n\tvk, err := typeurl.MarshalAny(tk)\n\tassert.NoError(t, err)\n\n\tek := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/oom\",\n\t\tEvent: vk,\n\t}\n\n\tnd := &containerdevents.NamespaceDelete{\n\t\tName: \"k10s.io\",\n\t}\n\tvnd, err := typeurl.MarshalAny(nd)\n\tassert.NoError(t, err)\n\n\tevnd := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/namespaces/delete\",\n\t\tEvent: vnd,\n\t}\n\n\tcha <- &ek\n\tcha <- &evnd\n\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\tev2 := sub.Flush(time.Now().Unix())\n\tfmt.Printf(\"\\n\\n 2/ Flush %v\\n\\n\", ev2)\n\tassert.Len(t, ev2, 1)\n\tassert.Equal(t, ev2[0].Topic, \"/tasks/oom\")\n}", "func Test_App_Listener(t 
*testing.T) {\n\tt.Parallel()\n\tapp := New()\n\n\tgo func() {\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\tutils.AssertEqual(t, nil, app.Shutdown())\n\t}()\n\n\tln := fasthttputil.NewInmemoryListener()\n\tutils.AssertEqual(t, nil, app.Listener(ln))\n}", "func TestIntegrationNewListener(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration test in short mode...\")\n\t}\n\n\t// pick a random name to prevent previous tests affecting this test\n\tconfig.ModuleName = helpers.RandomName(8)\n\n\trenewEvery := time.Second * 35\n\tprocessingTime := time.Second * 240\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*310)\n\tdefer cancel()\n\n\tlistener := NewAmqpConnection(ctx, config)\n\t// Remove topic to ensure each test has a clean topic to work with\n\tdefer deleteSubscription(listener, config)\n\n\tnonce := time.Now().String()\n\tsender, err := listener.CreateAmqpSender(config.SubscribesToEvent)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\terr = sender.Send(ctx, amqp.NewMessage([]byte(nonce)))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tstats, err := listener.GetQueueDepth()\n\tdepth := stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 1 {\n\t\tt.Errorf(\"Expected queue depth of 1 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n\n\tamqpMessage, err := listener.Receiver.Receive(ctx)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmessage := messaging.NewAmqpMessageWrapper(amqpMessage)\n\n\t// SUMMARY: Testing message lock renewal. By default SB messages's locks expire after 1min and the message is requeued\n\t// 1. Starts a loop renewing the message lock\n\t// 2. Block for more than 1min\n\t// 3. Accept the message (dequeuing it)\n\t// 4. Check the queue length is 0... 
if it's not we lost the lock and the message got put back on the queue.\n\trenewContext, cancel := context.WithCancel(ctx)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-renewContext.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(renewEvery)\n\t\t\t\terr := listener.RenewLocks(ctx, []*amqp.Message{\n\t\t\t\t\tamqpMessage,\n\t\t\t\t})\n\n\t\t\t\t// Report the test error if the context hasn't been cancelled.\n\t\t\t\tif err != nil {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-renewContext.Done():\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tt.Error(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\ttime.Sleep(processingTime)\n\tcancel()\n\n\terr = message.Accept()\n\tif string(message.Body()) != nonce {\n\t\tt.Errorf(\"value not as expected in message Expected: %s Got: %s\", nonce, message.Body())\n\t}\n\n\t// wait for the SB stats API to update\n\ttime.Sleep(time.Second * 30)\n\n\tstats, err = listener.GetQueueDepth()\n\tdepth = stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 0 {\n\t\tt.Errorf(\"Expected queue depth of 0 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n}", "func (m *MockStreamEventListener) OnDestroyStream() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnDestroyStream\")\n}", "func TestListener(t *testing.T) {\n\tlistener := &fakeListener{}\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, listener)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\tgo func() {\n\t\treq := <-rt.req\n\t\ttrace := httptrace.ContextClientTrace(req.Context())\n\t\ttrace.GotConn(httptrace.GotConnInfo{\n\t\t\tConn: &fakeConn{\n\t\t\t\tremoteAddr: &net.TCPAddr{\n\t\t\t\t\tIP: net.ParseIP(\"192.0.2.2\"),\n\t\t\t\t\tPort: 443,\n\t\t\t\t}}})\n\n\t\tr, w := io.Pipe()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 200,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t\tw.Write([]byte{0, 
0, 8, 9, 10})\n\t\tw.Close()\n\t}()\n\n\tdoh.Query(simpleQueryBytes)\n\ts := listener.summary\n\tif s.Latency < 0 {\n\t\tt.Errorf(\"Negative latency: %f\", s.Latency)\n\t}\n\tif !bytes.Equal(s.Query, simpleQueryBytes) {\n\t\tt.Errorf(\"Wrong query: %v\", s.Query)\n\t}\n\tif !bytes.Equal(s.Response, []byte{0xbe, 0xef, 8, 9, 10}) {\n\t\tt.Errorf(\"Wrong response: %v\", s.Response)\n\t}\n\tif s.Server != \"192.0.2.2\" {\n\t\tt.Errorf(\"Wrong server IP string: %s\", s.Server)\n\t}\n\tif s.Status != Complete {\n\t\tt.Errorf(\"Wrong status: %d\", s.Status)\n\t}\n}", "func (m *MockStream) RemoveEventListener(streamEventListener types.StreamEventListener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RemoveEventListener\", streamEventListener)\n}", "func (m *MockStreamConnectionEventListener) OnGoAway() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnGoAway\")\n}", "func (m *MockServerStreamConnectionEventListener) OnGoAway() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnGoAway\")\n}", "func (l *MockListener) Close() error {\n\treturn nil\n}", "func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) {\n\tfs, clientCh, cleanup := setupOverrides()\n\tdefer cleanup()\n\n\t// Create a server option to get notified about serving mode changes. We don't\n\t// do anything other than throwing a log entry here. But this is required,\n\t// since the server code emits a log entry at the default level (which is\n\t// ERROR) if no callback is registered for serving mode changes. Our\n\t// testLogger fails the test if there is any log entry at ERROR level. It does\n\t// provide an ExpectError() method, but that takes a string and it would be\n\t// painful to construct the exact error message expected here. 
Instead this\n\t// works just fine.\n\tmodeChangeOpt := ServingModeCallback(func(addr net.Addr, args ServingModeChangeArgs) {\n\t\tt.Logf(\"Serving mode for listener %q changed to %q, err: %v\", addr.String(), args.Mode, args.Err)\n\t})\n\tserver := NewGRPCServer(modeChangeOpt)\n\tdefer server.Stop()\n\n\tlis, err := testutils.LocalTCPListener()\n\tif err != nil {\n\t\tt.Fatalf(\"testutils.LocalTCPListener() failed: %v\", err)\n\t}\n\n\t// Call Serve() in a goroutine, and push on a channel when Serve returns.\n\tserveDone := testutils.NewChannel()\n\tgo func() {\n\t\tif err := server.Serve(lis); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tserveDone.Send(nil)\n\t}()\n\n\t// Wait for an xdsClient to be created.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tc, err := clientCh.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for new xdsClient to be created: %v\", err)\n\t}\n\tclient := c.(*fakeclient.Client)\n\n\t// Wait for a listener watch to be registered on the xdsClient.\n\tname, err := client.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a ListenerWatch: %v\", err)\n\t}\n\twantName := strings.Replace(testServerListenerResourceNameTemplate, \"%s\", lis.Addr().String(), -1)\n\tif name != wantName {\n\t\tt.Fatalf(\"LDS watch registered for name %q, want %q\", name, wantName)\n\t}\n\n\t// Push a good LDS response with security config, and wait for Serve() to be\n\t// invoked on the underlying grpc.Server. 
Also make sure that certificate\n\t// providers are not created.\n\tfcm, err := xdsresource.NewFilterChainManager(&v3listenerpb.Listener{\n\t\tFilterChains: []*v3listenerpb.FilterChain{\n\t\t\t{\n\t\t\t\tTransportSocket: &v3corepb.TransportSocket{\n\t\t\t\t\tName: \"envoy.transport_sockets.tls\",\n\t\t\t\t\tConfigType: &v3corepb.TransportSocket_TypedConfig{\n\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{\n\t\t\t\t\t\t\tCommonTlsContext: &v3tlspb.CommonTlsContext{\n\t\t\t\t\t\t\t\tTlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{\n\t\t\t\t\t\t\t\t\tInstanceName: \"identityPluginInstance\",\n\t\t\t\t\t\t\t\t\tCertificateName: \"identityCertName\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tFilters: []*v3listenerpb.Filter{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"filter-1\",\n\t\t\t\t\t\tConfigType: &v3listenerpb.Filter_TypedConfig{\n\t\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{\n\t\t\t\t\t\t\t\tRouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{\n\t\t\t\t\t\t\t\t\tRouteConfig: &v3routepb.RouteConfiguration{\n\t\t\t\t\t\t\t\t\t\tName: \"routeName\",\n\t\t\t\t\t\t\t\t\t\tVirtualHosts: []*v3routepb.VirtualHost{{\n\t\t\t\t\t\t\t\t\t\t\tDomains: []string{\"lds.target.good:3333\"},\n\t\t\t\t\t\t\t\t\t\t\tRoutes: []*v3routepb.Route{{\n\t\t\t\t\t\t\t\t\t\t\t\tMatch: &v3routepb.RouteMatch{\n\t\t\t\t\t\t\t\t\t\t\t\t\tPathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: \"/\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\tAction: &v3routepb.Route_NonForwardingAction{},\n\t\t\t\t\t\t\t\t\t\t\t}}}}},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tHttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsclient.NewFilterChainManager() failed with error: %v\", err)\n\t}\n\taddr, port := 
splitHostPort(lis.Addr().String())\n\tclient.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{\n\t\tRouteConfigName: \"routeconfig\",\n\t\tInboundListenerCfg: &xdsresource.InboundListenerConfig{\n\t\t\tAddress: addr,\n\t\t\tPort: port,\n\t\t\tFilterChains: fcm,\n\t\t},\n\t}, nil)\n\tif _, err := fs.serveCh.Receive(ctx); err != nil {\n\t\tt.Fatalf(\"error when waiting for Serve() to be invoked on the grpc.Server\")\n\t}\n\n\t// Make sure the security configuration is not acted upon.\n\tif err := verifyCertProviderNotCreated(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (t *TestStore) RegisterListener(listener database.Listener) {\n}", "func TestStartListener(t *testing.T) {\n\ttimeout := time.Second * 10\n\tr := fixtureReceiver()\n\tmockedLogger, _ := logger.New(\"json\", \"info\")\n\tctx := context.Background()\n\n\t// used to simulate sending a stop signal\n\tctx, cancelFunc := context.WithCancel(ctx)\n\n\t// start receiver\n\twg := sync.WaitGroup{}\n\tstart := make(chan bool, 1)\n\tdefer close(start)\n\twg.Add(1)\n\tgo func(t *testing.T) {\n\t\tdefer wg.Done()\n\t\tstart <- true\n\t\tt.Log(\"starting receiver in goroutine\")\n\t\tif err := r.StartListen(ctx, &testHandler{}, mockedLogger); err != nil {\n\t\t\tt.Errorf(\"error while starting HTTPMessageReceiver: %v\", err)\n\t\t}\n\t\tt.Log(\"receiver goroutine ends here\")\n\t}(t)\n\n\t// wait for goroutine to start\n\t<-start\n\n\t// stop it\n\tcancelFunc()\n\tc := make(chan struct{})\n\tgo func() {\n\t\tdefer close(c)\n\t\twg.Wait()\n\t}()\n\n\tt.Log(\"Waiting for receiver to stop\")\n\tselect {\n\t// receiver shutdown properly\n\tcase <-c:\n\t\tt.Log(\"Waiting for receiver to stop [done]\")\n\t\tbreak\n\t// receiver shutdown in time\n\tcase <-time.Tick(timeout):\n\t\tt.Fatalf(\"Expected receiver to shutdown after timeout: %v\\n\", timeout)\n\t}\n}", "func (m *MockLoadBalance) EnsureMultiListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := 
m.ctrl.Call(m, \"EnsureMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func testListener(t *testing.T, handler func(io.ReadWriter)) string {\n\tassert := assert.New(t)\n\trequire := require.New(t)\n\taddress := fmt.Sprintf(\"127.0.0.1:%d\", rand.Int31n(16384)+20000)\n\tl, err := net.Listen(`tcp4`, address)\n\trequire.Nil(err)\n\n\tgo func() {\n\t\tc, err := l.Accept()\n\t\trequire.Nil(err)\n\t\tdefer func() {\n\t\t\tassert.Nil(c.Close())\n\t\t}()\n\n\t\tif handler != nil {\n\t\t\thandler(c)\n\t\t}\n\t}()\n\n\treturn address\n}", "func (m *MockLoadBalance) DeleteListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestMockOnEvent(t *testing.T) {\n\tmockServer := &MockRailsServer{T: t, Behaviour: MockEvent}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan struct{})\n\n\tclient.OnEvent(\"AgentChannel\", func(conn *websocket.Conn, payload *Payload, error error) {\n\t\tcalled <- struct{}{}\n\t\treturn\n\t})\n\n\terr := client.Serve()\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}", "func (m *MockLoadBalance) EnsureMultiSegmentListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiSegmentListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) {\n\toldRBAC := envconfig.XDSRBAC\n\tenvconfig.XDSRBAC = true\n\tdefer func() {\n\t\tenvconfig.XDSRBAC = oldRBAC\n\t}()\n\t_, readyCh, xdsC, _, cleanup := newListenerWrapper(t)\n\tdefer cleanup()\n\n\t// Verify that the 
listener wrapper registers a listener watch for the\n\t// expected Listener resource name.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tname, err := xdsC.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a watch on a Listener resource: %v\", err)\n\t}\n\tif name != testListenerResourceName {\n\t\tt.Fatalf(\"listenerWrapper registered a lds watch on %s, want %s\", name, testListenerResourceName)\n\t}\n\tfcm, err := xdsresource.NewFilterChainManager(listenerWithRouteConfiguration)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsclient.NewFilterChainManager() failed with error: %v\", err)\n\t}\n\n\t// Push a good update which contains a Filter Chain that specifies dynamic\n\t// RDS Resources that need to be received. This should ping rds handler\n\t// about which rds names to start, which will eventually start a watch on\n\t// xds client for rds name \"route-1\".\n\txdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{\n\t\tInboundListenerCfg: &xdsresource.InboundListenerConfig{\n\t\t\tAddress: fakeListenerHost,\n\t\t\tPort: strconv.Itoa(fakeListenerPort),\n\t\t\tFilterChains: fcm,\n\t\t}}, nil)\n\n\t// This should start a watch on xds client for rds name \"route-1\".\n\trouteName, err := xdsC.WaitForWatchRouteConfig(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a watch on a Route resource: %v\", err)\n\t}\n\tif routeName != \"route-1\" {\n\t\tt.Fatalf(\"listenerWrapper registered a lds watch on %s, want %s\", routeName, \"route-1\")\n\t}\n\n\t// This shouldn't invoke good update channel, as has not received rds updates yet.\n\ttimer := time.NewTimer(defaultTestShortTimeout)\n\tselect {\n\tcase <-timer.C:\n\t\ttimer.Stop()\n\tcase <-readyCh:\n\t\tt.Fatalf(\"ready channel written to without rds configuration specified\")\n\t}\n\n\t// Invoke rds callback for the started rds watch. 
This valid rds callback\n\t// should trigger the listener wrapper to fire GoodUpdate, as it has\n\t// received both it's LDS Configuration and also RDS Configuration,\n\t// specified in LDS Configuration.\n\txdsC.InvokeWatchRouteConfigCallback(\"route-1\", xdsresource.RouteConfigUpdate{}, nil)\n\n\t// All of the xDS updates have completed, so can expect to send a ping on\n\t// good update channel.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatalf(\"timeout waiting for the ready channel to be written to after receipt of a good rds update\")\n\tcase <-readyCh:\n\t}\n}", "func TestSecurityModuleClientReconnect(t *testing.T) {\n\tctx := context.Background()\n\n\tclient := mocks.NewEventMonitoringModuleClient(t)\n\tstream := mocks.NewEventMonitoringModule_GetProcessEventsClient(t)\n\n\tl, err := NewSysProbeListener(nil, client, func(e *model.ProcessEvent) {})\n\trequire.NoError(t, err)\n\n\tl.retryInterval = 10 * time.Millisecond // force a fast retry for tests\n\trequire.NoError(t, err)\n\n\t// Simulate that the event listener starts connected to the SecurityModule server\n\tclient.On(\"GetProcessEvents\", ctx, &api.GetProcessEventParams{TimeoutSeconds: 1}).Return(stream, nil).Once()\n\tstream.On(\"Recv\").Return(nil, io.EOF)\n\n\t// Then disconnects from it\n\tdrop := make(chan time.Time)\n\tclient.On(\"GetProcessEvents\", ctx, &api.GetProcessEventParams{TimeoutSeconds: 1}).Return(stream, errors.New(\"server not available\")).WaitUntil(drop).Once()\n\n\t// And reconnects\n\treconnect := make(chan time.Time)\n\tclient.On(\"GetProcessEvents\", ctx, &api.GetProcessEventParams{TimeoutSeconds: 1}).Return(stream, nil).WaitUntil(reconnect)\n\n\tl.Run()\n\tassert.Eventually(t, func() bool { return l.connected.Load() == true }, 2*time.Second, 20*time.Millisecond,\n\t\t\"event listener can't connect to SecurityModule server\")\n\n\t// Next call to mocked GetProcessEvents blocks until drop channel is closed\n\tclose(drop)\n\tassert.Eventually(t, func() bool { return 
l.connected.Load() == false }, 2*time.Second, 20*time.Millisecond,\n\t\t\"event listener shouldn't be connected to SecurityModule server\")\n\n\t// Next call to mocked GetProcessEvents blocks until reconnect channel is closed\n\tclose(reconnect)\n\tassert.Eventually(t, func() bool { return l.connected.Load() == true }, 2*time.Second, 20*time.Millisecond,\n\t\t\"event listener should be connected to SecurityModule server\")\n\n\tl.Stop()\n\n\tclient.AssertExpectations(t)\n\tstream.AssertExpectations(t)\n}", "func (m *MockStreamEventListener) OnResetStream(reason types.StreamResetReason) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnResetStream\", reason)\n}", "func (m *MockProxy) Listen() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Listen\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *ListenerMock) Handle(msg interface{}) {\n\tm.Called(msg)\n}", "func (m *MockSession) AnnounceWorkerStopped() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AnnounceWorkerStopped\")\n}", "func (m *MockInterface) Listen() (net.Listener, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Listen\")\n\tret0, _ := ret[0].(net.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar buf syncedBuffer\n\tmem := vfs.NewMem()\n\terr := mem.MkdirAll(\"ext\", 0755)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tbuf.Reset()\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", &Options{\n\t\t\t\tFS: loggingFS{mem, &buf},\n\t\t\t\tEventListener: MakeLoggingEventListener(&buf),\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"flush\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn 
err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"compact\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\")); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"ingest\":\n\t\t\tbuf.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := sstable.NewWriter(f, nil, LevelOptions{})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := mem.Remove(\"ext/0\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"metrics\":\n\t\t\treturn d.Metrics().String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func (_m *ServerConnectionInterface) CloseListener() error {\n\tret := _m.Called()\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *ServerConnectionInterface) ReceiveThroughListener() ([]byte, error) {\n\tret := _m.Called()\n\n\tvar r0 []byte\n\tif rf, ok := ret.Get(0).(func() []byte); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]byte)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = rf()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (suite *TransportTestSuite) TestAlreadyListening() {\n\ttrans := suite.Transport\n\tinboundChan := make(chan message.Request, 
1)\n\tsuite.Assert().NoError(trans.Listen(testService, inboundChan))\n\tsuite.Assert().Equal(ErrAlreadyListening, trans.Listen(testService, inboundChan))\n}", "func (m *MockStreamReceiveListener) OnReceive(ctx context.Context, headers api.HeaderMap, data buffer.IoBuffer, trailers api.HeaderMap) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnReceive\", ctx, headers, data, trailers)\n}", "func (s) TestListenerResourceDeletionOnServerNotIgnored(t *testing.T) {\n\tmgmtServer := startManagementServer(t)\n\tnodeID := uuid.New().String()\n\tbs := generateBootstrapContents(t, mgmtServer.Address, false, nodeID)\n\txdsR := xdsResolverBuilder(t, bs)\n\tresources, lis := resourceWithListenerForGRPCServer(t, nodeID)\n\tupdateCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis)\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for the listener to move to \"serving\" mode.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Test timed out waiting for a mode change update.\")\n\tcase mode := <-updateCh:\n\t\tif mode != connectivity.ServingModeServing {\n\t\t\tt.Fatalf(\"Listener received new mode %v, want %v\", mode, connectivity.ServingModeServing)\n\t\t}\n\t}\n\n\t// Create a ClientConn and make a successful RPCs.\n\tcc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial local test server: %v\", err)\n\t}\n\tdefer cc.Close()\n\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := mgmtServer.Update(ctx, e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*listenerpb.Listener{}, // empty listener resource\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatalf(\"timed out waiting for a mode change update: %v\", err)\n\tcase mode := 
<-updateCh:\n\t\tif mode != connectivity.ServingModeNotServing {\n\t\t\tt.Fatalf(\"listener received new mode %v, want %v\", mode, connectivity.ServingModeNotServing)\n\t\t}\n\t}\n}", "func (m *MockServerStreamConnectionEventListener) NewStreamDetect(context context.Context, sender types.StreamSender, span types.Span) types.StreamReceiveListener {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewStreamDetect\", context, sender, span)\n\tret0, _ := ret[0].(types.StreamReceiveListener)\n\treturn ret0\n}", "func waitForWatchListener(ctx context.Context, t *testing.T, xdsC *fakeclient.Client, wantTarget string) {\n\tt.Helper()\n\n\tgotTarget, err := xdsC.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsClient.WatchService failed with error: %v\", err)\n\t}\n\tif gotTarget != wantTarget {\n\t\tt.Fatalf(\"xdsClient.WatchService() called with target: %v, want %v\", gotTarget, wantTarget)\n\t}\n}", "func (ml *MockMonitorListener) Close() {\n}", "func TestProcessEventHandling(t *testing.T) {\n\tctx := context.Background()\n\n\tclient := mocks.NewEventMonitoringModuleClient(t)\n\tstream := mocks.NewEventMonitoringModule_GetProcessEventsClient(t)\n\tclient.On(\"GetProcessEvents\", ctx, &api.GetProcessEventParams{TimeoutSeconds: 1}).Return(stream, nil)\n\n\tevents := make([]*model.ProcessEvent, 0)\n\tevents = append(events, model.NewMockedExecEvent(time.Now().Add(-10*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}))\n\tevents = append(events, model.NewMockedExitEvent(time.Now().Add(-9*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}, 0))\n\tevents = append(events, model.NewMockedExecEvent(time.Now().Add(-5*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"invalid-path\"}))\n\tevents = append(events, model.NewMockedExitEvent(time.Now().Add(-5*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"invalid-path\"}, 2))\n\n\tfor _, e := range events {\n\t\tdata, err := e.MarshalMsg(nil)\n\t\trequire.NoError(t, 
err)\n\n\t\tstream.On(\"Recv\").Once().Return(&api.ProcessEventMessage{Data: data}, nil)\n\t}\n\tstream.On(\"Recv\").Return(nil, io.EOF)\n\n\trcvMessage := make(chan bool)\n\ti := 0\n\thandler := func(e *model.ProcessEvent) {\n\t\tif i > len(events)-1 {\n\t\t\tt.Error(\"should not have received more process events\")\n\t\t}\n\n\t\tAssertProcessEvents(t, events[i], e)\n\t\t// all message have been consumed\n\t\tif i == len(events)-1 {\n\t\t\tclose(rcvMessage)\n\t\t}\n\n\t\ti++\n\t}\n\tl, err := NewSysProbeListener(nil, client, handler)\n\trequire.NoError(t, err)\n\tl.Run()\n\n\t<-rcvMessage\n\tl.Stop()\n\tclient.AssertExpectations(t)\n\tstream.AssertExpectations(t)\n}", "func (m *MockHooks) OnUpdate(existing, new proto.Message) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnUpdate\", existing, new)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (s) TestResolverWatchCallbackAfterClose(t *testing.T) {\n\t// Setup the management server that synchronizes with the test goroutine\n\t// using two channels. The management server signals the test goroutine when\n\t// it receives a discovery request for a route configuration resource. 
And\n\t// the test goroutine signals the management server when the resolver is\n\t// closed.\n\twaitForRouteConfigDiscoveryReqCh := make(chan struct{}, 1)\n\twaitForResolverCloseCh := make(chan struct{})\n\tmgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{\n\t\tOnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error {\n\t\t\tif req.GetTypeUrl() == version.V3RouteConfigURL {\n\t\t\t\tselect {\n\t\t\t\tcase waitForRouteConfigDiscoveryReqCh <- struct{}{}:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\t<-waitForResolverCloseCh\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to start xDS management server: %v\", err)\n\t}\n\tdefer mgmtServer.Stop()\n\n\t// Create a bootstrap configuration specifying the above management server.\n\tnodeID := uuid.New().String()\n\tcleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{\n\t\tNodeID: nodeID,\n\t\tServerURI: mgmtServer.Address,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cleanup()\n\n\t// Configure listener and route configuration resources on the management\n\t// server.\n\tconst serviceName = \"my-service-client-side-xds\"\n\trdsName := \"route-\" + serviceName\n\tcdsName := \"cluster-\" + serviceName\n\tresources := e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)},\n\t\tRoutes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, serviceName, cdsName)},\n\t\tSkipValidation: true,\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL(\"xds:///\" + serviceName)})\n\tdefer rClose()\n\n\t// Wait for a discovery request for a route configuration resource.\n\tselect {\n\tcase <-waitForRouteConfigDiscoveryReqCh:\n\tcase 
<-ctx.Done():\n\t\tt.Fatal(\"Timeout when waiting for a discovery request for a route configuration resource\")\n\t}\n\n\t// Close the resolver and unblock the management server.\n\trClose()\n\tclose(waitForResolverCloseCh)\n\n\t// Verify that the update from the management server is not propagated to\n\t// the ClientConn. The xDS resolver, once closed, is expected to drop\n\t// updates from the xDS client.\n\tsCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)\n\tdefer sCancel()\n\tif _, err := tcc.stateCh.Receive(sCtx); err != context.DeadlineExceeded {\n\t\tt.Fatalf(\"ClientConn received an update from the resolver that was closed: %v\", err)\n\t}\n}", "func (m *MockIByIdPresenter) OnReceived(arg0 aggregates.Topic) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnReceived\", arg0)\n}", "func TestInterface(t *testing.T) {\n\tvar _ events.EventStore = &LoggingDecoratorEventStore{}\n}", "func NewMockMonitorListener(queueSize int) *MockMonitorListener {\n\treturn &MockMonitorListener{\n\t\tqueue: make(chan *payload.Payload, queueSize),\n\t}\n}", "func (m *MockListener) EXPECT() *MockListenerMockRecorder {\n\treturn m.recorder\n}", "func (m *MockProvider) OnEndpointsSynced() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsSynced\")\n}", "func (_m *MockNetwork) AppendSubscriber() chan []net.IP {\n\tret := _m.ctrl.Call(_m, \"AppendSubscriber\")\n\tret0, _ := ret[0].(chan []net.IP)\n\treturn ret0\n}", "func (m *MockCallback) OnRemove(arg0 int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemove\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockUsecase) ListenEvents(userID int) (chan map[string]interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListenEvents\", userID)\n\tret0, _ := ret[0].(chan map[string]interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestCmdConfigChangeEvents(t *testing.T) {\n\tdefer cleanTestArtifacts(t)\n\tvar err error\n\terr = 
testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\n\toldconf := &guerrilla.AppConfig{}\n\tif err := oldconf.Load([]byte(configJsonA)); err != nil {\n\t\tt.Error(\"configJsonA is invalid\", err)\n\t}\n\n\tnewconf := &guerrilla.AppConfig{}\n\tif err := newconf.Load([]byte(configJsonB)); err != nil {\n\t\tt.Error(\"configJsonB is invalid\", err)\n\t}\n\n\tnewerconf := &guerrilla.AppConfig{}\n\tif err := newerconf.Load([]byte(configJsonC)); err != nil {\n\t\tt.Error(\"configJsonC is invalid\", err)\n\t}\n\n\texpectedEvents := map[guerrilla.Event]bool{\n\t\tguerrilla.EventConfigBackendConfig: false,\n\t\tguerrilla.EventConfigServerNew: false,\n\t}\n\tmainlog, err = getTestLog()\n\tif err != nil {\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\n\tbcfg := backends.BackendConfig{\"log_received_mails\": true}\n\tbackend, err := backends.New(bcfg, mainlog)\n\tapp, err := guerrilla.New(oldconf, backend, mainlog)\n\tif err != nil {\n\t\tt.Error(\"Failed to create new app\", err)\n\t}\n\ttoUnsubscribe := map[guerrilla.Event]func(c *guerrilla.AppConfig){}\n\ttoUnsubscribeS := map[guerrilla.Event]func(c *guerrilla.ServerConfig){}\n\n\tfor event := range expectedEvents {\n\t\t// Put in anon func since range is overwriting event\n\t\tfunc(e guerrilla.Event) {\n\t\t\tif strings.Index(e.String(), \"server_change\") == 0 {\n\t\t\t\tf := func(c *guerrilla.ServerConfig) {\n\t\t\t\t\texpectedEvents[e] = true\n\t\t\t\t}\n\t\t\t\t_ = app.Subscribe(e, f)\n\t\t\t\ttoUnsubscribeS[e] = f\n\t\t\t} else {\n\t\t\t\tf := func(c *guerrilla.AppConfig) {\n\t\t\t\t\texpectedEvents[e] = true\n\t\t\t\t}\n\t\t\t\t_ = app.Subscribe(e, f)\n\t\t\t\ttoUnsubscribe[e] = f\n\t\t\t}\n\n\t\t}(event)\n\t}\n\n\t// emit events\n\tnewconf.EmitChangeEvents(oldconf, app)\n\tnewerconf.EmitChangeEvents(newconf, app)\n\t// 
unsubscribe\n\tfor unevent, unfun := range toUnsubscribe {\n\t\t_ = app.Unsubscribe(unevent, unfun)\n\t}\n\tfor unevent, unfun := range toUnsubscribeS {\n\t\t_ = app.Unsubscribe(unevent, unfun)\n\t}\n\n\tfor event, val := range expectedEvents {\n\t\tif val == false {\n\t\t\tt.Error(\"Did not fire config change event:\", event)\n\t\t\tt.FailNow()\n\t\t}\n\t}\n\n}", "func (m *MockProvider) OnServiceSynced() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceSynced\")\n}", "func verifyReceiverLifecycle(t *testing.T, factory component.ReceiverFactory, getConfigFn getReceiverConfigFn) {\n\tctx := context.Background()\n\thost := newAssertNoErrorHost(t)\n\treceiverCreateSet := componenttest.NewNopReceiverCreateSettings()\n\n\tif getConfigFn == nil {\n\t\tgetConfigFn = factory.CreateDefaultConfig\n\t}\n\n\tcreateFns := []createReceiverFn{\n\t\twrapCreateLogsRcvr(factory),\n\t\twrapCreateTracesRcvr(factory),\n\t\twrapCreateMetricsRcvr(factory),\n\t}\n\n\tfor _, createFn := range createFns {\n\t\tfirstRcvr, err := createFn(ctx, receiverCreateSet, getConfigFn())\n\t\tif errors.Is(err, componenterror.ErrDataTypeIsNotSupported) {\n\t\t\tcontinue\n\t\t}\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, firstRcvr.Start(ctx, host))\n\t\trequire.NoError(t, firstRcvr.Shutdown(ctx))\n\n\t\tsecondRcvr, err := createFn(ctx, receiverCreateSet, getConfigFn())\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, secondRcvr.Start(ctx, host))\n\t\trequire.NoError(t, secondRcvr.Shutdown(ctx))\n\t}\n}", "func (_m *ISession) UpdateListeningStatus(name string) error {\n\tret := _m.Called(name)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(name)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockCallback) OnRemoveAll() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemoveAll\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestLiteLifecycle(t *testing.T) {\n\treport := test.CheckRoutines(t)\n\tdefer 
report()\n\n\tlim := test.TimeOut(time.Second * 30)\n\tdefer lim.Stop()\n\n\taNotifier, aConnected := onConnected()\n\n\taAgent, err := NewAgent(&AgentConfig{\n\t\tNetworkTypes: supportedNetworkTypes(),\n\t\tMulticastDNSMode: MulticastDNSModeDisabled,\n\t})\n\trequire.NoError(t, err)\n\trequire.NoError(t, aAgent.OnConnectionStateChange(aNotifier))\n\n\tdisconnectedDuration := time.Second\n\tfailedDuration := time.Second\n\tKeepaliveInterval := time.Duration(0)\n\tCheckInterval := 500 * time.Millisecond\n\tbAgent, err := NewAgent(&AgentConfig{\n\t\tLite: true,\n\t\tCandidateTypes: []CandidateType{CandidateTypeHost},\n\t\tNetworkTypes: supportedNetworkTypes(),\n\t\tMulticastDNSMode: MulticastDNSModeDisabled,\n\t\tDisconnectedTimeout: &disconnectedDuration,\n\t\tFailedTimeout: &failedDuration,\n\t\tKeepaliveInterval: &KeepaliveInterval,\n\t\tCheckInterval: &CheckInterval,\n\t})\n\trequire.NoError(t, err)\n\n\tbConnected := make(chan interface{})\n\tbDisconnected := make(chan interface{})\n\tbFailed := make(chan interface{})\n\n\trequire.NoError(t, bAgent.OnConnectionStateChange(func(c ConnectionState) {\n\t\tswitch c {\n\t\tcase ConnectionStateConnected:\n\t\t\tclose(bConnected)\n\t\tcase ConnectionStateDisconnected:\n\t\t\tclose(bDisconnected)\n\t\tcase ConnectionStateFailed:\n\t\t\tclose(bFailed)\n\t\tdefault:\n\t\t}\n\t}))\n\n\tconnectWithVNet(bAgent, aAgent)\n\n\t<-aConnected\n\t<-bConnected\n\tassert.NoError(t, aAgent.Close())\n\n\t<-bDisconnected\n\t<-bFailed\n\tassert.NoError(t, bAgent.Close())\n}", "func (s) TestListenerResourceDeletionOnServerIgnored(t *testing.T) {\n\tmgmtServer := startManagementServer(t)\n\tnodeID := uuid.New().String()\n\tbs := generateBootstrapContents(t, mgmtServer.Address, true, nodeID)\n\txdsR := xdsResolverBuilder(t, bs)\n\tresources, lis := resourceWithListenerForGRPCServer(t, nodeID)\n\tmodeChangeCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 
defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for the server to update to ServingModeServing mode.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Test timed out waiting for a server to change to ServingModeServing.\")\n\tcase mode := <-modeChangeCh:\n\t\tif mode != connectivity.ServingModeServing {\n\t\t\tt.Fatalf(\"Server switched to mode %v, want %v\", mode, connectivity.ServingModeServing)\n\t\t}\n\t}\n\n\t// Create a ClientConn and make a successful RPCs.\n\tcc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial local test server: %v\", err)\n\t}\n\tdefer cc.Close()\n\n\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Update without a listener resource.\n\tif err := mgmtServer.Update(ctx, e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*listenerpb.Listener{},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Perform RPCs every 100 ms for 1s and verify that the serving mode does not\n\t// change on gRPC server.\n\ttimer := time.NewTimer(500 * time.Millisecond)\n\tticker := time.NewTicker(50 * time.Millisecond)\n\tt.Cleanup(ticker.Stop)\n\tfor {\n\t\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\treturn\n\t\tcase mode := <-modeChangeCh:\n\t\t\tt.Fatalf(\"Server switched to mode: %v when no switch was expected\", mode)\n\t\tcase <-ticker.C:\n\t\t}\n\t}\n}", "func Test_Listen_Happy_Path(t *testing.T) {\n\tcalledCounter := 0\n\tconsumeCalled := make(chan interface{})\n\tconsumerGroup := &mocks.ConsumerGroup{}\n\n\t// Mimic the end of a consumerGroup session by just not blocking\n\tconsumerGroup.On(\"Consume\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\tcalledCounter++\n\t\t\tconsumeCalled <- 
true\n\t\t\tif calledCounter >= 2 {\n\t\t\t\ttime.Sleep(1000 * time.Second) // just wait\n\t\t\t}\n\t\t}).\n\t\tReturn(nil).Twice()\n\n\ttested := listener{consumerGroup: consumerGroup}\n\n\t// Listen() is blocking as long as there is no error or context is not canceled\n\tgo func() {\n\t\ttested.Listen(context.Background())\n\t\tassert.Fail(t, `We should have blocked on \"listen\", even if a consumer group session has ended`)\n\t}()\n\n\t// Assert that consume is called twice (2 consumer group sessions are expected)\n\t<-consumeCalled\n\t<-consumeCalled\n\n\tconsumerGroup.AssertExpectations(t)\n}", "func (l *MockListener) Listen() (net.Listener, error) {\n\treturn l, nil\n}", "func (m *MockProc) StopListen() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"StopListen\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockProvider) OnServiceAdd(arg0 *v1.Service) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceAdd\", arg0)\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureListener\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureListener), region, listener)\n}", "func verifyExtensionLifecycle(t *testing.T, factory extension.Factory, getConfigFn getExtensionConfigFn) {\n\tctx := context.Background()\n\thost := newAssertNoErrorHost(t)\n\textCreateSet := extensiontest.NewNopCreateSettings()\n\n\tif getConfigFn == nil {\n\t\tgetConfigFn = factory.CreateDefaultConfig\n\t}\n\n\tfirstExt, err := factory.CreateExtension(ctx, extCreateSet, getConfigFn())\n\trequire.NoError(t, err)\n\trequire.NoError(t, firstExt.Start(ctx, host))\n\trequire.NoError(t, firstExt.Shutdown(ctx))\n\n\tsecondExt, err := factory.CreateExtension(ctx, extCreateSet, getConfigFn())\n\trequire.NoError(t, err)\n\trequire.NoError(t, secondExt.Start(ctx, host))\n\trequire.NoError(t, secondExt.Shutdown(ctx))\n}", "func 
TestInitializeConfigWatcher(t *testing.T) {\n\n\t// Obtain a Test Logger (Required By be InitializeConfigWatcher function)\n\tlogger := logtesting.TestLogger(t)\n\n\t// Setup Environment\n\tcommontesting.SetTestEnvironment(t)\n\n\t// Create A Test Observability ConfigMap For The InitializeObservability() Call To Watch\n\tconfigMap := commontesting.GetTestSaramaConfigMap(commontesting.OldSaramaConfig, commontesting.TestEKConfig)\n\n\t// Create The Fake K8S Client And Add It To The ConfigMap\n\tfakeK8sClient := fake.NewSimpleClientset(configMap)\n\n\t// Add The Fake K8S Client To The Context (Required By InitializeObservability)\n\tctx := context.WithValue(context.TODO(), injectionclient.Key{}, fakeK8sClient)\n\n\t// The configWatcherHandler should change the nil \"watchedConfigMap\" to a valid ConfigMap when the watcher triggers\n\n\ttestConfigMap, err := fakeK8sClient.CoreV1().ConfigMaps(system.Namespace()).Get(ctx, SettingsConfigMapName, metav1.GetOptions{})\n\tassert.Nil(t, err)\n\tassert.Equal(t, testConfigMap.Data[\"sarama\"], commontesting.OldSaramaConfig)\n\n\t// Perform The Test (Initialize The Observability Watcher)\n\terr = InitializeConfigWatcher(ctx, logger, configWatcherHandler, system.Namespace())\n\tassert.Nil(t, err)\n\n\t// Wait for the configWatcherHandler to be called (happens pretty quickly; loop usually only runs once)\n\tfor try := 0; getWatchedMap() == nil && try < 100; try++ {\n\t\ttime.Sleep(5 * time.Millisecond)\n\t}\n\n\tassert.Equal(t, getWatchedMap().Data[\"sarama\"], commontesting.OldSaramaConfig)\n\n\t// Change the config map and verify the handler is called\n\ttestConfigMap.Data[\"sarama\"] = commontesting.NewSaramaConfig\n\n\t// The configWatcherHandler should change this back to a valid ConfigMap\n\tsetWatchedMap(nil)\n\n\ttestConfigMap, err = fakeK8sClient.CoreV1().ConfigMaps(system.Namespace()).Update(ctx, testConfigMap, metav1.UpdateOptions{})\n\tassert.Nil(t, err)\n\tassert.Equal(t, testConfigMap.Data[\"sarama\"], 
commontesting.NewSaramaConfig)\n\n\t// Wait for the configWatcherHandler to be called (happens pretty quickly; loop usually only runs once)\n\tfor try := 0; getWatchedMap() == nil && try < 100; try++ {\n\t\ttime.Sleep(5 * time.Millisecond)\n\t}\n\tassert.NotNil(t, getWatchedMap())\n\tassert.Equal(t, getWatchedMap().Data[\"sarama\"], commontesting.NewSaramaConfig)\n}", "func (m *MockWerftServiceClient) Listen(ctx context.Context, in *v1.ListenRequest, opts ...grpc.CallOption) (v1.WerftService_ListenClient, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Listen\", varargs...)\n\tret0, _ := ret[0].(v1.WerftService_ListenClient)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockProvider) OnEndpointsAdd(arg0 *v1.Endpoints) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsAdd\", arg0)\n}", "func (m *MockProc) OnSvcConfigUpdate(arg0 *service.Config) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnSvcConfigUpdate\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockEventLogger) AppendCheck(assumedVersion uint64, event eventlog.EventData) (uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AppendCheck\", assumedVersion, event)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(time.Time)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func TestEventsAlertEngine(t *testing.T) {\n\t// setup events pipeline to record and distribute events\n\tti := tInfo{batchInterval: 100 * time.Millisecond, dedupInterval: 100 * time.Second}\n\tAssertOk(t, ti.setup(t), \"failed to setup test\")\n\tdefer ti.teardown()\n\n\t// start spyglass (backend service for events)\n\tfdrTemp, fdrAddr, err := testutils.StartSpyglass(\"finder\", \"\", ti.mockResolver, nil, ti.logger, ti.esClient)\n\tAssertOk(t, err, \"failed to start spyglass finder, err: %v\", err)\n\tfdr := 
fdrTemp.(finder.Interface)\n\tdefer fdr.Stop()\n\tti.updateResolver(globals.Spyglass, fdrAddr)\n\n\t// API gateway\n\tapiGw, apiGwAddr, err := testutils.StartAPIGateway(\":0\", false,\n\t\tmap[string]string{}, []string{\"telemetry_query\", \"objstore\", \"tokenauth\", \"routing\"}, []string{}, ti.mockResolver, ti.logger)\n\tAssertOk(t, err, \"failed to start API gateway, err: %v\", err)\n\tdefer apiGw.Stop()\n\n\t// setup authn and get authz token\n\tuserCreds := &auth.PasswordCredential{Username: testutils.TestLocalUser, Password: testutils.TestLocalPassword, Tenant: testutils.TestTenant}\n\terr = testutils.SetupAuth(ti.apiServerAddr, true, nil, nil, userCreds, ti.logger)\n\tAssertOk(t, err, \"failed to setup authN service, err: %v\", err)\n\tdefer testutils.CleanupAuth(ti.apiServerAddr, true, false, userCreds, ti.logger)\n\tauthzHeader, err := testutils.GetAuthorizationHeader(apiGwAddr, userCreds)\n\tAssertOk(t, err, \"failed to get authZ header, err: %v\", err)\n\n\t// add event based alert policies\n\t// policy - 1\n\talertPolicy1 := policygen.CreateAlertPolicyObj(globals.DefaultTenant, globals.DefaultNamespace, fmt.Sprintf(\"ap1-%s\", uuid.NewV4().String()),\n\t\t\"Event\", eventattrs.Severity_CRITICAL, \"critical alerts from events\",\n\t\t[]*fields.Requirement{\n\t\t\t{Key: \"count\", Operator: \"gte\", Values: []string{\"15\"}},\n\t\t\t{Key: \"source.node-name\", Operator: \"equals\", Values: []string{t.Name()}},\n\t\t}, []string{})\n\n\talertPolicy1, err = ti.apiClient.MonitoringV1().AlertPolicy().Create(context.Background(), alertPolicy1)\n\tAssertOk(t, err, \"failed to add alert policy{ap1-*}, err: %v\", err)\n\n\t// policy - 2\n\talertPolicy2 := policygen.CreateAlertPolicyObj(globals.DefaultTenant, globals.DefaultNamespace, fmt.Sprintf(\"ap2-%s\", uuid.NewV4().String()),\n\t\t\"Event\", eventattrs.Severity_WARN, \"warning alerts from events\",\n\t\t[]*fields.Requirement{\n\t\t\t{Key: \"count\", Operator: \"gte\", Values: []string{\"5\"}},\n\t\t\t{Key: 
\"count\", Operator: \"lt\", Values: []string{\"7\"}},\n\t\t\t{Key: \"severity\", Operator: \"equals\", Values: []string{\n\t\t\t\teventattrs.Severity_DEBUG.String(),\n\t\t\t\teventattrs.Severity_WARN.String(),\n\t\t\t\teventattrs.Severity_INFO.String()}},\n\t\t\t{Key: \"type\", Operator: \"equals\", Values: []string{\n\t\t\t\teventtypes.SERVICE_STOPPED.String(),\n\t\t\t\teventtypes.LEADER_LOST.String()}},\n\t\t}, []string{})\n\n\talertPolicy2, err = ti.apiClient.MonitoringV1().AlertPolicy().Create(context.Background(), alertPolicy2)\n\tAssertOk(t, err, \"failed to add alert policy{ap2-*}, err: %v\", err)\n\n\talertPolicy3 := policygen.CreateAlertPolicyObj(globals.DefaultTenant, globals.DefaultNamespace, fmt.Sprintf(\"ap3-%s\", uuid.NewV4().String()),\n\t\t\"Event\", eventattrs.Severity_WARN, \"policy with no reqs\", []*fields.Requirement{}, []string{})\n\talertPolicy3, err = ti.apiClient.MonitoringV1().AlertPolicy().Create(context.Background(), alertPolicy3)\n\tAssertOk(t, err, \"failed to add alert policy, err: %v\", err)\n\n\tdefer func() {\n\t\terr := ti.cleanupPolicies()\n\t\tAssertOk(t, err, \"failed to cleanup policies\")\n\t}()\n\n\t// generate events\n\t// define list of events to be recorded\n\tdummyObjRef := &cluster.Node{\n\t\tTypeMeta: api.TypeMeta{\n\t\t\tKind: \"Node\",\n\t\t},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tTenant: globals.DefaultTenant,\n\t\t\tNamespace: globals.DefaultNamespace,\n\t\t},\n\t}\n\trecordEvents := []*struct {\n\t\teventType eventtypes.EventType\n\t\tmessage string\n\t\tobjRef interface{}\n\t\trepeat int // number of times to repeat the event\n\t}{\n\t\t// any of these events can generate an alert based on when it is getting dispatched from evtsproxy\n\t\t// to evtsmgr. e.g. 
ap2-*, count >=5 and count < 7\n\t\t{eventtypes.SERVICE_STARTED, fmt.Sprintf(\"(tenant:%s) test %s started\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 10},\n\t\t{eventtypes.SERVICE_RUNNING, fmt.Sprintf(\"(tenant:%s) test %s running\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 10},\n\t\t{eventtypes.SERVICE_UNRESPONSIVE, fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 15}, // this should generate an alert (alertPolicy1)\n\t\t{eventtypes.SERVICE_STOPPED, fmt.Sprintf(\"(tenant:%s) test %s stopped\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 5}, // this should generate an alert (alertPolicy2)\n\n\t\t{eventtypes.ELECTION_STARTED, fmt.Sprintf(\"(tenant:%s) dummy election: election started %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 10},\n\t\t{eventtypes.LEADER_ELECTED, fmt.Sprintf(\"(tenant:%s) dummy election: leader elected %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 10},\n\t\t{eventtypes.LEADER_CHANGED, fmt.Sprintf(\"(tenant:%s) dummy election: leader changed %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 15}, // this should generate an alert (alertPolicy1)\n\t\t{eventtypes.LEADER_LOST, fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 5}, // this should generate an alert (alertPolicy2)\n\n\t\t// events in non default tenant\n\t\t{eventtypes.SERVICE_STARTED, fmt.Sprintf(\"(tenant:%s) test %s started\", globals.DefaultTenant, t.Name()), nil, 10},\n\t\t{eventtypes.SERVICE_RUNNING, fmt.Sprintf(\"(tenant:%s) test %s running\", globals.DefaultTenant, t.Name()), nil, 10},\n\t\t{eventtypes.SERVICE_UNRESPONSIVE, fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", globals.DefaultTenant, t.Name()), nil, 15}, // this should generate an alert (alertPolicy1)\n\t\t{eventtypes.SERVICE_STOPPED, fmt.Sprintf(\"(tenant:%s) test %s stopped\", globals.DefaultTenant, t.Name()), nil, 5}, // this should generate an alert (alertPolicy2)\n\t}\n\n\twg := 
new(sync.WaitGroup)\n\twg.Add(2)\n\n\t// start recorder\n\trecorderEventsDir, err := ioutil.TempDir(\"\", \"\")\n\tAssertOk(t, err, \"failed to create recorder events directory\")\n\tdefer os.RemoveAll(recorderEventsDir)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tevtsRecorder, err := recorder.NewRecorder(&recorder.Config{\n\t\t\tComponent: uuid.NewV4().String(),\n\t\t\tEvtsProxyURL: ti.evtProxyServices.EvtsProxy.RPCServer.GetListenURL(),\n\t\t\tBackupDir: recorderEventsDir}, ti.logger)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create recorder, err: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tti.recorders.Lock()\n\t\tti.recorders.list = append(ti.recorders.list, evtsRecorder)\n\t\tti.recorders.Unlock()\n\n\t\t// record events\n\t\tfor i := range recordEvents {\n\t\t\tif objRef, ok := recordEvents[i].objRef.(cluster.Node); ok {\n\t\t\t\tobjRef.ObjectMeta.Name = CreateAlphabetString(5)\n\t\t\t\trecordEvents[i].objRef = &objRef\n\t\t\t}\n\t\t\tfor j := 0; j < recordEvents[i].repeat; j++ {\n\t\t\t\tevtsRecorder.Event(recordEvents[i].eventType, recordEvents[i].message, recordEvents[i].objRef)\n\t\t\t}\n\t\t}\n\n\t\t// wait for the batch interval\n\t\ttime.Sleep(3 * time.Second)\n\t\t// if objRef!=nil, this should increase the hits but not recreate the alerts.\n\t\t// it will recreate alerts otherwise.\n\t\tfor i := range recordEvents {\n\t\t\tevtsRecorder.Event(recordEvents[i].eventType, recordEvents[i].message, recordEvents[i].objRef)\n\t\t}\n\t}()\n\n\t// TODO: cannot add criteria meta.tenant=\"default\" or any meta.*\n\t// list of alerts to be generated by the alert engine\n\ttests := []struct {\n\t\tselector string\n\t\texpMessage string // stings will spaces are not allowed in field selector; so, this attribute\n\t\texpSuccess bool\n\t}{\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,status.object-ref.kind=%s,meta.tenant=%s\",\n\t\t\t\talertPolicy1.GetName(), alertPolicy1.GetUUID(), alertPolicy1.Spec.GetSeverity(), 
dummyObjRef.GetKind(), dummyObjRef.GetTenant()),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,status.object-ref.kind=%s,meta.tenant=%s\",\n\t\t\t\talertPolicy1.GetName(), alertPolicy1.GetUUID(), alertPolicy1.Spec.GetSeverity(), dummyObjRef.GetKind(), dummyObjRef.GetTenant()),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) dummy election: leader changed %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s\",\n\t\t\t\talertPolicy1.GetName(), alertPolicy1.GetUUID(), alertPolicy1.Spec.GetSeverity()),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", globals.DefaultTenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,status.object-ref.kind=%s,meta.tenant=%s\",\n\t\t\t\talertPolicy2.GetName(), alertPolicy2.GetUUID(), alertPolicy2.Spec.GetSeverity(), dummyObjRef.GetKind(), dummyObjRef.GetTenant()),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) test %s stopped\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,meta.tenant=%s\",\n\t\t\t\talertPolicy2.GetName(), alertPolicy2.GetUUID(), alertPolicy2.Spec.GetSeverity(), globals.DefaultTenant),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) test %s stopped\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,status.object-ref.kind=%s,meta.tenant=%s\",\n\t\t\t\talertPolicy2.GetName(), alertPolicy2.GetUUID(), alertPolicy2.Spec.GetSeverity(),\n\t\t\t\tdummyObjRef.GetKind(), dummyObjRef.GetTenant()),\n\t\t\texpMessage: 
fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,status.object-ref.kind=%s\",\n\t\t\t\talertPolicy2.GetName(), alertPolicy2.GetUUID(), alertPolicy2.Spec.GetSeverity(), \"invalid\"),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: false,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.object-ref.kind=invalid\"),\n\t\t\texpSuccess: false,\n\t\t},\n\t}\n\n\t// test if the expected alerts are generated\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor _, test := range tests {\n\t\t\tAssertEventually(t, func() (bool, interface{}) {\n\t\t\t\talerts, err := ti.apiClient.MonitoringV1().Alert().List(context.Background(),\n\t\t\t\t\t&api.ListWatchOptions{\n\t\t\t\t\t\tObjectMeta: api.ObjectMeta{Tenant: globals.DefaultTenant},\n\t\t\t\t\t\tFieldSelector: test.selector})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, fmt.Sprintf(\"%v failed, err: %v\", test.selector, err)\n\t\t\t\t}\n\n\t\t\t\tif test.expSuccess {\n\t\t\t\t\tfor _, alert := range alerts {\n\t\t\t\t\t\t// expecting a hit count of 2 since we duplicated all the events after the batch interval\n\t\t\t\t\t\tif alert.Status.Message == test.expMessage && alert.Status.TotalHits == 2 {\n\t\t\t\t\t\t\tif alert.ModTime == alert.CreationTime {\n\t\t\t\t\t\t\t\treturn false, fmt.Sprintf(\"mod-time of the alert did not get updated: %v\", alert.GetObjectMeta())\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn true, nil\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !test.expSuccess && len(alerts) == 0 {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\n\t\t\t\treturn false, fmt.Sprintf(\"expected: %v, obtained: %v\", test, alerts)\n\t\t\t}, \"did not receive the expected alert\", string(\"1s\"), string(\"20s\"))\n\t\t}\n\t}()\n\n\twg.Wait()\n\n\t// make sure the policy status got 
updated\n\texpectedAlertStatus := []struct {\n\t\tpolicyMeta *api.ObjectMeta\n\t\tminTotalHits int32\n\t\tmaxTotalHits int32\n\t\topenAlerts int32\n\t\tacknowledgedAlerts int32\n\t}{\n\t\t{policyMeta: alertPolicy1.GetObjectMeta(), minTotalHits: 3, maxTotalHits: 6, openAlerts: 3, acknowledgedAlerts: 0},\n\t\t{policyMeta: alertPolicy2.GetObjectMeta(), minTotalHits: 3, maxTotalHits: 6, openAlerts: 3, acknowledgedAlerts: 0},\n\t\t{policyMeta: alertPolicy3.GetObjectMeta(), minTotalHits: 0, maxTotalHits: 0, openAlerts: 0, acknowledgedAlerts: 0}, // no reqs so, there should be no alerts\n\t}\n\tfor _, as := range expectedAlertStatus {\n\t\tAssertEventually(t, func() (bool, interface{}) {\n\t\t\tres, err := ti.apiClient.MonitoringV1().AlertPolicy().Get(context.Background(),\n\t\t\t\t&api.ObjectMeta{Name: as.policyMeta.GetName(), Tenant: as.policyMeta.GetTenant(), Namespace: as.policyMeta.GetNamespace(), UUID: as.policyMeta.GetUUID()})\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Sprintf(\":%v, err: %v\", as.policyMeta.GetName(), err)\n\t\t\t}\n\n\t\t\tif (res.Status.GetTotalHits() < as.minTotalHits) || (res.Status.GetTotalHits() > as.maxTotalHits) {\n\t\t\t\treturn false, fmt.Sprintf(\"total hits on policy %v expected total hits to be between (%v, %v) obtained: %v\", res.GetObjectMeta().GetName(), as.minTotalHits, as.maxTotalHits, res.Status.GetTotalHits())\n\t\t\t}\n\n\t\t\tif as.openAlerts != res.Status.GetOpenAlerts() {\n\t\t\t\treturn false, fmt.Sprintf(\"open alerts on policy %v expected: %v, obtained: %v\", res.GetObjectMeta().GetName(), as.openAlerts, res.Status.GetOpenAlerts())\n\t\t\t}\n\n\t\t\tif as.acknowledgedAlerts != res.Status.GetAcknowledgedAlerts() {\n\t\t\t\treturn false, fmt.Sprintf(\"acknowledged alerts on policy %v expected: %v, obtained: %v\", res.GetObjectMeta().GetName(), as.acknowledgedAlerts, res.Status.GetAcknowledgedAlerts())\n\t\t\t}\n\n\t\t\treturn true, nil\n\t\t}, \"alert status does not match the expected\", string(\"200ms\"), 
string(\"10s\"))\n\t}\n\n\t// resolve or acknowledge alerts\n\talerts, err := ti.apiClient.MonitoringV1().Alert().List(context.Background(),\n\t\t&api.ListWatchOptions{\n\t\t\tObjectMeta: api.ObjectMeta{Tenant: globals.DefaultTenant},\n\t\t\tFieldSelector: fmt.Sprintf(\"status.reason.alert-policy-id in (%s,%s,%s)\",\n\t\t\t\tfmt.Sprintf(\"%s/%s\", alertPolicy1.GetName(), alertPolicy1.GetUUID()),\n\t\t\t\tfmt.Sprintf(\"%s/%s\", alertPolicy2.GetName(), alertPolicy2.GetUUID()),\n\t\t\t\tfmt.Sprintf(\"%s/%s\", alertPolicy3.GetName(), alertPolicy3.GetUUID())),\n\t\t})\n\tAssertOk(t, err, \"failed to list alerts, err: %v\", err)\n\tAssert(t, len(alerts) > 2, \"expected more than 2 alerts, got: %v\", len(alerts))\n\n\talertTests := []*struct {\n\t\talert monitoring.Alert\n\t\tresolve bool\n\t\tacknowledge bool\n\t}{\n\t\t{alert: *alerts[0], resolve: true, acknowledge: false},\n\t\t{alert: *alerts[len(alerts)-1], resolve: false, acknowledge: true},\n\t}\n\n\tfor _, at := range alertTests {\n\t\taURL := fmt.Sprintf(\"https://%s/configs/monitoring/v1/alerts/%s\", apiGwAddr, at.alert.GetName())\n\t\tapURL := fmt.Sprintf(\"https://%s/configs/monitoring/v1/alertPolicies/%s\", apiGwAddr,\n\t\t\tstrings.Split(at.alert.Status.Reason.GetPolicyID(), \"/\")[0])\n\n\t\thttpClient := netutils.NewHTTPClient()\n\t\thttpClient.WithTLSConfig(&tls.Config{InsecureSkipVerify: true})\n\t\thttpClient.SetHeader(\"Authorization\", authzHeader)\n\t\thttpClient.DisableKeepAlives()\n\t\tdefer httpClient.CloseIdleConnections()\n\n\t\t// check alert policy before update\n\t\tap := &monitoring.AlertPolicy{}\n\t\tstatusCode, err := httpClient.Req(\"GET\", apURL, &api.ListWatchOptions{}, &ap)\n\t\tAssertOk(t, err, \"failed to get alert policy, err: %v\", err)\n\t\tAssert(t, statusCode == http.StatusOK, \"failed to get alert policy\")\n\n\t\t// UPDATE alert state (to acknowledged or resolved)\n\t\tif at.acknowledge {\n\t\t\tresp := monitoring.Alert{}\n\t\t\tAssertEventually(t,\n\t\t\t\tfunc() (bool, 
interface{}) {\n\t\t\t\t\tat.alert.Spec.State = monitoring.AlertState_ACKNOWLEDGED.String()\n\t\t\t\t\tstatusCode, err := httpClient.Req(\"PUT\", aURL, at.alert, &resp)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"err: %v\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tif statusCode != http.StatusOK {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"update failed with status: %d\", statusCode)\n\t\t\t\t\t}\n\n\t\t\t\t\tif resp.Status.Acknowledged == nil {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"alert status not updated, acknowledged: nil\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn true, nil\n\t\t\t\t}, \"failed to update alert state\", \"200ms\", \"6s\")\n\t\t} else if at.resolve {\n\t\t\tresp := monitoring.Alert{}\n\t\t\tAssertEventually(t,\n\t\t\t\tfunc() (bool, interface{}) {\n\t\t\t\t\tat.alert.Spec.State = monitoring.AlertState_RESOLVED.String()\n\t\t\t\t\tstatusCode, err := httpClient.Req(\"PUT\", aURL, at.alert, &resp)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"err: %v\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tif statusCode != http.StatusOK {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"update failed with status: %d\", statusCode)\n\t\t\t\t\t}\n\n\t\t\t\t\tif resp.Status.Resolved == nil {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"alert status not updated, resolved: nil\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn true, nil\n\t\t\t\t}, \"failed to update alert state\", \"200ms\", \"6s\")\n\t\t}\n\n\t\tupdatedAp := &monitoring.AlertPolicy{}\n\t\tstatusCode, err = httpClient.Req(\"GET\", apURL, &api.ListWatchOptions{}, &updatedAp)\n\t\tAssertOk(t, err, \"failed to get alert policy, err: %v\", err)\n\t\tAssert(t, statusCode == http.StatusOK, \"failed to get alert policy\")\n\t\tAssert(t, !at.acknowledge || (at.acknowledge && updatedAp.Status.AcknowledgedAlerts > ap.Status.AcknowledgedAlerts),\n\t\t\t\"expected #acknowledged alerts: >%d, got: %d\", ap.Status.AcknowledgedAlerts, updatedAp.Status.AcknowledgedAlerts)\n\t\tAssert(t, !at.resolve || (at.resolve && 
updatedAp.Status.OpenAlerts < ap.Status.OpenAlerts),\n\t\t\t\"expected #acknowledged alerts: <%d, got: %d\", ap.Status.OpenAlerts, updatedAp.Status.OpenAlerts)\n\t}\n}", "func (m *MockUsecase) StopListenEvents(userID int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"StopListenEvents\", userID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockIRandomPresenter) OnListReceived(arg0 []aggregates.Topic) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnListReceived\", arg0)\n}", "func (m *MockHealthCheck) WaitForInitialStatsUpdates() {\n\tm.ctrl.Call(m, \"WaitForInitialStatsUpdates\")\n}", "func (m *MockisListenResponse_Content) isListenResponse_Content() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"isListenResponse_Content\")\n}", "func (m *MockStreamEventListener) EXPECT() *MockStreamEventListenerMockRecorder {\n\treturn m.recorder\n}", "func TestServerAddEvent(t *testing.T) {\n\tvar err error\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\tdefer cleanTestArtifacts(t)\n\tmainlog, err = getTestLog()\n\tif err != nil {\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\t// start the server by emulating the serve command\n\tif err := ioutil.WriteFile(\"configJsonA.json\", []byte(configJsonA), 0644); err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tcmd := &cobra.Command{}\n\tconfigPath = \"configJsonA.json\"\n\tgo func() {\n\t\tserve(cmd, []string{})\n\t}()\n\n\t// allow the server to start\n\tif _, err := grepTestlog(\"Listening on TCP 127.0.0.1:3536\", 0); err != nil {\n\t\tt.Error(\"server didn't start\")\n\t}\n\n\t// now change the config by adding a server\n\tconf := &guerrilla.AppConfig{} // blank one\n\terr = conf.Load([]byte(configJsonA)) // load configJsonA\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tnewServer := conf.Servers[0] // copy the 
first server config\n\tnewServer.ListenInterface = \"127.0.0.1:2526\" // change it\n\tnewConf := conf // copy the cmdConfg\n\tnewConf.Servers = append(newConf.Servers, newServer) // add the new server\n\tif jsonbytes, err := json.Marshal(newConf); err == nil {\n\t\tif err := ioutil.WriteFile(\"configJsonA.json\", jsonbytes, 0644); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\t// send a sighup signal to the server\n\tsigHup()\n\tif _, err := grepTestlog(\"[127.0.0.1:2526] Waiting for a new client\", 0); err != nil {\n\t\tt.Error(\"new server didn't start\")\n\t}\n\n\tif conn, buffin, err := test.Connect(newServer, 20); err != nil {\n\t\tt.Error(\"Could not connect to new server\", newServer.ListenInterface, err)\n\t} else {\n\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\texpect := \"250 mail.test.com Hello\"\n\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t}\n\t\t} else {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\n\t// shutdown the server\n\td.Shutdown()\n\n\t// did backend started as expected?\n\tif _, err := grepTestlog(\"New server added [127.0.0.1:2526]\", 0); err != nil {\n\t\tt.Error(\"Did not add server [127.0.0.1:2526] after sighup\")\n\t}\n\n\tif _, err := grepTestlog(\"Backend shutdown completed\", 0); err != nil {\n\t\tt.Error(\"Server failed to stop\")\n\t}\n\n}", "func (m *MockProvider) OnServiceUpdate(arg0, arg1 *v1.Service) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceUpdate\", arg0, arg1)\n}", "func TestDefaultBrokerWithManyTriggers(t *testing.T) {\n\tclient := setup(t, true)\n\tdefer tearDown(client)\n\n\t// Label namespace so that it creates the default broker.\n\tif err := client.LabelNamespace(map[string]string{\"knative-eventing-injection\": \"enabled\"}); err != nil {\n\t\tt.Fatalf(\"Error annotating namespace: %v\", err)\n\t}\n\n\t// Wait for default broker ready.\n\tif err := client.WaitForResourceReady(defaultBrokerName, 
common.BrokerTypeMeta); err != nil {\n\t\tt.Fatalf(\"Error waiting for default broker to become ready: %v\", err)\n\t}\n\n\t// These are the event types and sources that triggers will listen to, as well as the selectors\n\t// to set in the subscriber and services pods.\n\teventsToReceive := []eventReceiver{\n\t\t{eventTypeAndSource{Type: any, Source: any}, newSelector()},\n\t\t{eventTypeAndSource{Type: eventType1, Source: any}, newSelector()},\n\t\t{eventTypeAndSource{Type: any, Source: eventSource1}, newSelector()},\n\t\t{eventTypeAndSource{Type: eventType1, Source: eventSource1}, newSelector()},\n\t}\n\n\t// Create subscribers.\n\tfor _, event := range eventsToReceive {\n\t\tsubscriberName := name(\"dumper\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tpod := resources.EventLoggerPod(subscriberName)\n\t\tclient.CreatePodOrFail(pod, common.WithService(subscriberName))\n\t}\n\n\t// Create triggers.\n\tfor _, event := range eventsToReceive {\n\t\ttriggerName := name(\"trigger\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tsubscriberName := name(\"dumper\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tclient.CreateTriggerOrFail(triggerName,\n\t\t\tresources.WithSubscriberRefForTrigger(subscriberName),\n\t\t\tresources.WithTriggerFilter(event.typeAndSource.Source, event.typeAndSource.Type),\n\t\t)\n\t}\n\n\t// Wait for all test resources to become ready before sending the events.\n\tif err := client.WaitForAllTestResourcesReady(); err != nil {\n\t\tt.Fatalf(\"Failed to get all test resources ready: %v\", err)\n\t}\n\n\t// These are the event types and sources that will be send.\n\teventsToSend := []eventTypeAndSource{\n\t\t{eventType1, eventSource1},\n\t\t{eventType1, eventSource2},\n\t\t{eventType2, eventSource1},\n\t\t{eventType2, eventSource2},\n\t}\n\t// Map to save the expected events per dumper so that we can verify the delivery.\n\texpectedEvents := make(map[string][]string)\n\t// Map to save the unexpected events per 
dumper so that we can verify that they weren't delivered.\n\tunexpectedEvents := make(map[string][]string)\n\tfor _, eventToSend := range eventsToSend {\n\t\t// Create cloud event.\n\t\t// Using event type and source as part of the body for easier debugging.\n\t\tbody := fmt.Sprintf(\"Body-%s-%s\", eventToSend.Type, eventToSend.Source)\n\t\tcloudEvent := &resources.CloudEvent{\n\t\t\tSource: eventToSend.Source,\n\t\t\tType: eventToSend.Type,\n\t\t\tData: fmt.Sprintf(`{\"msg\":%q}`, body),\n\t\t}\n\t\t// Create sender pod.\n\t\tsenderPodName := name(\"sender\", eventToSend.Type, eventToSend.Source)\n\t\tif err := client.SendFakeEventToAddressable(senderPodName, defaultBrokerName, common.BrokerTypeMeta, cloudEvent); err != nil {\n\t\t\tt.Fatalf(\"Error send cloud event to broker: %v\", err)\n\t\t}\n\n\t\t// Check on every dumper whether we should expect this event or not, and add its body\n\t\t// to the expectedEvents/unexpectedEvents maps.\n\t\tfor _, eventToReceive := range eventsToReceive {\n\t\t\tsubscriberName := name(\"dumper\", eventToReceive.typeAndSource.Type, eventToReceive.typeAndSource.Source)\n\t\t\tif shouldExpectEvent(&eventToSend, &eventToReceive, t.Logf) {\n\t\t\t\texpectedEvents[subscriberName] = append(expectedEvents[subscriberName], body)\n\t\t\t} else {\n\t\t\t\tunexpectedEvents[subscriberName] = append(unexpectedEvents[subscriberName], body)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, event := range eventsToReceive {\n\t\tsubscriberName := name(\"dumper\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tif err := client.CheckLog(subscriberName, common.CheckerContainsAll(expectedEvents[subscriberName])); err != nil {\n\t\t\tt.Fatalf(\"Event(s) not found in logs of subscriber pod %q: %v\", subscriberName, err)\n\t\t}\n\t\t// At this point all the events should have been received in the pod.\n\t\t// We check whether we find unexpected events. 
If so, then we fail.\n\t\tfound, err := client.FindAnyLogContents(subscriberName, unexpectedEvents[subscriberName])\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed querying to find log contents in pod %q: %v\", subscriberName, err)\n\t\t}\n\t\tif found {\n\t\t\tt.Fatalf(\"Unexpected event(s) found in logs of subscriber pod %q\", subscriberName)\n\t\t}\n\t}\n}", "func newFailingMockListenUnix(network string, laddr *net.UnixAddr) (*net.UnixListener, error) {\n\treturn &net.UnixListener{}, nil\n}", "func (m *MockListener) Delete(listenerKey api.ListenerKey, checksum api.Checksum) error {\n\tret := m.ctrl.Call(m, \"Delete\", listenerKey, checksum)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (tl *testListener) Close() error {\n\treturn nil\n}", "func (m *MockWerftServiceServer) Listen(arg0 *v1.ListenRequest, arg1 v1.WerftService_ListenServer) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Listen\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockWatcherConstructor) EventType() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EventType\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func HealthListener(site site.API) {\n\t// nop\n}", "func (m *MockEventManager) Start(arg0 <-chan struct{}) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Start\", arg0)\n}", "func (socket *MockSocket) Listen() {\n}", "func TestTrapListener() {\n\tfile := os.Stdout\n\tconn := gosnmp.Default\n\tconn.Target = \"10.100.34.66\"\n\ttl := gosnmp.NewTrapListener()\n\ttl.OnNewTrap = resources.MyTrapHandler(file)\n\ttl.Params = conn\n\terr := tl.Listen(\"0.0.0.0:162\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n}", "func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...any) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, 
msgAndArgs...)\n}", "func (m *MockSessionPool) OnAfterSessionBind(arg0 func(context.Context, session.Session) error) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnAfterSessionBind\", arg0)\n}", "func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...interface{}) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func (_m *ServerConnectionInterface) StartListener(ipAddr string, port int) (int, error) {\n\tret := _m.Called(ipAddr, port)\n\n\tvar r0 int\n\tif rf, ok := ret.Get(0).(func(string, int) int); ok {\n\t\tr0 = rf(ipAddr, port)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, int) error); ok {\n\t\tr1 = rf(ipAddr, port)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func TestWebHook(t *testing.T) {\n\tg := gomega.NewGomegaWithT(t)\n\tg.Expect(createCertificates(t)).NotTo(gomega.HaveOccurred())\n\n\t// create manager\n\tmgr, err := manager.New(cfg, manager.Options{\n\t\tMetricsBindAddress: \"0\",\n\t})\n\tg.Expect(err).NotTo(gomega.HaveOccurred())\n\n\tc = mgr.GetClient()\n\n\t// add webhook to manager\n\tAdd(mgr)\n\n\t// start manager\n\tstopMgr, mgrStopped := StartTestManager(mgr, g)\n\tdefer func() {\n\t\tclose(stopMgr)\n\t\tmgrStopped.Wait()\n\t}()\n\n\tg.Expect(c.Create(context.TODO(), fnConfig)).NotTo(gomega.HaveOccurred())\n\n\ttestInvalidFunc(t)\n\ttestHandleDefaults(t)\n}", "func (m *MockStreamReceiveListener) EXPECT() *MockStreamReceiveListenerMockRecorder {\n\treturn m.recorder\n}", "func (m *MockAcceptor) ListenAndServe() {\n\tm.ctrl.Call(m, \"ListenAndServe\")\n}", "func (l *Listener) create(rOpts *ReconcileOptions) error {\n\tlb := rOpts.loadbalancer\n\tl.DesiredListener.LoadBalancerArn = lb.CurrentLoadBalancer.LoadBalancerArn\n\n\t// TODO: If we couldn't 
resolve default, we 'default' to the first targetgroup known.\n\t// Questionable approach.\n\tl.DesiredListener.DefaultActions[0].TargetGroupArn = lb.TargetGroups[0].CurrentTargetGroup.TargetGroupArn\n\n\t// Look for the default rule in the list of rules known to the Listener. If the default is found,\n\t// use the Kubernetes service name attached to that.\n\tfor _, rule := range l.Rules {\n\t\tif *rule.DesiredRule.IsDefault {\n\t\t\tl.logger.Infof(\"Located default rule. Rule: %s\", log.Prettify(rule.DesiredRule))\n\t\t\ttgIndex := lb.TargetGroups.LookupBySvc(rule.SvcName)\n\t\t\tif tgIndex < 0 {\n\t\t\t\tl.logger.Errorf(\"Failed to locate TargetGroup related to this service. Defaulting to first Target Group. SVC: %s\",\n\t\t\t\t\trule.SvcName)\n\t\t\t} else {\n\t\t\t\tctg := lb.TargetGroups[tgIndex].CurrentTargetGroup\n\t\t\t\tl.DesiredListener.DefaultActions[0].TargetGroupArn = ctg.TargetGroupArn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Attempt listener creation.\n\tin := &elbv2.CreateListenerInput{\n\t\tCertificates: l.DesiredListener.Certificates,\n\t\tLoadBalancerArn: l.DesiredListener.LoadBalancerArn,\n\t\tProtocol: l.DesiredListener.Protocol,\n\t\tPort: l.DesiredListener.Port,\n\t\tDefaultActions: []*elbv2.Action{\n\t\t\t{\n\t\t\t\tType: l.DesiredListener.DefaultActions[0].Type,\n\t\t\t\tTargetGroupArn: l.DesiredListener.DefaultActions[0].TargetGroupArn,\n\t\t\t},\n\t\t},\n\t}\n\to, err := awsutil.ALBsvc.CreateListener(in)\n\tif err != nil {\n\t\trOpts.Eventf(api.EventTypeWarning, \"ERROR\", \"Error creating %v listener: %s\", *l.DesiredListener.Port, err.Error())\n\t\tl.logger.Errorf(\"Failed Listener creation: %s.\", err.Error())\n\t\treturn err\n\t}\n\n\tl.CurrentListener = o.Listeners[0]\n\treturn nil\n}" ]
[ "0.6896047", "0.64820975", "0.63564306", "0.6253495", "0.6248663", "0.62145543", "0.6213838", "0.6207292", "0.6190569", "0.608584", "0.6084038", "0.60075676", "0.5972869", "0.59480613", "0.5873997", "0.5824852", "0.5752023", "0.5728411", "0.5718626", "0.5689425", "0.5688967", "0.5684916", "0.56587166", "0.56514955", "0.5637658", "0.55208963", "0.55187076", "0.55162597", "0.5508888", "0.5500818", "0.5464915", "0.5451309", "0.5442767", "0.5428931", "0.5427056", "0.5423782", "0.5415657", "0.5407253", "0.5399062", "0.5383966", "0.53827345", "0.5379475", "0.5369705", "0.53304905", "0.5289865", "0.52804357", "0.5279219", "0.52437866", "0.5236405", "0.52301514", "0.52185977", "0.52079636", "0.52014744", "0.51851857", "0.51821625", "0.5182127", "0.5181996", "0.51705116", "0.51673776", "0.516208", "0.5160824", "0.5155642", "0.5148937", "0.5148124", "0.51368034", "0.51239747", "0.51122075", "0.50971484", "0.5090562", "0.5083849", "0.50813305", "0.5072824", "0.5072797", "0.50663465", "0.5049904", "0.50459605", "0.50364226", "0.50344914", "0.5030876", "0.5029913", "0.5026943", "0.5022441", "0.5020065", "0.5017519", "0.500745", "0.50050575", "0.49851173", "0.49802095", "0.4978989", "0.4977833", "0.497111", "0.49707466", "0.49640936", "0.49465588", "0.49446756", "0.49441534", "0.49344823", "0.49308467", "0.49289057", "0.4924693" ]
0.70509106
0
EnsureListener indicates an expected call of EnsureListener
func (mr *MockLoadBalanceMockRecorder) EnsureListener(region, listener interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureListener", reflect.TypeOf((*MockLoadBalance)(nil).EnsureListener), region, listener) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) EnsureListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureSegmentListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureSegmentListener\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureSegmentListener), region, listener)\n}", "func (m *MockLoadBalance) EnsureSegmentListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureSegmentListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiListeners), region, lbID, listeners)\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiSegmentListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiSegmentListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiSegmentListeners), region, lbID, listeners)\n}", "func Ensure(depManager *dep.DepManager, args ...string) error {\n\treturn depManager.Ensure(args...)\n}", "func HealthListener(site site.API) {\n\t// nop\n}", "func Test_App_Listener(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\n\tgo func() {\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\tutils.AssertEqual(t, nil, app.Shutdown())\n\t}()\n\n\tln := fasthttputil.NewInmemoryListener()\n\tutils.AssertEqual(t, nil, app.Listener(ln))\n}", "func (t *TestStore) RegisterListener(listener 
database.Listener) {\n}", "func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) {\n\tfs, clientCh, cleanup := setupOverrides()\n\tdefer cleanup()\n\n\t// Create a server option to get notified about serving mode changes. We don't\n\t// do anything other than throwing a log entry here. But this is required,\n\t// since the server code emits a log entry at the default level (which is\n\t// ERROR) if no callback is registered for serving mode changes. Our\n\t// testLogger fails the test if there is any log entry at ERROR level. It does\n\t// provide an ExpectError() method, but that takes a string and it would be\n\t// painful to construct the exact error message expected here. Instead this\n\t// works just fine.\n\tmodeChangeOpt := ServingModeCallback(func(addr net.Addr, args ServingModeChangeArgs) {\n\t\tt.Logf(\"Serving mode for listener %q changed to %q, err: %v\", addr.String(), args.Mode, args.Err)\n\t})\n\tserver := NewGRPCServer(modeChangeOpt)\n\tdefer server.Stop()\n\n\tlis, err := testutils.LocalTCPListener()\n\tif err != nil {\n\t\tt.Fatalf(\"testutils.LocalTCPListener() failed: %v\", err)\n\t}\n\n\t// Call Serve() in a goroutine, and push on a channel when Serve returns.\n\tserveDone := testutils.NewChannel()\n\tgo func() {\n\t\tif err := server.Serve(lis); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tserveDone.Send(nil)\n\t}()\n\n\t// Wait for an xdsClient to be created.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tc, err := clientCh.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for new xdsClient to be created: %v\", err)\n\t}\n\tclient := c.(*fakeclient.Client)\n\n\t// Wait for a listener watch to be registered on the xdsClient.\n\tname, err := client.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a ListenerWatch: %v\", err)\n\t}\n\twantName := strings.Replace(testServerListenerResourceNameTemplate, \"%s\", lis.Addr().String(), 
-1)\n\tif name != wantName {\n\t\tt.Fatalf(\"LDS watch registered for name %q, want %q\", name, wantName)\n\t}\n\n\t// Push a good LDS response with security config, and wait for Serve() to be\n\t// invoked on the underlying grpc.Server. Also make sure that certificate\n\t// providers are not created.\n\tfcm, err := xdsresource.NewFilterChainManager(&v3listenerpb.Listener{\n\t\tFilterChains: []*v3listenerpb.FilterChain{\n\t\t\t{\n\t\t\t\tTransportSocket: &v3corepb.TransportSocket{\n\t\t\t\t\tName: \"envoy.transport_sockets.tls\",\n\t\t\t\t\tConfigType: &v3corepb.TransportSocket_TypedConfig{\n\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{\n\t\t\t\t\t\t\tCommonTlsContext: &v3tlspb.CommonTlsContext{\n\t\t\t\t\t\t\t\tTlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{\n\t\t\t\t\t\t\t\t\tInstanceName: \"identityPluginInstance\",\n\t\t\t\t\t\t\t\t\tCertificateName: \"identityCertName\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tFilters: []*v3listenerpb.Filter{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"filter-1\",\n\t\t\t\t\t\tConfigType: &v3listenerpb.Filter_TypedConfig{\n\t\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{\n\t\t\t\t\t\t\t\tRouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{\n\t\t\t\t\t\t\t\t\tRouteConfig: &v3routepb.RouteConfiguration{\n\t\t\t\t\t\t\t\t\t\tName: \"routeName\",\n\t\t\t\t\t\t\t\t\t\tVirtualHosts: []*v3routepb.VirtualHost{{\n\t\t\t\t\t\t\t\t\t\t\tDomains: []string{\"lds.target.good:3333\"},\n\t\t\t\t\t\t\t\t\t\t\tRoutes: []*v3routepb.Route{{\n\t\t\t\t\t\t\t\t\t\t\t\tMatch: &v3routepb.RouteMatch{\n\t\t\t\t\t\t\t\t\t\t\t\t\tPathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: \"/\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\tAction: &v3routepb.Route_NonForwardingAction{},\n\t\t\t\t\t\t\t\t\t\t\t}}}}},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tHttpFilters: 
[]*v3httppb.HttpFilter{e2e.RouterHTTPFilter},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsclient.NewFilterChainManager() failed with error: %v\", err)\n\t}\n\taddr, port := splitHostPort(lis.Addr().String())\n\tclient.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{\n\t\tRouteConfigName: \"routeconfig\",\n\t\tInboundListenerCfg: &xdsresource.InboundListenerConfig{\n\t\t\tAddress: addr,\n\t\t\tPort: port,\n\t\t\tFilterChains: fcm,\n\t\t},\n\t}, nil)\n\tif _, err := fs.serveCh.Receive(ctx); err != nil {\n\t\tt.Fatalf(\"error when waiting for Serve() to be invoked on the grpc.Server\")\n\t}\n\n\t// Make sure the security configuration is not acted upon.\n\tif err := verifyCertProviderNotCreated(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (l *Listener) create(rOpts *ReconcileOptions) error {\n\tlb := rOpts.loadbalancer\n\tl.DesiredListener.LoadBalancerArn = lb.CurrentLoadBalancer.LoadBalancerArn\n\n\t// TODO: If we couldn't resolve default, we 'default' to the first targetgroup known.\n\t// Questionable approach.\n\tl.DesiredListener.DefaultActions[0].TargetGroupArn = lb.TargetGroups[0].CurrentTargetGroup.TargetGroupArn\n\n\t// Look for the default rule in the list of rules known to the Listener. If the default is found,\n\t// use the Kubernetes service name attached to that.\n\tfor _, rule := range l.Rules {\n\t\tif *rule.DesiredRule.IsDefault {\n\t\t\tl.logger.Infof(\"Located default rule. Rule: %s\", log.Prettify(rule.DesiredRule))\n\t\t\ttgIndex := lb.TargetGroups.LookupBySvc(rule.SvcName)\n\t\t\tif tgIndex < 0 {\n\t\t\t\tl.logger.Errorf(\"Failed to locate TargetGroup related to this service. Defaulting to first Target Group. 
SVC: %s\",\n\t\t\t\t\trule.SvcName)\n\t\t\t} else {\n\t\t\t\tctg := lb.TargetGroups[tgIndex].CurrentTargetGroup\n\t\t\t\tl.DesiredListener.DefaultActions[0].TargetGroupArn = ctg.TargetGroupArn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Attempt listener creation.\n\tin := &elbv2.CreateListenerInput{\n\t\tCertificates: l.DesiredListener.Certificates,\n\t\tLoadBalancerArn: l.DesiredListener.LoadBalancerArn,\n\t\tProtocol: l.DesiredListener.Protocol,\n\t\tPort: l.DesiredListener.Port,\n\t\tDefaultActions: []*elbv2.Action{\n\t\t\t{\n\t\t\t\tType: l.DesiredListener.DefaultActions[0].Type,\n\t\t\t\tTargetGroupArn: l.DesiredListener.DefaultActions[0].TargetGroupArn,\n\t\t\t},\n\t\t},\n\t}\n\to, err := awsutil.ALBsvc.CreateListener(in)\n\tif err != nil {\n\t\trOpts.Eventf(api.EventTypeWarning, \"ERROR\", \"Error creating %v listener: %s\", *l.DesiredListener.Port, err.Error())\n\t\tl.logger.Errorf(\"Failed Listener creation: %s.\", err.Error())\n\t\treturn err\n\t}\n\n\tl.CurrentListener = o.Listeners[0]\n\treturn nil\n}", "func CheckSuiteListener(checkSuite event.CheckSuite) (bool, error) {\n\tlogger.Infof(\"CheckSuite event listener fired [%v]!\", checkSuite)\n\treturn true, nil\n}", "func (m *WatchManager) Ensure(\n\tprovider *api.Provider,\n\tresource interface{},\n\thandler libweb.EventHandler) (watch *libweb.Watch, err error) {\n\t//\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tif m.providerMap == nil {\n\t\tm.providerMap = make(ProviderMap)\n\t}\n\twatchMap, found := m.providerMap[provider.UID]\n\tif !found {\n\t\twatchMap = make(map[string]*libweb.Watch)\n\t\tm.providerMap[provider.UID] = watchMap\n\t}\n\tkind := libref.ToKind(resource)\n\tif w, found := watchMap[kind]; found {\n\t\tif w.Alive() {\n\t\t\twatch = w\n\t\t\treturn\n\t\t}\n\t}\n\tclient, err := web.NewClient(provider)\n\tif err != nil {\n\t\treturn\n\t}\n\tw, err := client.Watch(resource, handler)\n\tif err != nil {\n\t\treturn\n\t}\n\twatchMap[kind] = w\n\twatch = w\n\n\treturn\n}", "func waitForWatchListener(ctx 
context.Context, t *testing.T, xdsC *fakeclient.Client, wantTarget string) {\n\tt.Helper()\n\n\tgotTarget, err := xdsC.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsClient.WatchService failed with error: %v\", err)\n\t}\n\tif gotTarget != wantTarget {\n\t\tt.Fatalf(\"xdsClient.WatchService() called with target: %v, want %v\", gotTarget, wantTarget)\n\t}\n}", "func (mr *MockHealthCheckMockRecorder) SetListener(arg0, arg1 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"SetListener\", reflect.TypeOf((*MockHealthCheck)(nil).SetListener), arg0, arg1)\n}", "func CheckRunListener(checkRun event.CheckRun) (bool, error) {\n\tlogger.Infof(\"CheckRun event listener fired [%v]!\", checkRun)\n\treturn true, nil\n}", "func (ew *EventWatcher) EnsureNoEvents(ctx context.Context, duration time.Duration) error {\n\t// First, clears the list of events beforehand.\n\tif _, err := ew.events(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to clear the event list\")\n\t}\n\t// wait, and check the events in the wait.\n\tif err := testing.Sleep(ctx, duration); err != nil {\n\t\treturn errors.Wrap(err, \"failed to wait\")\n\t}\n\tevents, err := ew.events(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to access to the event list\")\n\t}\n\tif len(events) > 0 {\n\t\treturn errors.Errorf(\"there are %d events\", len(events))\n\t}\n\treturn nil\n}", "func (mr *MockAllMockRecorder) Listener() *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Listener\", reflect.TypeOf((*MockAll)(nil).Listener))\n}", "func checkListener(listener interface{}) (reflect.Type, error) {\n lisVal := reflect.TypeOf(listener)\n\n if lisVal.Kind() != reflect.Func || // Listener must obviously be a function\n lisVal.NumIn() != 1 || // Listener function must take only 1 input argument\n lisVal.NumOut() != 0 { // Listener must have no returning argument\n // Listener interface not valid\n return lisVal, 
ListenerInvalidErr\n }\n\n // Returns input value kind\n return lisVal.In(0), nil\n}", "func (t *SelfTester) SetOnNewPoliciesReadyCb(cb func()) {\n}", "func (m *MockHealthCheck) SetListener(arg0 discovery.LegacyHealthCheckStatsListener, arg1 bool) {\n\tm.ctrl.Call(m, \"SetListener\", arg0, arg1)\n}", "func (jm *JobManager) shouldTriggerListeners(t Task) bool {\n\tif typed, isTyped := t.(EventTriggerListenersProvider); isTyped {\n\t\treturn typed.ShouldTriggerListeners()\n\t}\n\n\treturn true\n}", "func Ensure(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (mr *MockDynamicCertPrivateMockRecorder) AddListener(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddListener\", reflect.TypeOf((*MockDynamicCertPrivate)(nil).AddListener), arg0)\n}", "func AssertProcessEventRequired(obj ProcessEvent) error {\n\treturn nil\n}", "func (m *MockLoadBalance) EnsureMultiListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func verifyExtensionLifecycle(t *testing.T, factory extension.Factory, getConfigFn getExtensionConfigFn) {\n\tctx := context.Background()\n\thost := newAssertNoErrorHost(t)\n\textCreateSet := extensiontest.NewNopCreateSettings()\n\n\tif getConfigFn == nil {\n\t\tgetConfigFn = factory.CreateDefaultConfig\n\t}\n\n\tfirstExt, err := factory.CreateExtension(ctx, extCreateSet, getConfigFn())\n\trequire.NoError(t, err)\n\trequire.NoError(t, firstExt.Start(ctx, host))\n\trequire.NoError(t, firstExt.Shutdown(ctx))\n\n\tsecondExt, err := factory.CreateExtension(ctx, extCreateSet, getConfigFn())\n\trequire.NoError(t, err)\n\trequire.NoError(t, secondExt.Start(ctx, host))\n\trequire.NoError(t, secondExt.Shutdown(ctx))\n}", "func (m *MockAll) Listener() Listener 
{\n\tret := m.ctrl.Call(m, \"Listener\")\n\tret0, _ := ret[0].(Listener)\n\treturn ret0\n}", "func (cli *FakeDatabaseClient) CreateListener(ctx context.Context, in *dbdpb.CreateListenerRequest, opts ...grpc.CallOption) (*dbdpb.CreateListenerResponse, error) {\n\tpanic(\"implement me\")\n}", "func TestIntegrationNewListener(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration test in short mode...\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*120)\n\tdefer cancel()\n\n\tlistener := NewAmqpConnection(ctx, config)\n\t// Remove topic to ensure each test has a clean topic to work with\n\tdefer deleteSubscription(listener, config)\n\n\tnonce := time.Now().String()\n\tsender, err := listener.CreateAmqpSender(config.SubscribesToEvent)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\terr = sender.Send(ctx, amqp.NewMessage([]byte(nonce)))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tstats, err := listener.GetQueueDepth()\n\tdepth := stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 1 {\n\t\tt.Errorf(\"Expected queue depth of 1 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n\n\tamqpMessage, err := listener.Receiver.Receive(ctx)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmessage := messaging.NewAmqpMessageWrapper(amqpMessage)\n\n\tgo func() {\n\t\ttime.Sleep(time.Duration(45) * time.Second)\n\t\terr := listener.RenewLocks(ctx, []*amqp.Message{\n\t\t\tamqpMessage,\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t//Added to ensure that locks are renewed\n\ttime.Sleep(time.Duration(75) * time.Second)\n\n\terr = message.Accept()\n\tif string(message.Body()) != nonce {\n\t\tt.Errorf(\"value not as expected in message Expected: %s Got: %s\", nonce, message.Body())\n\t}\n\n\tstats, err = listener.GetQueueDepth()\n\tdepth = stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue 
depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 0 {\n\t\tt.Errorf(\"Expected queue depth of 0 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n}", "func preNotifyReady() {\n}", "func (hc *LegacyHealthCheckImpl) SetListener(listener LegacyHealthCheckStatsListener, sendDownEvents bool) {\n\tif hc.listener != nil {\n\t\tpanic(\"must not call SetListener twice\")\n\t}\n\n\thc.mu.Lock()\n\tdefer hc.mu.Unlock()\n\tif len(hc.addrToHealth) > 0 {\n\t\tpanic(\"must not call SetListener after tablets were added\")\n\t}\n\n\thc.listener = listener\n\thc.sendDownEvents = sendDownEvents\n}", "func (s) TestListenerResourceDeletionOnServerNotIgnored(t *testing.T) {\n\tmgmtServer := startManagementServer(t)\n\tnodeID := uuid.New().String()\n\tbs := generateBootstrapContents(t, mgmtServer.Address, false, nodeID)\n\txdsR := xdsResolverBuilder(t, bs)\n\tresources, lis := resourceWithListenerForGRPCServer(t, nodeID)\n\tupdateCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis)\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for the listener to move to \"serving\" mode.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Test timed out waiting for a mode change update.\")\n\tcase mode := <-updateCh:\n\t\tif mode != connectivity.ServingModeServing {\n\t\t\tt.Fatalf(\"Listener received new mode %v, want %v\", mode, connectivity.ServingModeServing)\n\t\t}\n\t}\n\n\t// Create a ClientConn and make a successful RPCs.\n\tcc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial local test server: %v\", err)\n\t}\n\tdefer cc.Close()\n\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := mgmtServer.Update(ctx, e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*listenerpb.Listener{}, // empty 
listener resource\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatalf(\"timed out waiting for a mode change update: %v\", err)\n\tcase mode := <-updateCh:\n\t\tif mode != connectivity.ServingModeNotServing {\n\t\t\tt.Fatalf(\"listener received new mode %v, want %v\", mode, connectivity.ServingModeNotServing)\n\t\t}\n\t}\n}", "func (opts *Options) ensure() error {\n\tif !opts.Enabled {\n\t\treturn nil\n\t}\n\n\tif opts.Name == \"\" {\n\t\topts.Name = config.GetString(\"name\")\n\t}\n\tif opts.Name == \"\" {\n\t\treturn ErrEmptyName\n\t}\n\n\t//if opts.Reporter.Address == \"\" {\n\t//\topts.Reporter.Address = \"127.0.0.1\"\n\t//}\n\tif opts.Reporter.FlushInterval <= 0 {\n\t\topts.Reporter.FlushInterval = 10 * time.Second\n\t}\n\tif opts.Reporter.QueueSize <= 0 {\n\t\topts.Reporter.QueueSize = 1000\n\t}\n\treturn nil\n}", "func AddListener(lis Listener) bool {\n\treturn stdLogger.AddListener(lis)\n}", "func verifyReceiverLifecycle(t *testing.T, factory component.ReceiverFactory, getConfigFn getReceiverConfigFn) {\n\tctx := context.Background()\n\thost := newAssertNoErrorHost(t)\n\treceiverCreateSet := componenttest.NewNopReceiverCreateSettings()\n\n\tif getConfigFn == nil {\n\t\tgetConfigFn = factory.CreateDefaultConfig\n\t}\n\n\tcreateFns := []createReceiverFn{\n\t\twrapCreateLogsRcvr(factory),\n\t\twrapCreateTracesRcvr(factory),\n\t\twrapCreateMetricsRcvr(factory),\n\t}\n\n\tfor _, createFn := range createFns {\n\t\tfirstRcvr, err := createFn(ctx, receiverCreateSet, getConfigFn())\n\t\tif errors.Is(err, componenterror.ErrDataTypeIsNotSupported) {\n\t\t\tcontinue\n\t\t}\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, firstRcvr.Start(ctx, host))\n\t\trequire.NoError(t, firstRcvr.Shutdown(ctx))\n\n\t\tsecondRcvr, err := createFn(ctx, receiverCreateSet, getConfigFn())\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, secondRcvr.Start(ctx, host))\n\t\trequire.NoError(t, secondRcvr.Shutdown(ctx))\n\t}\n}", "func 
TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar memLog base.InMemLogger\n\tmem := vfs.NewMem()\n\trequire.NoError(t, mem.MkdirAll(\"ext\", 0755))\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(t *testing.T, td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tmemLog.Reset()\n\t\t\tlel := MakeLoggingEventListener(&memLog)\n\t\t\tflushBegin, flushEnd := lel.FlushBegin, lel.FlushEnd\n\t\t\tlel.FlushBegin = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushBegin(info)\n\t\t\t}\n\t\t\tlel.FlushEnd = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushEnd(info)\n\t\t\t}\n\t\t\topts := &Options{\n\t\t\t\tFS: vfs.WithLogging(mem, memLog.Infof),\n\t\t\t\tFormatMajorVersion: internalFormatNewest,\n\t\t\t\tEventListener: &lel,\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tL0CompactionThreshold: 10,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t}\n\t\t\t// The table stats collector runs asynchronously and its\n\t\t\t// timing is less predictable. It increments nextJobID, which\n\t\t\t// can make these tests flaky. 
The TableStatsLoaded event is\n\t\t\t// tested separately in TestTableStats.\n\t\t\topts.private.disableTableStats = true\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", opts)\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tt := time.Now()\n\t\t\td.timeNow = func() time.Time {\n\t\t\t\tt = t.Add(time.Second)\n\t\t\t\treturn t\n\t\t\t}\n\t\t\td.testingAlwaysWaitForCleanup = true\n\t\t\treturn memLog.String()\n\n\t\tcase \"close\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"flush\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"compact\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\"), false); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"checkpoint\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Checkpoint(\"checkpoint\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"disable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\td.mu.Lock()\n\t\t\td.disableFileDeletions()\n\t\t\td.mu.Unlock()\n\t\t\treturn memLog.String()\n\n\t\tcase \"enable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\tfunc() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\tmemLog.Infof(\"%v\", r)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\td.mu.Lock()\n\t\t\t\tdefer d.mu.Unlock()\n\t\t\t\td.enableFileDeletions()\n\t\t\t}()\n\t\t\td.TestOnlyWaitForCleaning()\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest\":\n\t\t\tmemLog.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := 
sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest-flushable\":\n\t\t\tmemLog.Reset()\n\n\t\t\t// Prevent flushes during this test to ensure determinism.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = true\n\t\t\td.mu.Unlock()\n\n\t\t\tb := d.NewBatch()\n\t\t\tif err := b.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Apply(b, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\twriteTable := func(name string, key byte) error {\n\t\t\t\tf, err := mem.Create(name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tw := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t\t})\n\t\t\t\tif err := w.Add(base.MakeInternalKey([]byte{key}, 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.Close(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\ttableA, tableB := \"ext/a\", \"ext/b\"\n\t\t\tif err := writeTable(tableA, 'a'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := writeTable(tableB, 'b'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{tableA, tableB}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\n\t\t\t// Re-enable flushes, to allow the subsequent flush to proceed.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = false\n\t\t\td.mu.Unlock()\n\t\t\tif err := d.Flush(); err != nil 
{\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"metrics\":\n\t\t\t// The asynchronous loading of table stats can change metrics, so\n\t\t\t// wait for all the tables' stats to be loaded.\n\t\t\td.mu.Lock()\n\t\t\td.waitTableStats()\n\t\t\td.mu.Unlock()\n\n\t\t\treturn d.Metrics().String()\n\n\t\tcase \"sstables\":\n\t\t\tvar buf bytes.Buffer\n\t\t\ttableInfos, _ := d.SSTables()\n\t\t\tfor i, level := range tableInfos {\n\t\t\t\tif len(level) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(&buf, \"%d:\\n\", i)\n\t\t\t\tfor _, m := range level {\n\t\t\t\t\tfmt.Fprintf(&buf, \" %d:[%s-%s]\\n\",\n\t\t\t\t\t\tm.FileNum, m.Smallest.UserKey, m.Largest.UserKey)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func (m *MockLoadBalance) EnsureMultiSegmentListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiSegmentListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (ans *ANS) CheckCorrectSetup() error {\n\tconst testPath = \"/cf/consumer/v1/matched-events\"\n\tentireUrl := strings.TrimRight(ans.URL, \"/\") + testPath\n\n\tresponse, err := ans.sendRequest(http.MethodGet, entireUrl, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn handleStatusCode(entireUrl, http.StatusOK, response)\n}", "func (l *PoolListener) Ready() (err error) { return nil }", "func CreateListener(t *testing.T, client *gophercloud.ServiceClient, lb *loadbalancers.LoadBalancer) (*listeners.Listener, error) {\n\tlistenerName := tools.RandomString(\"TESTACCT-\", 8)\n\tlistenerDescription := tools.RandomString(\"TESTACCT-DESC-\", 8)\n\tlistenerPort := tools.RandomInt(1, 100)\n\n\tt.Logf(\"Attempting to create listener %s on port %d\", listenerName, listenerPort)\n\n\tcreateOpts := 
listeners.CreateOpts{\n\t\tName: listenerName,\n\t\tDescription: listenerDescription,\n\t\tLoadbalancerID: lb.ID,\n\t\tProtocol: listeners.ProtocolHTTP,\n\t\tProtocolPort: listenerPort,\n\t}\n\n\tlistener, err := listeners.Create(client, createOpts).Extract()\n\tif err != nil {\n\t\treturn listener, err\n\t}\n\n\tt.Logf(\"Successfully created listener %s\", listenerName)\n\n\tif err := WaitForLoadBalancerState(client, lb.ID, \"ACTIVE\"); err != nil {\n\t\treturn listener, fmt.Errorf(\"Timed out waiting for loadbalancer to become active: %s\", err)\n\t}\n\n\tth.AssertEquals(t, listener.Name, listenerName)\n\tth.AssertEquals(t, listener.Description, listenerDescription)\n\tth.AssertEquals(t, listener.Loadbalancers[0].ID, lb.ID)\n\tth.AssertEquals(t, listener.Protocol, string(listeners.ProtocolHTTP))\n\tth.AssertEquals(t, listener.ProtocolPort, listenerPort)\n\n\treturn listener, nil\n}", "func (m *MockDynamicCertPrivate) AddListener(arg0 dynamiccertificates.Listener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddListener\", arg0)\n}", "func Ensure(err error, message string) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif message != \"\" {\n\t\tlog.Infoln(message)\n\t}\n}", "func (l *Listener) markAsReady() {\n\tl.ready = true\n\tclose(l.readyChan)\n}", "func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) {\n\toldRBAC := envconfig.XDSRBAC\n\tenvconfig.XDSRBAC = true\n\tdefer func() {\n\t\tenvconfig.XDSRBAC = oldRBAC\n\t}()\n\t_, readyCh, xdsC, _, cleanup := newListenerWrapper(t)\n\tdefer cleanup()\n\n\t// Verify that the listener wrapper registers a listener watch for the\n\t// expected Listener resource name.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tname, err := xdsC.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a watch on a Listener resource: %v\", err)\n\t}\n\tif name != testListenerResourceName {\n\t\tt.Fatalf(\"listenerWrapper registered a lds watch on %s, 
want %s\", name, testListenerResourceName)\n\t}\n\tfcm, err := xdsresource.NewFilterChainManager(listenerWithRouteConfiguration)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsclient.NewFilterChainManager() failed with error: %v\", err)\n\t}\n\n\t// Push a good update which contains a Filter Chain that specifies dynamic\n\t// RDS Resources that need to be received. This should ping rds handler\n\t// about which rds names to start, which will eventually start a watch on\n\t// xds client for rds name \"route-1\".\n\txdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{\n\t\tInboundListenerCfg: &xdsresource.InboundListenerConfig{\n\t\t\tAddress: fakeListenerHost,\n\t\t\tPort: strconv.Itoa(fakeListenerPort),\n\t\t\tFilterChains: fcm,\n\t\t}}, nil)\n\n\t// This should start a watch on xds client for rds name \"route-1\".\n\trouteName, err := xdsC.WaitForWatchRouteConfig(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a watch on a Route resource: %v\", err)\n\t}\n\tif routeName != \"route-1\" {\n\t\tt.Fatalf(\"listenerWrapper registered a lds watch on %s, want %s\", routeName, \"route-1\")\n\t}\n\n\t// This shouldn't invoke good update channel, as has not received rds updates yet.\n\ttimer := time.NewTimer(defaultTestShortTimeout)\n\tselect {\n\tcase <-timer.C:\n\t\ttimer.Stop()\n\tcase <-readyCh:\n\t\tt.Fatalf(\"ready channel written to without rds configuration specified\")\n\t}\n\n\t// Invoke rds callback for the started rds watch. 
This valid rds callback\n\t// should trigger the listener wrapper to fire GoodUpdate, as it has\n\t// received both it's LDS Configuration and also RDS Configuration,\n\t// specified in LDS Configuration.\n\txdsC.InvokeWatchRouteConfigCallback(\"route-1\", xdsresource.RouteConfigUpdate{}, nil)\n\n\t// All of the xDS updates have completed, so can expect to send a ping on\n\t// good update channel.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatalf(\"timeout waiting for the ready channel to be written to after receipt of a good rds update\")\n\tcase <-readyCh:\n\t}\n}", "func NoMainListener() {\n}", "func (_m *Broadcaster) DependentReady() {\n\t_m.Called()\n}", "func (cli *FakeDatabaseClient) BounceListener(ctx context.Context, in *dbdpb.BounceListenerRequest, opts ...grpc.CallOption) (*dbdpb.BounceListenerResponse, error) {\n\tpanic(\"implement me\")\n}", "func TestIntegrationNewListener(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration test in short mode...\")\n\t}\n\n\t// pick a random name to prevent previous tests affecting this test\n\tconfig.ModuleName = helpers.RandomName(8)\n\n\trenewEvery := time.Second * 35\n\tprocessingTime := time.Second * 240\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*310)\n\tdefer cancel()\n\n\tlistener := NewAmqpConnection(ctx, config)\n\t// Remove topic to ensure each test has a clean topic to work with\n\tdefer deleteSubscription(listener, config)\n\n\tnonce := time.Now().String()\n\tsender, err := listener.CreateAmqpSender(config.SubscribesToEvent)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\terr = sender.Send(ctx, amqp.NewMessage([]byte(nonce)))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tstats, err := listener.GetQueueDepth()\n\tdepth := stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 1 {\n\t\tt.Errorf(\"Expected queue depth of 1 Got:%v\", 
depth)\n\t\tt.Fail()\n\t}\n\n\tamqpMessage, err := listener.Receiver.Receive(ctx)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmessage := messaging.NewAmqpMessageWrapper(amqpMessage)\n\n\t// SUMMARY: Testing message lock renewal. By default SB messages's locks expire after 1min and the message is requeued\n\t// 1. Starts a loop renewing the message lock\n\t// 2. Block for more than 1min\n\t// 3. Accept the message (dequeuing it)\n\t// 4. Check the queue length is 0... if it's not we lost the lock and the message got put back on the queue.\n\trenewContext, cancel := context.WithCancel(ctx)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-renewContext.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(renewEvery)\n\t\t\t\terr := listener.RenewLocks(ctx, []*amqp.Message{\n\t\t\t\t\tamqpMessage,\n\t\t\t\t})\n\n\t\t\t\t// Report the test error if the context hasn't been cancelled.\n\t\t\t\tif err != nil {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-renewContext.Done():\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tt.Error(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\ttime.Sleep(processingTime)\n\tcancel()\n\n\terr = message.Accept()\n\tif string(message.Body()) != nonce {\n\t\tt.Errorf(\"value not as expected in message Expected: %s Got: %s\", nonce, message.Body())\n\t}\n\n\t// wait for the SB stats API to update\n\ttime.Sleep(time.Second * 30)\n\n\tstats, err = listener.GetQueueDepth()\n\tdepth = stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 0 {\n\t\tt.Errorf(\"Expected queue depth of 0 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n}", "func TeamAddListener(teamAdd event.TeamAdd) (bool, error) {\n\tlogger.Infof(\"TeamAdd event listener fired [%v]!\", teamAdd)\n\treturn true, nil\n}", "func brokerEventVersionNotUpgraded(ctx context.Context, t feature.T) {\n\t// brokerName := state.GetStringOrFail(ctx, t, \"brokerName\")\n\n\t// Create a trigger,\n}", 
"func notifyReady() {\n}", "func (t *SelfTester) BeginWaitingForEvent() error {\n\tif t.waitingForEvent.Swap(true) {\n\t\treturn errors.New(\"a self test is already running\")\n\t}\n\treturn nil\n}", "func requireWaitForEvent(\n\tt *testing.T, keptnAPI *KeptnAPI, waitFor time.Duration, tick time.Duration, keptnContext *models.EventContext,\n\teventType string, eventValidator func(c *models.KeptnContextExtendedCE) bool, source string,\n) {\n\trequireWaitForFilteredEvent(t,\n\t\tkeptnAPI,\n\t\twaitFor,\n\t\ttick,\n\t\t&api.EventFilter{\n\t\t\tKeptnContext: *keptnContext.KeptnContext,\n\t\t\tEventType: eventType,\n\t\t},\n\t\teventValidator,\n\t\tsource,\n\t)\n}", "func (m *MockListener) Create(listener api.Listener) (api.Listener, error) {\n\tret := m.ctrl.Call(m, \"Create\", listener)\n\tret0, _ := ret[0].(api.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func balancerWaitForRolloutsChange(b IRolloutBalancer, wantedLen int) {\n\tr, ok := b.(*rolloutBalancer)\n\tif !ok {\n\t\tpanic(\"rolloutBalancer instance is expected\")\n\t}\n\twaitForRolloutsChange(r.watcher, wantedLen)\n}", "func SelfCheck(logger Logger) {\n\tid, callbackRemove := callbacks.register(logger)\n\tdefer callbackRemove()\n\tC.check_library(C.user_callback_data(id), C.log_callback(C.goLogCallback))\n}", "func TestCheckEvents(t *testing.T) {\n\ttestNamespace := \"test_namespace\"\n\tcha := make(chan *events.Envelope)\n\terrorsCh := make(chan error)\n\tme := &mockEvt{\n\t\tmockSubscribe: func(ctx context.Context, filter ...string) (ch <-chan *events.Envelope, errs <-chan error) {\n\t\t\treturn cha, errorsCh\n\t\t},\n\t}\n\titf := &fake.MockedContainerdClient{\n\t\tMockEvents: func() containerd.EventService {\n\t\t\treturn containerd.EventService(me)\n\t\t},\n\t\tMockNamespaces: func(ctx context.Context) ([]string, error) {\n\t\t\treturn []string{testNamespace}, nil\n\t\t},\n\t\tMockContainers: func(namespace string) ([]containerd.Container, error) {\n\t\t\treturn nil, 
nil\n\t\t},\n\t}\n\t// Test the basic listener\n\tsub := createEventSubscriber(\"subscriberTest1\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttp := &containerdevents.TaskPaused{\n\t\tContainerID: \"42\",\n\t}\n\n\tvp, err := typeurl.MarshalAny(tp)\n\tassert.NoError(t, err)\n\n\ten := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/paused\",\n\t\tEvent: vp,\n\t}\n\tcha <- &en\n\n\ttimeout := time.NewTimer(2 * time.Second)\n\tticker := time.NewTicker(5 * time.Millisecond)\n\tcondition := false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tev := sub.Flush(time.Now().Unix())\n\tassert.Len(t, ev, 1)\n\tassert.Equal(t, ev[0].Topic, \"/tasks/paused\")\n\terrorsCh <- fmt.Errorf(\"chan breaker\")\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting for error\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Test the multiple events one unsupported\n\tsub = createEventSubscriber(\"subscriberTest2\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttk := &containerdevents.TaskOOM{\n\t\tContainerID: \"42\",\n\t}\n\tvk, err := typeurl.MarshalAny(tk)\n\tassert.NoError(t, err)\n\n\tek := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/oom\",\n\t\tEvent: vk,\n\t}\n\n\tnd := &containerdevents.NamespaceDelete{\n\t\tName: \"k10s.io\",\n\t}\n\tvnd, err := typeurl.MarshalAny(nd)\n\tassert.NoError(t, err)\n\n\tevnd := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/namespaces/delete\",\n\t\tEvent: vnd,\n\t}\n\n\tcha <- &ek\n\tcha <- &evnd\n\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase 
<-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\tev2 := sub.Flush(time.Now().Unix())\n\tfmt.Printf(\"\\n\\n 2/ Flush %v\\n\\n\", ev2)\n\tassert.Len(t, ev2, 1)\n\tassert.Equal(t, ev2[0].Topic, \"/tasks/oom\")\n}", "func (*listener) OnConnect() {}", "func (e *AvroKafkaDockerEnv) SetListener(states interface{}, listener MqListener) {\n\t// TODO\n}", "func checkListeners(cli kube.CLIClient, namespace string) (diag.Messages, error) {\n\tpods, err := cli.Kube().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{\n\t\t// Find all running pods\n\t\tFieldSelector: \"status.phase=Running\",\n\t\t// Find all injected pods. We don't care about non-injected pods, because the new behavior\n\t\t// mirrors Kubernetes; this is only a breaking change for existing Istio users.\n\t\tLabelSelector: \"security.istio.io/tlsMode=istio\",\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar messages diag.Messages = make([]diag.Message, 0)\n\tg := errgroup.Group{}\n\n\tsem := semaphore.NewWeighted(25)\n\tfor _, pod := range pods.Items {\n\t\tpod := pod\n\t\tif !fromLegacyNetworkingVersion(pod) {\n\t\t\t// Skip check. 
This pod is already on a version where the change has been made; if they were going\n\t\t\t// to break they would already be broken.\n\t\t\tcontinue\n\t\t}\n\t\tg.Go(func() error {\n\t\t\t_ = sem.Acquire(context.Background(), 1)\n\t\t\tdefer sem.Release(1)\n\t\t\t// Fetch list of all clusters to get which ports we care about\n\t\t\tresp, err := cli.EnvoyDo(context.Background(), pod.Name, pod.Namespace, \"GET\", \"config_dump?resource=dynamic_active_clusters&mask=cluster.name\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"failed to get config dump: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tports, err := extractInboundPorts(resp)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"failed to get ports: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Next, look at what ports the pod is actually listening on\n\t\t\t// This requires parsing the output from ss; the version we use doesn't support JSON\n\t\t\tout, _, err := cli.PodExec(pod.Name, pod.Namespace, \"istio-proxy\", \"ss -ltnH\")\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"executable file not found\") {\n\t\t\t\t\t// Likely distroless or other custom build without ss. 
Nothing we can do here...\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfmt.Println(\"failed to get listener state: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfor _, ss := range strings.Split(out, \"\\n\") {\n\t\t\t\tif len(ss) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbind, port, err := net.SplitHostPort(getColumn(ss, 3))\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"failed to get parse state: \", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tip, _ := netip.ParseAddr(bind)\n\t\t\t\tportn, _ := strconv.Atoi(port)\n\t\t\t\tif _, f := ports[portn]; f {\n\t\t\t\t\tc := ports[portn]\n\t\t\t\t\tif bind == \"\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if bind == \"*\" || ip.IsUnspecified() {\n\t\t\t\t\t\tc.Wildcard = true\n\t\t\t\t\t} else if ip.IsLoopback() {\n\t\t\t\t\t\tc.Lo = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tc.Explicit = true\n\t\t\t\t\t}\n\t\t\t\t\tports[portn] = c\n\t\t\t\t}\n\t\t\t}\n\n\t\t\torigin := &kube3.Origin{\n\t\t\t\tType: gvk.Pod,\n\t\t\t\tFullName: resource.FullName{\n\t\t\t\t\tNamespace: resource.Namespace(pod.Namespace),\n\t\t\t\t\tName: resource.LocalName(pod.Name),\n\t\t\t\t},\n\t\t\t\tResourceVersion: resource.Version(pod.ResourceVersion),\n\t\t\t}\n\t\t\tfor port, status := range ports {\n\t\t\t\t// Binding to localhost no longer works out of the box on Istio 1.10+, give them a warning.\n\t\t\t\tif status.Lo {\n\t\t\t\t\tmessages.Add(msg.NewLocalhostListener(&resource.Instance{Origin: origin}, fmt.Sprint(port)))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn messages, nil\n}", "func Check() error {\n\tmg.Deps(Lint)\n\tmg.Deps(TestRace)\n\treturn nil\n}", "func ReleaseListener(release event.Release) (bool, error) {\n\tlogger.Infof(\"Release event listener fired [%v]!\", release)\n\treturn true, nil\n}", "func MembershipListener(membership event.Membership) (bool, error) {\n\tlogger.Infof(\"Membership event listener fired [%v]!\", membership)\n\treturn true, 
nil\n}", "func TestCallbackInvokedWhenSetEarly(t *testing.T) {\n\twg := &sync.WaitGroup{}\n\twg.Add(1)\n\tapp := blockedABCIApplication{\n\t\twg: wg,\n\t}\n\t_, c := setupClientServer(t, app)\n\treqRes := c.CheckTxAsync(types.RequestCheckTx{})\n\n\tdone := make(chan struct{})\n\tcb := func(_ *types.Response) {\n\t\tclose(done)\n\t}\n\treqRes.SetCallback(cb)\n\tapp.wg.Done()\n\n\tcalled := func() bool {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\trequire.Eventually(t, called, time.Second, time.Millisecond*25)\n}", "func (l *Listener) Reconcile(rOpts *ReconcileOptions) error {\n\tswitch {\n\n\tcase l.DesiredListener == nil: // listener should be deleted\n\t\tif l.CurrentListener == nil {\n\t\t\tbreak\n\t\t}\n\t\tl.logger.Infof(\"Start Listener deletion.\")\n\t\tif err := l.delete(rOpts); err != nil {\n\t\t\treturn err\n\t\t}\n\t\trOpts.Eventf(api.EventTypeNormal, \"DELETE\", \"%v listener deleted\", *l.CurrentListener.Port)\n\t\tl.logger.Infof(\"Completed Listener deletion.\")\n\n\tcase l.CurrentListener == nil: // listener doesn't exist and should be created\n\t\tl.logger.Infof(\"Start Listener creation.\")\n\t\tif err := l.create(rOpts); err != nil {\n\t\t\treturn err\n\t\t}\n\t\trOpts.Eventf(api.EventTypeNormal, \"CREATE\", \"%v listener created\", *l.CurrentListener.Port)\n\t\tl.logger.Infof(\"Completed Listener creation. ARN: %s | Port: %v | Proto: %s.\",\n\t\t\t*l.CurrentListener.ListenerArn, *l.CurrentListener.Port,\n\t\t\t*l.CurrentListener.Protocol)\n\n\tcase l.needsModification(l.DesiredListener): // current and desired diff; needs mod\n\t\tl.logger.Infof(\"Start Listener modification.\")\n\t\tif err := l.modify(rOpts); err != nil {\n\t\t\treturn err\n\t\t}\n\t\trOpts.Eventf(api.EventTypeNormal, \"MODIFY\", \"%v listener modified\", *l.CurrentListener.Port)\n\t\tl.logger.Infof(\"Completed Listener modification. 
ARN: %s | Port: %s | Proto: %s.\",\n\t\t\t*l.CurrentListener.ListenerArn, *l.CurrentListener.Port, *l.CurrentListener.Protocol)\n\n\tdefault:\n\t\tl.logger.Debugf(\"No listener modification required.\")\n\t}\n\n\treturn nil\n}", "func (e Event) Check() error {\n\tif len(e.Title) == 0 {\n\t\treturn fmt.Errorf(\"statsd.Event title is required\")\n\t}\n\tif len(e.Text) == 0 {\n\t\treturn fmt.Errorf(\"statsd.Event text is required\")\n\t}\n\treturn nil\n}", "func ensureVWHReady(){\n\tMustRunWithTimeout(certsReadyTime, \"kubectl create ns check-webhook\")\n\tMustRun(\"kubectl delete ns check-webhook\")\n}", "func (h *HealthCheck) verifyBrokerIsReady() error {\n\th.frameworkError = nil\n\tglog.V(4).Infof(\"checking for endpoint %v/%v\", h.brokernamespace, h.brokerendpointName)\n\terr := WaitForEndpoint(h.kubeClientSet, h.brokernamespace, h.brokerendpointName)\n\tif err != nil {\n\t\treturn h.setError(\"endpoint not found: %v\", err.Error())\n\t}\n\n\turl := \"http://\" + h.brokername + \".\" + h.brokernamespace + \".svc.cluster.local\"\n\tbroker := &v1beta1.ClusterServiceBroker{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: h.brokername,\n\t\t},\n\t\tSpec: v1beta1.ClusterServiceBrokerSpec{\n\t\t\tCommonServiceBrokerSpec: v1beta1.CommonServiceBrokerSpec{\n\t\t\t\tURL: url,\n\t\t\t},\n\t\t},\n\t}\n\n\tglog.V(4).Infof(\"checking for Broker %v to be ready\", broker.Name)\n\terr = util.WaitForBrokerCondition(h.serviceCatalogClientSet.ServicecatalogV1beta1(),\n\t\tbroker.Name,\n\t\tv1beta1.ServiceBrokerCondition{\n\t\t\tType: v1beta1.ServiceBrokerConditionReady,\n\t\t\tStatus: v1beta1.ConditionTrue,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn h.setError(\"broker not ready: %v\", err.Error())\n\t}\n\n\terr = util.WaitForClusterServiceClassToExist(h.serviceCatalogClientSet.ServicecatalogV1beta1(), h.serviceclassID)\n\tif err != nil {\n\t\treturn h.setError(\"service class not found: %v\", err.Error())\n\t}\n\treturn nil\n}", "func (l *SharedLoadBalancer) 
ensureHealthCheck(loadbalancerID string, pool *elbmodel.PoolResp,\n\tport v1.ServicePort, service *v1.Service, node *v1.Node) error {\n\thealthCheckOpts := getHealthCheckOptionFromAnnotation(service, l.loadbalancerOpts)\n\tmonitorID := pool.HealthmonitorId\n\tklog.Infof(\"add or update or remove health check: %s : %#v\", monitorID, healthCheckOpts)\n\n\tif healthCheckOpts.Enable {\n\t\terr := l.allowHealthCheckRule(node)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tprotocolStr := parseProtocol(service, port)\n\t// create health monitor\n\tif monitorID == \"\" && healthCheckOpts.Enable {\n\t\t_, err := l.createHealthMonitor(loadbalancerID, pool.Id, protocolStr, healthCheckOpts)\n\t\treturn err\n\t}\n\n\t// update health monitor\n\tif monitorID != \"\" && healthCheckOpts.Enable {\n\t\treturn l.updateHealthMonitor(monitorID, protocolStr, healthCheckOpts)\n\t}\n\n\t// delete health monitor\n\tif monitorID != \"\" && !healthCheckOpts.Enable {\n\t\tklog.Infof(\"Deleting health monitor %s for pool %s\", monitorID, pool.Id)\n\t\terr := l.sharedELBClient.DeleteHealthMonitor(monitorID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to delete health monitor %s for pool %s, error: %v\", monitorID, pool.Id, err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func WaitUntilGatewayActiveListenerStatsGE(t *testing.T, ing ingress.Instance, expectedListeners int,\n\ttimeout time.Duration) error {\n\tstart := time.Now()\n\tactiveListeners := 0\n\tvar err error\n\tfor {\n\t\tif time.Since(start) > timeout {\n\t\t\treturn fmt.Errorf(\"active listener stats does not meet expectation in %v: Expected %v, \"+\n\t\t\t\t\"Last stats: %v\", timeout, expectedListeners, activeListeners)\n\t\t}\n\t\tactiveListeners, err = GetStatsByName(t, ing, \"listener_manager.total_listeners_active\")\n\t\tif err == nil && activeListeners >= expectedListeners {\n\t\t\tt.Logf(\"ingress gateway total number active listeners meets expectation within %v. 
\"+\n\t\t\t\t\"got %v vs expected %v\", time.Since(start), activeListeners, expectedListeners)\n\t\t\treturn nil\n\t\t}\n\t\tt.Logf(\"total active listener stats does not match (get %d vs expected %d), error: %v\",\n\t\t\tactiveListeners, expectedListeners, err)\n\t\ttime.Sleep(3 * time.Second)\n\t}\n}", "func TestConfChangeCheckBeforeCampaign(t *testing.T) {\n\ttestConfChangeCheckBeforeCampaign(t, false)\n}", "func (se *StateEngine) Ensure() error {\n\tse.mgrLock.Lock()\n\tdefer se.mgrLock.Unlock()\n\tif se.stopped {\n\t\treturn fmt.Errorf(\"state engine already stopped\")\n\t}\n\tvar errs []error\n\tfor _, m := range se.managers {\n\t\terr := m.Ensure()\n\t\tif err != nil {\n\t\t\tlogger.Noticef(\"state ensure error: %v\", err)\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tif len(errs) != 0 {\n\t\treturn &ensureError{errs}\n\t}\n\treturn nil\n}", "func GlooctlCheckEventuallyHealthy(offset int, testHelper *helper.SoloTestHelper, timeoutInterval string) {\n\tEventuallyWithOffset(offset, func() error {\n\t\tcontextWithCancel, cancel := context.WithCancel(context.Background())\n\t\tdefer cancel()\n\t\topts := &options.Options{\n\t\t\tMetadata: core.Metadata{\n\t\t\t\tNamespace: testHelper.InstallNamespace,\n\t\t\t},\n\t\t\tTop: options.Top{\n\t\t\t\tCtx: contextWithCancel,\n\t\t\t},\n\t\t}\n\t\terr := check.CheckResources(opts)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"glooctl check detected a problem with the installation\")\n\t\t}\n\t\treturn nil\n\t}, timeoutInterval, \"5s\").Should(BeNil())\n}", "func EnsureNoSubscription(ctx context.Context, cli servicebustopics.SubscriptionsClient) reconciler.Event {\n\tif skip.Skip(ctx) {\n\t\treturn nil\n\t}\n\n\tsrc := commonv1alpha1.ReconcilableFromContext(ctx)\n\ttypedSrc := src.(*v1alpha1.AzureServiceBusTopicSource)\n\n\ttopic := typedSrc.Spec.TopicID.String()\n\tsubsName := subscriptionName(src)\n\n\trestCtx, cancel := context.WithTimeout(ctx, crudTimeout)\n\tdefer cancel()\n\n\t_, err := cli.Delete(restCtx, 
typedSrc.Spec.TopicID.ResourceGroup, typedSrc.Spec.TopicID.Namespace,\n\t\ttypedSrc.Spec.TopicID.ResourceName, subsName)\n\tswitch {\n\tcase isNotFound(err):\n\t\tevent.Warn(ctx, ReasonUnsubscribed, \"Subscription not found, skipping deletion\")\n\t\treturn nil\n\tcase isDenied(err):\n\t\t// it is unlikely that we recover from auth errors in the\n\t\t// finalizer, so we simply record a warning event and return\n\t\tevent.Warn(ctx, ReasonFailedUnsubscribe,\n\t\t\t\"Access denied to Subscription API. Ignoring: %s\", toErrMsg(err))\n\t\treturn nil\n\tcase err != nil:\n\t\treturn failUnsubscribeEvent(topic, err)\n\t}\n\n\tevent.Normal(ctx, ReasonUnsubscribed, \"Deleted Subscription %q for Topic %q\",\n\t\tsubsName, topic)\n\n\treturn nil\n}", "func EnsureCanCreate(storer ServerStorer) CreatingServerStorer {\n\ts, ok := storer.(CreatingServerStorer)\n\tif !ok {\n\t\tpanic(\"could not upgrade ServerStorer to CreatingServerStorer, check your struct\")\n\t}\n\n\treturn s\n}", "func requiresTestStart() {\n\tif !testsStarted {\n\t\tpanic(\"May only be called from within a test case\")\n\t}\n}", "func (con *IRCConn) AddListener(listen IRCListener, check EventChecker) {\n con.listeners[listen] = check;\n}", "func testListener(t *testing.T, handler func(io.ReadWriter)) string {\n\tassert := assert.New(t)\n\trequire := require.New(t)\n\taddress := fmt.Sprintf(\"127.0.0.1:%d\", rand.Int31n(16384)+20000)\n\tl, err := net.Listen(`tcp4`, address)\n\trequire.Nil(err)\n\n\tgo func() {\n\t\tc, err := l.Accept()\n\t\trequire.Nil(err)\n\t\tdefer func() {\n\t\t\tassert.Nil(c.Close())\n\t\t}()\n\n\t\tif handler != nil {\n\t\t\thandler(c)\n\t\t}\n\t}()\n\n\treturn address\n}", "func (l *SharedLoadBalancer) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {\n\tklog.Infof(\"EnsureLoadBalancer: called with service %s/%s, node: %d\",\n\t\tservice.Namespace, service.Name, len(nodes))\n\n\tif err := 
ensureLoadBalancerValidation(service, nodes); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// get exits or create a new ELB instance\n\tloadbalancer, err := l.getLoadBalancerInstance(ctx, clusterName, service)\n\tspecifiedID := getStringFromSvsAnnotation(service, ElbID, \"\")\n\tif common.IsNotFound(err) && specifiedID != \"\" {\n\t\treturn nil, err\n\t}\n\tif err != nil && common.IsNotFound(err) {\n\t\tsubnetID, e := l.getSubnetID(service, nodes[0])\n\t\tif e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\tloadbalancer, err = l.createLoadbalancer(clusterName, subnetID, service)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// query ELB listeners list\n\tlisteners, err := l.sharedELBClient.ListListeners(&elbmodel.ListListenersRequest{LoadbalancerId: &loadbalancer.Id})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, port := range service.Spec.Ports {\n\t\tlistener := l.filterListenerByPort(listeners, service, port)\n\t\t// add or update listener\n\t\tif listener == nil {\n\t\t\tlistener, err = l.createListener(loadbalancer.Id, service, port)\n\t\t} else {\n\t\t\terr = l.updateListener(listener, service)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlisteners = popListener(listeners, listener.Id)\n\n\t\t// query pool or create pool\n\t\tpool, err := l.getPool(loadbalancer.Id, listener.Id)\n\t\tif err != nil && common.IsNotFound(err) {\n\t\t\tpool, err = l.createPool(listener, service)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// add new members and remove the obsolete members.\n\t\tif err = l.addOrRemoveMembers(loadbalancer, service, pool, port, nodes); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// add or remove health monitor\n\t\tif err = l.ensureHealthCheck(loadbalancer.Id, pool, port, service, nodes[0]); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif specifiedID == \"\" {\n\t\t// All remaining listeners are obsolete, delete them\n\t\terr = l.deleteListeners(loadbalancer.Id, listeners)\n\t\tif 
err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tingressIP := loadbalancer.VipAddress\n\tpublicIPAddr, err := l.createOrAssociateEIP(loadbalancer, service)\n\tif err == nil {\n\t\tif publicIPAddr != \"\" {\n\t\t\tingressIP = publicIPAddr\n\t\t}\n\n\t\treturn &corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{{IP: ingressIP}},\n\t\t}, nil\n\t}\n\n\t// rollback\n\tklog.Errorf(\"rollback:failed to create the EIP, delete ELB instance created, error: %s\", err)\n\terrs := []error{err}\n\terr = l.EnsureLoadBalancerDeleted(ctx, clusterName, service)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t\tklog.Errorf(\"rollback: error deleting ELB instance: %s\", err)\n\t}\n\treturn nil, errors.NewAggregate(errs)\n}", "func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...any) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...interface{}) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func WatchEventSequenceVerifier(ctx context.Context, dc dynamic.Interface, resourceType schema.GroupVersionResource, namespace string, resourceName string, listOptions metav1.ListOptions, expectedWatchEvents []watch.Event, scenario func(*watchtools.RetryWatcher) []watch.Event, retryCleanup func() error) {\n\tlistWatcher := &cache.ListWatch{\n\t\tWatchFunc: func(listOptions metav1.ListOptions) (watch.Interface, error) {\n\t\t\treturn dc.Resource(resourceType).Namespace(namespace).Watch(ctx, listOptions)\n\t\t},\n\t}\n\n\tretries := 3\nretriesLoop:\n\tfor try := 1; try <= 
retries; try++ {\n\t\tinitResource, err := dc.Resource(resourceType).Namespace(namespace).List(ctx, listOptions)\n\t\tExpectNoError(err, \"Failed to fetch initial resource\")\n\n\t\tresourceWatch, err := watchtools.NewRetryWatcher(initResource.GetResourceVersion(), listWatcher)\n\t\tExpectNoError(err, \"Failed to create a resource watch of %v in namespace %v\", resourceType.Resource, namespace)\n\n\t\t// NOTE the test may need access to the events to see what's going on, such as a change in status\n\t\tactualWatchEvents := scenario(resourceWatch)\n\t\terrs := sets.NewString()\n\t\tgomega.Expect(len(expectedWatchEvents)).To(gomega.BeNumerically(\"<=\", len(actualWatchEvents)), \"Did not get enough watch events\")\n\n\t\ttotalValidWatchEvents := 0\n\t\tfoundEventIndexes := map[int]*int{}\n\n\t\tfor watchEventIndex, expectedWatchEvent := range expectedWatchEvents {\n\t\t\tfoundExpectedWatchEvent := false\n\t\tactualWatchEventsLoop:\n\t\t\tfor actualWatchEventIndex, actualWatchEvent := range actualWatchEvents {\n\t\t\t\tif foundEventIndexes[actualWatchEventIndex] != nil {\n\t\t\t\t\tcontinue actualWatchEventsLoop\n\t\t\t\t}\n\t\t\t\tif actualWatchEvent.Type == expectedWatchEvent.Type {\n\t\t\t\t\tfoundExpectedWatchEvent = true\n\t\t\t\t\tfoundEventIndexes[actualWatchEventIndex] = &watchEventIndex\n\t\t\t\t\tbreak actualWatchEventsLoop\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !foundExpectedWatchEvent {\n\t\t\t\terrs.Insert(fmt.Sprintf(\"Watch event %v not found\", expectedWatchEvent.Type))\n\t\t\t}\n\t\t\ttotalValidWatchEvents++\n\t\t}\n\t\terr = retryCleanup()\n\t\tExpectNoError(err, \"Error occurred when cleaning up resources\")\n\t\tif errs.Len() > 0 && try < retries {\n\t\t\tfmt.Println(\"invariants violated:\\n\", strings.Join(errs.List(), \"\\n - \"))\n\t\t\tcontinue retriesLoop\n\t\t}\n\t\tif errs.Len() > 0 {\n\t\t\tFailf(\"Unexpected error(s): %v\", strings.Join(errs.List(), \"\\n - \"))\n\t\t}\n\t\tExpectEqual(totalValidWatchEvents, len(expectedWatchEvents), \"Error: 
there must be an equal amount of total valid watch events (%d) and expected watch events (%d)\", totalValidWatchEvents, len(expectedWatchEvents))\n\t\tbreak retriesLoop\n\t}\n}", "func TestNotifyUpgradeCompleted(t *testing.T) {\n\tf := heartbeatFixture{t: t}\n\tdefer f.Cleanup()\n\n\tgateway0 := f.Bootstrap()\n\tgateway1 := f.Grow()\n\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\n\tgo func() {\n\t\tgateway1.WaitUpgradeNotification()\n\t\twg.Done()\n\t}()\n\n\tstate0 := f.State(gateway0)\n\n\t// Populate state.LocalConfig after nodes created above.\n\tvar err error\n\tvar nodeConfig *node.Config\n\terr = state0.DB.Node.Transaction(context.TODO(), func(ctx context.Context, tx *db.NodeTx) error {\n\t\tnodeConfig, err = node.ConfigLoad(ctx, tx)\n\t\treturn err\n\t})\n\trequire.NoError(t, err)\n\n\tstate0.LocalConfig = nodeConfig\n\n\tserverCert0 := gateway0.ServerCert()\n\terr = cluster.NotifyUpgradeCompleted(state0, serverCert0, serverCert0)\n\trequire.NoError(t, err)\n\n\twg.Wait()\n}", "func GollumListener(gollum event.Gollum) (bool, error) {\n\tlogger.Infof(\"Gollum event listener fired [%v]!\", gollum)\n\treturn true, nil\n}", "func EnsureFinalizerAdded(r *ReferenceAdapter) (util.OperationResult, error) {\n\tif !util.Contains(r.ProjectReference.GetFinalizers(), FinalizerName) {\n\t\tr.ProjectReference.SetFinalizers(append(r.ProjectReference.GetFinalizers(), FinalizerName))\n\t\treturn util.RequeueOnErrorOrStop(r.kubeClient.Update(context.TODO(), r.ProjectReference))\n\t}\n\treturn util.ContinueProcessing()\n}", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar buf syncedBuffer\n\tmem := vfs.NewMem()\n\terr := mem.MkdirAll(\"ext\", 0755)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tbuf.Reset()\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", &Options{\n\t\t\t\tFS: loggingFS{mem, &buf},\n\t\t\t\tEventListener: 
MakeLoggingEventListener(&buf),\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"flush\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"compact\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\")); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"ingest\":\n\t\t\tbuf.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := sstable.NewWriter(f, nil, LevelOptions{})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := mem.Remove(\"ext/0\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"metrics\":\n\t\t\treturn d.Metrics().String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func (b *BaseImpl) ShouldCheckBound() bool {\n\treturn true\n}", "func OrgBlockListener(orgBlock event.OrgBlock) (bool, error) {\n\tlogger.Infof(\"OrgBlock event listener fired [%v]!\", orgBlock)\n\treturn true, nil\n}", "func (c *Client) EnsureGoogleCloudClusterRoleBinding(log log.Logger) error {\n\treturn nil\n}", "func (emitter *EventEmitter) PrependOnceListener(eventName string, listener func(...interface{})) *EventEmitter {\n\treturn emitter\n}", "func (l *Listener) Ready() bool {\n\treturn 
l.ready\n}", "func (b *testBroker) Listen(lsn func(event int, ctx interface{})) {\n\tb.lsn = lsn\n}", "func InstallationListener(installation event.Installation) (bool, error) {\n\tlogger.Infof(\"Installation event listener fired [%v]!\", installation)\n\treturn true, nil\n}", "func TeamListener(team event.Team) (bool, error) {\n\tlogger.Infof(\"Team event listener fired [%v]!\", team)\n\treturn true, nil\n}", "func (s) TestListenerResourceDeletionOnServerIgnored(t *testing.T) {\n\tmgmtServer := startManagementServer(t)\n\tnodeID := uuid.New().String()\n\tbs := generateBootstrapContents(t, mgmtServer.Address, true, nodeID)\n\txdsR := xdsResolverBuilder(t, bs)\n\tresources, lis := resourceWithListenerForGRPCServer(t, nodeID)\n\tmodeChangeCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis)\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for the server to update to ServingModeServing mode.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Test timed out waiting for a server to change to ServingModeServing.\")\n\tcase mode := <-modeChangeCh:\n\t\tif mode != connectivity.ServingModeServing {\n\t\t\tt.Fatalf(\"Server switched to mode %v, want %v\", mode, connectivity.ServingModeServing)\n\t\t}\n\t}\n\n\t// Create a ClientConn and make a successful RPCs.\n\tcc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial local test server: %v\", err)\n\t}\n\tdefer cc.Close()\n\n\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Update without a listener resource.\n\tif err := mgmtServer.Update(ctx, e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*listenerpb.Listener{},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Perform RPCs every 100 ms for 1s and verify 
that the serving mode does not\n\t// change on gRPC server.\n\ttimer := time.NewTimer(500 * time.Millisecond)\n\tticker := time.NewTicker(50 * time.Millisecond)\n\tt.Cleanup(ticker.Stop)\n\tfor {\n\t\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\treturn\n\t\tcase mode := <-modeChangeCh:\n\t\t\tt.Fatalf(\"Server switched to mode: %v when no switch was expected\", mode)\n\t\tcase <-ticker.C:\n\t\t}\n\t}\n}", "func AssertNoLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...any) {\n\tt.Helper()\n\n\tif container == nil {\n\t\treturn\n\t}\n\n\tassert.Equal(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func checkReceiver(name string, t *testing.T, receiver *receiver, expected bool) {\n\ttriggered := receiver.reset()\n\tif expected && !triggered {\n\t\tt.Fatalf(\"%s: function should have been called\", name)\n\t} else if !expected && triggered {\n\t\tt.Fatalf(\"%s: function should not have been called\", name)\n\t}\n}", "func bindListener(g *G.Gilmour) {\n\tg.ReplyTo(\"test.handler.one\", fetchReply(g), nil)\n}", "func getListenerPredicate() predicate.Predicate {\n\treturn predicate.Funcs{\n\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\tnewListener, okNew := e.ObjectNew.(*networkextensionv1.Listener)\n\t\t\toldListener, okOld := e.ObjectOld.(*networkextensionv1.Listener)\n\t\t\tif !okNew || !okOld {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif newListener.DeletionTimestamp != nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif reflect.DeepEqual(newListener.Spec, oldListener.Spec) {\n\t\t\t\tblog.V(5).Infof(\"listener %+v updated, but spec not change\", oldListener)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t}\n}" ]
[ "0.6573767", "0.62322366", "0.5828939", "0.5737886", "0.563924", "0.539708", "0.5286952", "0.52551275", "0.5237625", "0.51760316", "0.517539", "0.5157494", "0.51405114", "0.51253146", "0.510969", "0.510237", "0.5100668", "0.5095541", "0.5026376", "0.50112855", "0.4984619", "0.49832135", "0.49549678", "0.49311048", "0.49310184", "0.49260646", "0.4921309", "0.4903965", "0.4890039", "0.48889062", "0.48842672", "0.48619848", "0.4831095", "0.48025218", "0.4794468", "0.47942358", "0.47890055", "0.47668672", "0.4722248", "0.47186878", "0.471736", "0.47046602", "0.47020438", "0.46971032", "0.4692539", "0.46767366", "0.46753046", "0.4662509", "0.46605062", "0.465236", "0.4649862", "0.4645947", "0.46417114", "0.46415356", "0.46315172", "0.46158555", "0.46155816", "0.46128443", "0.45999408", "0.45790952", "0.45778793", "0.45733583", "0.45643294", "0.45594954", "0.45495456", "0.45375535", "0.4534445", "0.45310318", "0.45086098", "0.45075607", "0.4505854", "0.45052713", "0.45036805", "0.44993", "0.44886357", "0.44882205", "0.44816926", "0.44812298", "0.44767463", "0.44762143", "0.4462243", "0.44561052", "0.4452814", "0.4447964", "0.44408903", "0.44371247", "0.44330317", "0.44300318", "0.44173956", "0.44147745", "0.44090047", "0.44059986", "0.43953952", "0.4392837", "0.43921605", "0.43884125", "0.43882388", "0.4385957", "0.43844938", "0.43809718" ]
0.7226577
0
DeleteListener mocks base method
func (m *MockLoadBalance) DeleteListener(region string, listener *v1.Listener) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteListener", region, listener) ret0, _ := ret[0].(error) return ret0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockListener) Delete(listenerKey api.ListenerKey, checksum api.Checksum) error {\n\tret := m.ctrl.Call(m, \"Delete\", listenerKey, checksum)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockLoadBalance) DeleteMultiListeners(region, lbID string, listeners []*v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockLoadBalance) DeleteSegmentListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteSegmentListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockHooks) OnDelete(arg0 proto.Message) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnDelete\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func Test_DeviceService_Remove_Success(t *testing.T) {\n\th := TestHelper{}\n\trep := new(mocks.IDeviceRepository)\n\trepAuth := new(mocks.IDeviceAuthRepository)\n\ts := h.CreateTestDeviceService(rep, repAuth)\n\n\tip := \"127.0.0.1\"\n\trep.On(\"Remove\", ip).Return(nil)\n\n\terr := s.Remove(ip)\n\tassert.NoError(t, err)\n}", "func (m *ManagerMock) Delete(ctx context.Context, s *hub.Subscription) error {\n\targs := m.Called(ctx, s)\n\treturn args.Error(0)\n}", "func (m *MockWatcher) Delete() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Delete\")\n}", "func (m *MockStream) RemoveEventListener(streamEventListener types.StreamEventListener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RemoveEventListener\", streamEventListener)\n}", "func TestDeleteHandler(t *testing.T) {\n\n // ...\n\n}", "func (m *MockProvider) OnServiceDelete(arg0 *v1.Service) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceDelete\", arg0)\n}", "func (m *MockDBStorage) DeleteCallback(arg0, arg1 string) (sql.Result, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteCallback\", arg0, arg1)\n\tret0, _ := ret[0].(sql.Result)\n\tret1, _ := 
ret[1].(error)\n\treturn ret0, ret1\n}", "func (l *Listener) delete(rOpts *ReconcileOptions) error {\n\tin := elbv2.DeleteListenerInput{\n\t\tListenerArn: l.CurrentListener.ListenerArn,\n\t}\n\n\tif err := awsutil.ALBsvc.RemoveListener(in); err != nil {\n\t\trOpts.Eventf(api.EventTypeWarning, \"ERROR\", \"Error deleting %v listener: %s\", *l.CurrentListener.Port, err.Error())\n\t\tl.logger.Errorf(\"Failed Listener deletion. ARN: %s: %s\",\n\t\t\t*l.CurrentListener.ListenerArn, err.Error())\n\t\treturn err\n\t}\n\n\tl.deleted = true\n\treturn nil\n}", "func (m *MockCallback) OnRemoveAll() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemoveAll\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockProvider) OnEndpointsDelete(arg0 *v1.Endpoints) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsDelete\", arg0)\n}", "func (m *MockCallback) OnRemove(arg0 int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemove\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar memLog base.InMemLogger\n\tmem := vfs.NewMem()\n\trequire.NoError(t, mem.MkdirAll(\"ext\", 0755))\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(t *testing.T, td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tmemLog.Reset()\n\t\t\tlel := MakeLoggingEventListener(&memLog)\n\t\t\tflushBegin, flushEnd := lel.FlushBegin, lel.FlushEnd\n\t\t\tlel.FlushBegin = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushBegin(info)\n\t\t\t}\n\t\t\tlel.FlushEnd = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushEnd(info)\n\t\t\t}\n\t\t\topts := &Options{\n\t\t\t\tFS: vfs.WithLogging(mem, memLog.Infof),\n\t\t\t\tFormatMajorVersion: internalFormatNewest,\n\t\t\t\tEventListener: &lel,\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tL0CompactionThreshold: 10,\n\t\t\t\tWALDir: 
\"wal\",\n\t\t\t}\n\t\t\t// The table stats collector runs asynchronously and its\n\t\t\t// timing is less predictable. It increments nextJobID, which\n\t\t\t// can make these tests flaky. The TableStatsLoaded event is\n\t\t\t// tested separately in TestTableStats.\n\t\t\topts.private.disableTableStats = true\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", opts)\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tt := time.Now()\n\t\t\td.timeNow = func() time.Time {\n\t\t\t\tt = t.Add(time.Second)\n\t\t\t\treturn t\n\t\t\t}\n\t\t\td.testingAlwaysWaitForCleanup = true\n\t\t\treturn memLog.String()\n\n\t\tcase \"close\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"flush\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"compact\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\"), false); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"checkpoint\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Checkpoint(\"checkpoint\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"disable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\td.mu.Lock()\n\t\t\td.disableFileDeletions()\n\t\t\td.mu.Unlock()\n\t\t\treturn memLog.String()\n\n\t\tcase \"enable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\tfunc() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\tmemLog.Infof(\"%v\", r)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\td.mu.Lock()\n\t\t\t\tdefer 
d.mu.Unlock()\n\t\t\t\td.enableFileDeletions()\n\t\t\t}()\n\t\t\td.TestOnlyWaitForCleaning()\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest\":\n\t\t\tmemLog.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest-flushable\":\n\t\t\tmemLog.Reset()\n\n\t\t\t// Prevent flushes during this test to ensure determinism.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = true\n\t\t\td.mu.Unlock()\n\n\t\t\tb := d.NewBatch()\n\t\t\tif err := b.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Apply(b, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\twriteTable := func(name string, key byte) error {\n\t\t\t\tf, err := mem.Create(name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tw := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t\t})\n\t\t\t\tif err := w.Add(base.MakeInternalKey([]byte{key}, 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.Close(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\ttableA, tableB := \"ext/a\", \"ext/b\"\n\t\t\tif err := writeTable(tableA, 'a'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := writeTable(tableB, 'b'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := 
d.Ingest([]string{tableA, tableB}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\n\t\t\t// Re-enable flushes, to allow the subsequent flush to proceed.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = false\n\t\t\td.mu.Unlock()\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"metrics\":\n\t\t\t// The asynchronous loading of table stats can change metrics, so\n\t\t\t// wait for all the tables' stats to be loaded.\n\t\t\td.mu.Lock()\n\t\t\td.waitTableStats()\n\t\t\td.mu.Unlock()\n\n\t\t\treturn d.Metrics().String()\n\n\t\tcase \"sstables\":\n\t\t\tvar buf bytes.Buffer\n\t\t\ttableInfos, _ := d.SSTables()\n\t\t\tfor i, level := range tableInfos {\n\t\t\t\tif len(level) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(&buf, \"%d:\\n\", i)\n\t\t\t\tfor _, m := range level {\n\t\t\t\t\tfmt.Fprintf(&buf, \" %d:[%s-%s]\\n\",\n\t\t\t\t\t\tm.FileNum, m.Smallest.UserKey, m.Largest.UserKey)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func (_m *MockBookingStorage) Delete() {\n\t_m.Called()\n}", "func (et *emulatorTest) delete() {\n\tet.host.Path = strings.Join([]string{\"retry_test\", et.id}, \"/\")\n\tc := http.DefaultClient\n\treq, err := http.NewRequest(\"DELETE\", et.host.String(), nil)\n\tif err != nil {\n\t\tet.Errorf(\"creating request: %v\", err)\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil || resp.StatusCode != 200 {\n\t\tet.Errorf(\"deleting test: err: %v, resp: %+v\", err, resp)\n\t}\n}", "func (m *MockOperation) RemoveHandler(arg0 *client.EventTarget) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoveHandler\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func MockListener(t *testing.T, address string) {\n\tladdr, err := net.ResolveUDPAddr(\"udp\", address)\n\tif err != nil {\n\t\tt.Fatal(\"Couldn't resolve address\", err)\n\t}\n\n\t_, err = 
net.ListenUDP(\"udp\", laddr)\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't listen to %s: %s\", address, err)\n\t}\n\n\ttime.Sleep(10 * time.Second)\n\treturn\n}", "func Delete(c *golangsdk.ServiceClient, id string) (r DeleteResult) {\n\turl := resourceURL(c, id)\n\t//fmt.Printf(\"Delete listener url: %s.\\n\", url)\n\t_, r.Err = c.Delete(url, &golangsdk.RequestOpts{\n\t\tOkCodes: []int{204},\n\t})\n\treturn\n}", "func (m *MockAll) Listener() Listener {\n\tret := m.ctrl.Call(m, \"Listener\")\n\tret0, _ := ret[0].(Listener)\n\treturn ret0\n}", "func (s) TestListenerResourceDeletionOnServerNotIgnored(t *testing.T) {\n\tmgmtServer := startManagementServer(t)\n\tnodeID := uuid.New().String()\n\tbs := generateBootstrapContents(t, mgmtServer.Address, false, nodeID)\n\txdsR := xdsResolverBuilder(t, bs)\n\tresources, lis := resourceWithListenerForGRPCServer(t, nodeID)\n\tupdateCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis)\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for the listener to move to \"serving\" mode.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Test timed out waiting for a mode change update.\")\n\tcase mode := <-updateCh:\n\t\tif mode != connectivity.ServingModeServing {\n\t\t\tt.Fatalf(\"Listener received new mode %v, want %v\", mode, connectivity.ServingModeServing)\n\t\t}\n\t}\n\n\t// Create a ClientConn and make a successful RPCs.\n\tcc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial local test server: %v\", err)\n\t}\n\tdefer cc.Close()\n\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := mgmtServer.Update(ctx, e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*listenerpb.Listener{}, // empty listener resource\n\t}); 
err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatalf(\"timed out waiting for a mode change update: %v\", err)\n\tcase mode := <-updateCh:\n\t\tif mode != connectivity.ServingModeNotServing {\n\t\t\tt.Fatalf(\"listener received new mode %v, want %v\", mode, connectivity.ServingModeNotServing)\n\t\t}\n\t}\n}", "func (m *MockDeletableStorage) Del(ctx context.Context, keys ...interface{}) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range keys {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Del\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (d dummyFuncs) RemoveListener(ch *chan ModifiedFile) {\n\tlog.Warning(\"dummy RemoveListener() is called. It does nothing\")\n\treturn\n}", "func DeleteListener(t *testing.T, client *gophercloud.ServiceClient, lbID, listenerID string) {\n\tt.Logf(\"Attempting to delete listener %s\", listenerID)\n\n\tif err := listeners.Delete(client, listenerID).ExtractErr(); err != nil {\n\t\tif _, ok := err.(gophercloud.ErrDefault404); !ok {\n\t\t\tt.Fatalf(\"Unable to delete listener: %v\", err)\n\t\t}\n\t}\n\n\tif err := WaitForLoadBalancerState(client, lbID, \"ACTIVE\"); err != nil {\n\t\tt.Fatalf(\"Timed out waiting for loadbalancer to become active: %s\", err)\n\t}\n\n\tt.Logf(\"Successfully deleted listener %s\", listenerID)\n}", "func (l *MockListener) Close() error {\n\treturn nil\n}", "func (m *MockStreamEventListener) OnDestroyStream() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnDestroyStream\")\n}", "func TestChannelDelete(t *testing.T) {\n\t// delete this channel\n\twrt := channels[0]\n\n\t// create the mock repo and controller.\n\t// the deletion itself has no bearing on the test\n\t// so just use the findID function which has the the same signature\n\t// and performs the operation we need\n\trepo := &mock.ChannelRepo{DeleteIDFunc: findChannelID}\n\tcontroller := NewChannel(repo)\n\n\t// create a mock request\n\tp := 
httprouter.Param{Key: \"id\", Value: \"1\"}\n\treq, e := http.NewRequest(http.MethodDelete, \"/channel/\"+p.Value, nil)\n\n\tif e != nil {\n\t\tt.Fatal(e)\n\t}\n\n\t// embed params necessary for controller function\n\tuf.EmbedParams(req, p)\n\n\t// create a response recorder and call the delete method\n\tw := httptest.NewRecorder()\n\te = controller.Delete(w, req)\n\n\tif e != nil {\n\t\tt.Fatal(e)\n\t}\n\n\tres := w.Result()\n\n\t// check if the repo was hit\n\tif !repo.DeleteIDCalled {\n\t\tt.Error(\"Did not call repo.DeleteID\")\n\t}\n\n\t// ensure the content type is application/json\n\tcheckCT(res, t)\n\n\t// extract the body and check the correct channel was returned\n\tdefer res.Body.Close()\n\tbody, e := io.ReadAll(res.Body)\n\n\tif e != nil {\n\t\tt.Fatal(e)\n\t}\n\n\treceived := &are_hub.Channel{}\n\te = json.Unmarshal(body, received)\n\n\tif e != nil {\n\t\tt.Fatal(e)\n\t}\n\n\tif received.Name != wrt.Name {\n\t\tt.Fatalf(\"Expected: %v. Actual: %v.\", wrt, received)\n\t}\n\n\t// check delete returns 404 for an invalid ID\n\tp = httprouter.Param{Key: \"id\", Value: \"-1\"}\n\ttest404(t, http.MethodDelete, \"/channel/\"+p.Value, nil, controller.Delete, p)\n}", "func (m *MockRouterTx) DELETE(path string, handler interface{}, options ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{path, handler}\n\tfor _, a := range options {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"DELETE\", varargs...)\n}", "func (m *MockProc) OnSvcHostRemove(arg0 []*host.Host) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnSvcHostRemove\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (s) TestIgnoreResourceDeletionOnClient(t *testing.T) {\n\tserver1 := stubserver.StartTestService(t, nil)\n\tt.Cleanup(server1.Stop)\n\n\tserver2 := stubserver.StartTestService(t, nil)\n\tt.Cleanup(server2.Stop)\n\n\tinitialResourceOnServer := func(nodeID string) e2e.UpdateOptions {\n\t\treturn e2e.UpdateOptions{\n\t\t\tNodeID: 
nodeID,\n\t\t\tListeners: []*listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)},\n\t\t\tRoutes: []*routepb.RouteConfiguration{defaultRouteConfigWithTwoRoutes},\n\t\t\tClusters: []*clusterpb.Cluster{\n\t\t\t\te2e.DefaultCluster(cdsName1, edsName1, e2e.SecurityLevelNone),\n\t\t\t\te2e.DefaultCluster(cdsName2, edsName2, e2e.SecurityLevelNone),\n\t\t\t},\n\t\t\tEndpoints: []*endpointpb.ClusterLoadAssignment{\n\t\t\t\te2e.DefaultEndpoint(edsName1, \"localhost\", []uint32{testutils.ParsePort(t, server1.Address)}),\n\t\t\t\te2e.DefaultEndpoint(edsName2, \"localhost\", []uint32{testutils.ParsePort(t, server2.Address)}),\n\t\t\t},\n\t\t\tSkipValidation: true,\n\t\t}\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tupdateResource func(r *e2e.UpdateOptions)\n\t}{\n\t\t{\n\t\t\tname: \"listener\",\n\t\t\tupdateResource: func(r *e2e.UpdateOptions) {\n\t\t\t\tr.Listeners = nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"cluster\",\n\t\t\tupdateResource: func(r *e2e.UpdateOptions) {\n\t\t\t\tr.Clusters = nil\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s resource deletion ignored\", test.name), func(t *testing.T) {\n\t\t\ttestResourceDeletionIgnored(t, initialResourceOnServer, test.updateResource)\n\t\t})\n\t\tt.Run(fmt.Sprintf(\"%s resource deletion not ignored\", test.name), func(t *testing.T) {\n\t\t\ttestResourceDeletionNotIgnored(t, initialResourceOnServer, test.updateResource)\n\t\t})\n\t}\n}", "func (m *MockAuthCheckerClient) Delete(arg0 context.Context, arg1 *auth.SessionToken, arg2 ...grpc.CallOption) (*auth.Nothing, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Delete\", varargs...)\n\tret0, _ := ret[0].(*auth.Nothing)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestNodeDeleted(t *testing.T) {\n\tpod0 := &v1.Pod{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tNamespace: 
\"default\",\n\t\t\tName: \"pod0\",\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tNodeName: \"node0\",\n\t\t},\n\t\tStatus: v1.PodStatus{\n\t\t\tConditions: []v1.PodCondition{\n\t\t\t\t{\n\t\t\t\t\tType: v1.PodReady,\n\t\t\t\t\tStatus: v1.ConditionTrue,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tpod1 := &v1.Pod{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tNamespace: \"default\",\n\t\t\tName: \"pod1\",\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tNodeName: \"node0\",\n\t\t},\n\t\tStatus: v1.PodStatus{\n\t\t\tConditions: []v1.PodCondition{\n\t\t\t\t{\n\t\t\t\t\tType: v1.PodReady,\n\t\t\t\t\tStatus: v1.ConditionTrue,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfnh := &testutil.FakeNodeHandler{\n\t\tExisting: []*v1.Node{\n\t\t\t{\n\t\t\t\tObjectMeta: v1.ObjectMeta{\n\t\t\t\t\tName: \"node0\",\n\t\t\t\t\tCreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t\tStatus: v1.NodeStatus{\n\t\t\t\t\tConditions: []v1.NodeCondition{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tType: v1.NodeReady,\n\t\t\t\t\t\t\tStatus: v1.ConditionUnknown,\n\t\t\t\t\t\t\tLastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),\n\t\t\t\t\t\t\tLastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tClientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*pod0, *pod1}}),\n\t\tDeleteWaitChan: make(chan struct{}),\n\t}\n\n\tfactory := informers.NewSharedInformerFactory(fnh, nil, controller.NoResyncPeriodFunc())\n\n\teventBroadcaster := record.NewBroadcaster()\n\tcloudNodeController := &CloudNodeController{\n\t\tkubeClient: fnh,\n\t\tnodeInformer: factory.Nodes(),\n\t\tcloud: &fakecloud.FakeCloud{Err: cloudprovider.InstanceNotFound},\n\t\tnodeMonitorPeriod: 5 * time.Second,\n\t\trecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: \"controllermanager\"}),\n\t}\n\teventBroadcaster.StartLogging(glog.Infof)\n\n\tcloudNodeController.Run()\n\n\tselect {\n\tcase <-fnh.DeleteWaitChan:\n\tcase 
<-time.After(wait.ForeverTestTimeout):\n\t\tt.Errorf(\"Timed out waiting %v for node to be deleted\", wait.ForeverTestTimeout)\n\t}\n\tif len(fnh.DeletedNodes) != 1 || fnh.DeletedNodes[0].Name != \"node0\" {\n\t\tt.Errorf(\"Node was not deleted\")\n\t}\n}", "func (m *MockListener) Modify(listener api.Listener) (api.Listener, error) {\n\tret := m.ctrl.Call(m, \"Modify\", listener)\n\tret0, _ := ret[0].(api.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *NuclioFunctionEventInterface) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {\n\tret := _m.Called(ctx, name, opts)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, v1.DeleteOptions) error); ok {\n\t\tr0 = rf(ctx, name, opts)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockInternalServer) ProxyrCbDelete(arg0 context.Context, arg1 *ProxyrCbDeleteRequestMsg) (*ProxyrCbDeleteResponseMsg, error) {\n\tret := m.ctrl.Call(m, \"ProxyrCbDelete\", arg0, arg1)\n\tret0, _ := ret[0].(*ProxyrCbDeleteResponseMsg)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *Index) Delete(_a0 index.Entry) (storage.Event, error) {\n\tret := _m.Called(_a0)\n\n\tvar r0 storage.Event\n\tif rf, ok := ret.Get(0).(func(index.Entry) storage.Event); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Get(0).(storage.Event)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(index.Entry) error); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func assertDeleted(t *testing.T, cl client.Client, thing client.Object) {\n\tt.Helper()\n\tif err := cl.Delete(context.TODO(), thing); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn\n\t\t}\n\t\tt.Fatalf(\"Failed to delete %s: %v\", thing.GetName(), err)\n\t} else {\n\t\tt.Logf(\"Deleted %s\", thing.GetName())\n\t}\n}", "func (m *MockInternalClient) ProxyrCbDelete(ctx context.Context, in *ProxyrCbDeleteRequestMsg, opts ...grpc.CallOption) 
(*ProxyrCbDeleteResponseMsg, error) {\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"ProxyrCbDelete\", varargs...)\n\tret0, _ := ret[0].(*ProxyrCbDeleteResponseMsg)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *ServerSuite) TestSrvRTMOnDelete(c *C) {\n\te1 := testutils.NewResponder(\"Hi, I'm endpoint 1\")\n\tdefer e1.Close()\n\n\tb := MakeBatch(Batch{Addr: \"localhost:11300\", Route: `Path(\"/\")`, URL: e1.URL})\n\tc.Assert(s.mux.Init(b.Snapshot()), IsNil)\n\tc.Assert(s.mux.Start(), IsNil)\n\tdefer s.mux.Stop(true)\n\n\t// When: an existing backend server is removed and added again.\n\tfor i := 0; i < 3; i++ {\n\t\tc.Assert(GETResponse(c, b.FrontendURL(\"/\")), Equals, \"Hi, I'm endpoint 1\")\n\t}\n\tc.Assert(s.mux.DeleteServer(b.SK), IsNil)\n\tc.Assert(s.mux.UpsertServer(b.BK, b.S), IsNil)\n\tfor i := 0; i < 4; i++ {\n\t\tc.Assert(GETResponse(c, b.FrontendURL(\"/\")), Equals, \"Hi, I'm endpoint 1\")\n\t}\n\n\t// Then: total count includes only metrics after the server was re-added.\n\trts, err := s.mux.ServerStats(b.SK)\n\tc.Assert(err, IsNil)\n\tc.Assert(rts.Counters.Total, Equals, int64(4))\n}", "func (f *FileStorage) ListenOnRemove(err error) *FileStorage {\n\tf.On(\"Remove\", mock.AnythingOfType(\"string\")).Return(err)\n\treturn f\n}", "func (m *MockListener) Get(listenerKey api.ListenerKey) (api.Listener, error) {\n\tret := m.ctrl.Call(m, \"Get\", listenerKey)\n\tret0, _ := ret[0].(api.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockHTTP) Delete(w http.ResponseWriter, r *http.Request) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Delete\", w, r)\n}", "func TestDelete(t *testing.T) {\n\tRunWithInstance(func(instance *Instance) {\n\t\tInsertFixtures(instance, []EntryFixture{\n\t\t\t{Name: \"int\", Value: \"2891\", ValueType: 1},\n\t\t\t{Name: \"string\", Value: \"hello world!\", ValueType: 3},\n\t\t})\n\n\t\tif err := 
instance.Delete(\"int\"); err != nil {\n\t\t\tt.Error(\"Instance.Delete: got error:\\n\", err)\n\t\t}\n\n\t\tif err := panicked(func() { instance.MustDelete(\"string\") }); err != nil {\n\t\t\tt.Error(\"Instance.MustDelete: got panic:\\n\", err)\n\t\t}\n\n\t\tif err := instance.Delete(\"foo\"); err == nil {\n\t\t\tt.Error(\"Instance.Delete: expected error with non-existent entry\")\n\t\t} else if _, ok := err.(*ErrNoEntry); !ok {\n\t\t\tt.Error(\"Instance.Delete: expected error of type *ErrNoEntry\")\n\t\t}\n\n\t\tif err := panicked(func() { instance.MustDelete(\"foo\") }); err == nil {\n\t\t\tt.Error(\"Instance.MustDelete: expected panic with non-existent entry\")\n\t\t}\n\t})\n}", "func (m *MockSessionRunner) Remove(arg0 protocol.ConnectionID) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Remove\", arg0)\n}", "func TestRemoveByIDCorrectData(t *testing.T) {\n\tcMock := getMock()\n\tcMock.On(\"DeleteAnswerByID\", answerToRemoveID).Return(nil)\n\n\tbody := []byte(\"{\\\"id\\\": 1}\")\n\terr := RemoveDELETE(body)\n\tassert.Nil(t, err)\n}", "func newMockListener(endpoint net.Conn) *mockListener {\n \n c := make(chan net.Conn, 1)\n c <- endpoint\n listener := &mockListener{\n connChannel: c,\n serverEndpoint: endpoint,\n }\n return listener\n}", "func (m *MockRouter) DELETE(path string, handler interface{}, options ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{path, handler}\n\tfor _, a := range options {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"DELETE\", varargs...)\n}", "func (h *Hookbot) Del(l Listener) {\n\tclose(l.dead)\n\th.delListener <- l\n}", "func TestDestroyHandler(t *testing.T) {\n\tid := \"c79c54de-39ae-46b0-90e5-9f84c77f6974\"\n\tparams := httprouter.Params{\n\t\thttprouter.Param{Key: \"id\", Value: id},\n\t}\n\ts := Subscription{\n\t\tEventType: \"test_type\",\n\t\tContext: \"test_context\",\n\t}\n\n\th := Handler{\n\t\tdb: MockDatabase{\n\t\t\ts: s,\n\t\t\tgetID: id,\n\t\t},\n\t}\n\n\treq, w := 
newReqParams(\"GET\")\n\n\th.Destroy(w, req, params)\n\n\tcases := []struct {\n\t\tlabel, actual, expected interface{}\n\t}{\n\t\t{\"Response code\", w.Code, 200},\n\t\t{\"Response body contains context\", strings.Contains(w.Body.String(), s.Context), true},\n\t\t{\"Response body contains event type\", strings.Contains(w.Body.String(), s.EventType), true},\n\t}\n\th.Index(w, req, httprouter.Params{})\n\n\ttestCases(t, cases)\n\tcases = []struct {\n\t\tlabel, actual, expected interface{}\n\t}{\n\t\t{\"Response body doesn't contain the id\", strings.Contains(w.Body.String(), id), false},\n\t}\n\ttestCases(t, cases)\n}", "func (s) TestSuccessCaseDeletedRoute(t *testing.T) {\n\trh, fakeClient, ch := setupTests()\n\n\trh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true})\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\t// Will start two watches.\n\tif err := waitForFuncWithNames(ctx, fakeClient.WaitForWatchRouteConfig, route1, route2); err != nil {\n\t\tt.Fatalf(\"Error while waiting for names: %v\", err)\n\t}\n\n\t// Update the RDSHandler with route names which deletes a route name to\n\t// watch. This should trigger the RDSHandler to cancel the watch for the\n\t// deleted route name to watch.\n\trh.updateRouteNamesToWatch(map[string]bool{route1: true})\n\t// This should delete the watch for route2.\n\trouteNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsClient.CancelRDS failed with error %v\", err)\n\t}\n\tif routeNameDeleted != route2 {\n\t\tt.Fatalf(\"xdsClient.CancelRDS called for route %v, want %v\", routeNameDeleted, route2)\n\t}\n\n\trdsUpdate := xdsresource.RouteConfigUpdate{}\n\t// Invoke callback with the xds client with a certain route update. 
Due to\n\t// this route update updating every route name that rds handler handles,\n\t// this should write to the update channel to send to the listener.\n\tfakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil)\n\trhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate}\n\tselect {\n\tcase rhu := <-ch:\n\t\tif diff := cmp.Diff(rhu.updates, rhuWant); diff != \"\" {\n\t\t\tt.Fatalf(\"got unexpected route update, diff (-got, +want): %v\", diff)\n\t\t}\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Timed out waiting for update from update channel.\")\n\t}\n\n\trh.close()\n\trouteNameDeleted, err = fakeClient.WaitForCancelRouteConfigWatch(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsClient.CancelRDS failed with error: %v\", err)\n\t}\n\tif routeNameDeleted != route1 {\n\t\tt.Fatalf(\"xdsClient.CancelRDS called for route %v, want %v\", routeNameDeleted, route1)\n\t}\n}", "func TestPodDeletionEvent(t *testing.T) {\n\tf := func(path cmp.Path) bool {\n\t\tswitch path.String() {\n\t\t// These fields change at runtime, so ignore it\n\t\tcase \"LastTimestamp\", \"FirstTimestamp\", \"ObjectMeta.Name\":\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tt.Run(\"emitPodDeletionEvent\", func(t *testing.T) {\n\t\tcontroller := &NoExecuteTaintManager{}\n\t\trecorder := testutil.NewFakeRecorder()\n\t\tcontroller.recorder = recorder\n\t\tcontroller.emitPodDeletionEvent(types.NamespacedName{\n\t\t\tName: \"test\",\n\t\t\tNamespace: \"test\",\n\t\t})\n\t\twant := []*v1.Event{\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t},\n\t\t\t\tInvolvedObject: v1.ObjectReference{\n\t\t\t\t\tKind: \"Pod\",\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t\tReason: \"TaintManagerEviction\",\n\t\t\t\tType: \"Normal\",\n\t\t\t\tCount: 1,\n\t\t\t\tMessage: \"Marking for deletion Pod test/test\",\n\t\t\t\tSource: v1.EventSource{Component: 
\"nodeControllerTest\"},\n\t\t\t},\n\t\t}\n\t\tif diff := cmp.Diff(want, recorder.Events, cmp.FilterPath(f, cmp.Ignore())); len(diff) > 0 {\n\t\t\tt.Errorf(\"emitPodDeletionEvent() returned data (-want,+got):\\n%s\", diff)\n\t\t}\n\t})\n\n\tt.Run(\"emitCancelPodDeletionEvent\", func(t *testing.T) {\n\t\tcontroller := &NoExecuteTaintManager{}\n\t\trecorder := testutil.NewFakeRecorder()\n\t\tcontroller.recorder = recorder\n\t\tcontroller.emitCancelPodDeletionEvent(types.NamespacedName{\n\t\t\tName: \"test\",\n\t\t\tNamespace: \"test\",\n\t\t})\n\t\twant := []*v1.Event{\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t},\n\t\t\t\tInvolvedObject: v1.ObjectReference{\n\t\t\t\t\tKind: \"Pod\",\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t\tReason: \"TaintManagerEviction\",\n\t\t\t\tType: \"Normal\",\n\t\t\t\tCount: 1,\n\t\t\t\tMessage: \"Cancelling deletion of Pod test/test\",\n\t\t\t\tSource: v1.EventSource{Component: \"nodeControllerTest\"},\n\t\t\t},\n\t\t}\n\t\tif diff := cmp.Diff(want, recorder.Events, cmp.FilterPath(f, cmp.Ignore())); len(diff) > 0 {\n\t\t\tt.Errorf(\"emitPodDeletionEvent() returned data (-want,+got):\\n%s\", diff)\n\t\t}\n\t})\n}", "func (m *MockIPAMDriver) Del(arg0 *invoke.Args, arg1 *types.K8sArgs, arg2 []byte) (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Del\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s) TestListenerResourceDeletionOnServerIgnored(t *testing.T) {\n\tmgmtServer := startManagementServer(t)\n\tnodeID := uuid.New().String()\n\tbs := generateBootstrapContents(t, mgmtServer.Address, true, nodeID)\n\txdsR := xdsResolverBuilder(t, bs)\n\tresources, lis := resourceWithListenerForGRPCServer(t, nodeID)\n\tmodeChangeCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 
defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for the server to update to ServingModeServing mode.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Test timed out waiting for a server to change to ServingModeServing.\")\n\tcase mode := <-modeChangeCh:\n\t\tif mode != connectivity.ServingModeServing {\n\t\t\tt.Fatalf(\"Server switched to mode %v, want %v\", mode, connectivity.ServingModeServing)\n\t\t}\n\t}\n\n\t// Create a ClientConn and make a successful RPCs.\n\tcc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial local test server: %v\", err)\n\t}\n\tdefer cc.Close()\n\n\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Update without a listener resource.\n\tif err := mgmtServer.Update(ctx, e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*listenerpb.Listener{},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Perform RPCs every 100 ms for 1s and verify that the serving mode does not\n\t// change on gRPC server.\n\ttimer := time.NewTimer(500 * time.Millisecond)\n\tticker := time.NewTicker(50 * time.Millisecond)\n\tt.Cleanup(ticker.Stop)\n\tfor {\n\t\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\treturn\n\t\tcase mode := <-modeChangeCh:\n\t\t\tt.Fatalf(\"Server switched to mode: %v when no switch was expected\", mode)\n\t\tcase <-ticker.C:\n\t\t}\n\t}\n}", "func (m *MockEventBus) RemoveHandler(arg0 members.Handler) {\n\tm.ctrl.Call(m, \"RemoveHandler\", arg0)\n}", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar buf syncedBuffer\n\tmem := vfs.NewMem()\n\terr := mem.MkdirAll(\"ext\", 0755)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(td *datadriven.TestData) string 
{\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tbuf.Reset()\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", &Options{\n\t\t\t\tFS: loggingFS{mem, &buf},\n\t\t\t\tEventListener: MakeLoggingEventListener(&buf),\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"flush\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"compact\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\")); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"ingest\":\n\t\t\tbuf.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := sstable.NewWriter(f, nil, LevelOptions{})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := mem.Remove(\"ext/0\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"metrics\":\n\t\t\treturn d.Metrics().String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func (m *MockReminds) HandleDeleteRemindCommand(arg0 *discordgo.Session, arg1 *discordgo.MessageCreate, arg2 []string, arg3 context.Context) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"HandleDeleteRemindCommand\", arg0, arg1, arg2, arg3)\n}", "func (l *Listeners) Delete(id xid.ID) {\n\n\tl.Lock()\n\tdelete(l.listeners, 
id)\n\tl.Unlock()\n}", "func TestUpdateSubscriptionDeleteFilter(t *testing.T) {\n\trepository, mock := initTest(t)\n\n\t// subscription to update\n\tsubscription := models.Subscription{\n\t\tID: \"42\",\n\t\tCallbackURL: \"url\",\n\t\tCallbackType: models.HTTP,\n\t\tFilters: map[models.EventType]models.Filter{\n\t\t\tmodels.DirectoryBlockCommit: {Filtering: fmt.Sprintf(\"no change filtering\")},\n\t\t},\n\t}\n\tsubscriptionContext := &models.SubscriptionContext{\n\t\tSubscription: subscription,\n\t\tFailures: 0,\n\t}\n\n\tcolumns := []string{\"failures\", \"callback\", \"callback_type\", \"status\", \"info\", \"access_token\", \"username\", \"password\", \"event_type\", \"filtering\"}\n\tmock.ExpectQuery(`SELECT failures, callback, callback_type, status, info, access_token, username, password, event_type, filtering FROM subscriptions LEFT JOIN filters ON filters.subscription = subscriptions.id WHERE subscriptions.id = \\?`).\n\t\tWithArgs(subscription.ID).\n\t\tWillReturnRows(sqlmock.NewRows(columns).\n\t\t\tAddRow(subscriptionContext.Failures, \"url-change\", subscription.CallbackType, subscription.SubscriptionStatus, subscription.SubscriptionInfo, subscription.Credentials.AccessToken, subscription.Credentials.BasicAuthUsername, subscription.Credentials.BasicAuthPassword, models.DirectoryBlockCommit, \"no change filtering\").\n\t\t\tAddRow(subscriptionContext.Failures, \"url-change\", subscription.CallbackType, subscription.SubscriptionStatus, subscription.SubscriptionInfo, subscription.Credentials.AccessToken, subscription.Credentials.BasicAuthUsername, subscription.Credentials.BasicAuthPassword, models.ChainCommit, \"this will be deleted\"))\n\n\tmock.ExpectBegin()\n\tmock.ExpectExec(`UPDATE subscriptions`).WithArgs(subscriptionContext.Failures, subscription.CallbackURL, subscription.CallbackType, subscription.SubscriptionStatus, subscription.SubscriptionInfo, subscription.Credentials.AccessToken, subscription.Credentials.BasicAuthUsername, 
subscription.Credentials.BasicAuthPassword, subscription.ID).WillReturnResult(sqlmock.NewResult(42, 1))\n\tmock.ExpectExec(`DELETE FROM filters`).WithArgs(subscription.ID, models.ChainCommit).WillReturnResult(sqlmock.NewResult(42, 1))\n\tmock.ExpectCommit()\n\n\t// now we execute our method\n\tupdatedSubscriptionContext, err := repository.UpdateSubscription(subscriptionContext)\n\tif err != nil {\n\t\tt.Errorf(\"error was not expected creating subscription: %s\", err)\n\t}\n\n\tassertSubscription(t, subscriptionContext, updatedSubscriptionContext)\n\n\t// we make sure that all expectations were met\n\tif err := mock.ExpectationsWereMet(); err != nil {\n\t\tt.Errorf(\"there were unfulfilled expectations: %s\", err)\n\t}\n}", "func (m *Client) Delete(arg0 context.Context, arg1 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Delete\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestSubscriptionUnsubscribe(t *testing.T) {\n\tmockTransport := new(mockFScopeTransport)\n\tmockTransport.On(\"Unsubscribe\").Return(nil)\n\tsub := NewFSubscription(\"foo\", mockTransport)\n\tassert.Nil(t, sub.Unsubscribe())\n\tmockTransport.AssertExpectations(t)\n}", "func TestDeleteVolume(t *testing.T) {\n\tts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tio.WriteString(w, \"{\\\"name\\\":\\\"test\\\",\\\"custom_deploy_script\\\":null,\\\"current_release_id\\\":\\\"20160509194833\\\",\\\"target_release_id\\\":null,\\\"created\\\":\\\"Mon, 09 May 2016 19:48:33 UTC\\\",\\\"updated\\\":\\\"Mon, 09 May 2016 19:48:48 UTC\\\",\\\"tags\\\":{},\\\"addresses\\\":{\\\"external\\\":[{\\\"port\\\":\\\"test\\\",\\\"address\\\":\\\"tcp://test:80\\\"}],\\\"internal\\\":null}}\")\n\t}))\n\ttestURL := strings.Replace(ts.URL, \"https://\", \"\", -1)\n\tConvey(\"When deleting a Supergiant Volume.\", t, func() {\n\t\t//setup steps\n\t\tsg, err := NewClient(testURL, \"test\", \"test\")\n\t\tSo(err, ShouldBeNil)\n\t\trelease, 
err := sg.GetRelease(\"test\", \"test\")\n\t\tSo(err, ShouldBeNil)\n\n\t\tConvey(\"We would expect the volume delete to error if it dos not have any volumes.\", func() {\n\t\t\tdelerr := DeleteVolume(release, \"cheese\")\n\t\t\tSo(delerr, ShouldNotBeNil)\n\t\t\tSo(delerr.Error(), ShouldEqual, \"This Component has not volumes.\")\n\t\t})\n\n\t\t// One Volume\n\t\trelease.Volumes = append(release.Volumes, &common.VolumeBlueprint{\n\t\t\tName: common.IDString(\"test\"),\n\t\t\tType: \"test\",\n\t\t\tSize: 30,\n\t\t})\n\n\t\tConvey(\"We would expect the volume to delete without error.\", func() {\n\t\t\tdelerr := DeleteVolume(release, \"test\")\n\t\t\tSo(delerr, ShouldBeNil)\n\t\t})\n\t\tConvey(\"We would expect the volume delete to error if it dos not exist.\", func() {\n\t\t\tdelerr := DeleteVolume(release, \"cheese\")\n\t\t\tSo(delerr, ShouldNotBeNil)\n\t\t\tSo(delerr.Error(), ShouldEqual, \"Volume not found.\")\n\t\t})\n\t\tConvey(\"We would expect the volume delete to error if there is an api error.\", func() {\n\t\t\tts.Close()\n\t\t\tdelerr := DeleteVolume(release, \"test\")\n\t\t\tSo(delerr.Error(), ShouldContainSubstring, \"Put https\")\n\t\t})\n\t})\n\n}", "func (m *MockResponseHandler) Delete(arg0 func(context.Context, string) (string, error), arg1 *responsehandler.DeleteInput) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Delete\", arg0, arg1)\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteListener\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteListener), region, listener)\n}", "func TestGetDeleted4A(t *testing.T) {\n}", "func (ml *MockMonitorListener) Close() {\n}", "func (m *MockDynamicCertPrivate) AddListener(arg0 dynamiccertificates.Listener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddListener\", arg0)\n}", "func TestRemoveEventWithRemovedResourceReference(t *testing.T) {\n\trunTest(t, func(s *Session) 
{\n\t\tc := s.Connect()\n\t\tsubscribeToTestCollectionParent(t, s, c, false)\n\n\t\t// Send event on collection and validate client event\n\t\ts.ResourceEvent(\"test.collection\", \"custom\", common.CustomEvent())\n\t\tc.GetEvent(t).Equals(t, \"test.collection.custom\", common.CustomEvent())\n\n\t\t// Send event on collection and validate client event\n\t\ts.ResourceEvent(\"test.collection.parent\", \"remove\", json.RawMessage(`{\"idx\":1}`))\n\t\tc.GetEvent(t).Equals(t, \"test.collection.parent.remove\", json.RawMessage(`{\"idx\":1}`))\n\n\t\t// Send event on collection and validate client event is not sent to client\n\t\ts.ResourceEvent(\"test.collection\", \"custom\", common.CustomEvent())\n\t\tc.AssertNoEvent(t, \"test.collection\")\n\t})\n}", "func ExpectDeleteResource(t *testing.T, w types.Watcher, timeout time.Duration, resource types.Resource) {\n\ttimeoutC := time.After(timeout)\nwaitLoop:\n\tfor {\n\t\tselect {\n\t\tcase <-timeoutC:\n\t\t\tt.Fatalf(\"Timeout waiting for delete resource %v\", resource)\n\t\tcase <-w.Done():\n\t\t\tt.Fatalf(\"Watcher exited with error %v\", w.Error())\n\t\tcase event := <-w.Events():\n\t\t\tif event.Type != types.OpDelete {\n\t\t\t\tlog.Debugf(\"Skipping stale event %v %v\", event.Type, event.Resource.GetName())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Server resources may have subkind set, but the backend\n\t\t\t// generating this delete event doesn't know the subkind.\n\t\t\t// Set it to prevent the check below from failing.\n\t\t\tif event.Resource.GetKind() == types.KindNode {\n\t\t\t\tevent.Resource.SetSubKind(resource.GetSubKind())\n\t\t\t}\n\n\t\t\trequire.Empty(t, cmp.Diff(resource, event.Resource))\n\t\t\tbreak waitLoop\n\t\t}\n\t}\n}", "func Test_App_Listener(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\n\tgo func() {\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\tutils.AssertEqual(t, nil, app.Shutdown())\n\t}()\n\n\tln := fasthttputil.NewInmemoryListener()\n\tutils.AssertEqual(t, nil, app.Listener(ln))\n}", 
"func (d Delegate) BeforeJobDeleted(spec job.Job) {\n}", "func (rh *ruleHandler) internalDelete(name string) {\n\t// deletes relevant discoverer delegate\n\tif delegate, exists := rh.d.delegates[name]; exists {\n\t\tdelegate.handler.DeleteMissing(nil)\n\t\tdelete(rh.d.delegates, name)\n\t}\n}", "func TestToOneRemove(t *testing.T) {}", "func TestToOneRemove(t *testing.T) {}", "func TestToOneRemove(t *testing.T) {}", "func TestToOneRemove(t *testing.T) {}", "func TestToOneRemove(t *testing.T) {}", "func TestToOneRemove(t *testing.T) {}", "func TestToOneRemove(t *testing.T) {}", "func TestToOneRemove(t *testing.T) {}", "func (_m *EventAPIRepository) Delete(ctx context.Context, tenantID string, id string) error {\n\tret := _m.Called(ctx, tenantID, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {\n\t\tr0 = rf(ctx, tenantID, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (ctrler CtrlDefReactor) OnSnapshotRestoreDelete(obj *SnapshotRestore) error {\n\tlog.Info(\"OnSnapshotRestoreDelete is not implemented\")\n\treturn nil\n}", "func TestDelete(t *testing.T) {\n\tlocalStore := NewEventLocalStore()\n\n\teventTest1 := &entities.Event{ID: \"id1\"}\n\n\terr := localStore.Create(eventTest1)\n\tassert.NoError(t, err)\n\n\terr = localStore.Delete(\"id1\")\n\tassert.NoError(t, err)\n\n\t// If Event Not Found\n\terr = localStore.Delete(\"\")\n\tassert.Error(t, err)\n\tassert.Equal(t, err, event.ErrEventNotFound)\n}", "func (m *MockHandle) AddrDel(link netlink.Link, addr *netlink.Addr) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddrDel\", link, addr)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (self *Mediator) OnRemove() {\n\n}", "func (client StorageTargetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, 
http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func (m *MockEventDao) DelEventByServiceID(serviceID string) error {\n\tret := m.ctrl.Call(m, \"DelEventByServiceID\", serviceID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (d *Delegate) BeforeJobDeleted(spec job.Job) {}", "func (m *MockListener) Create(listener api.Listener) (api.Listener, error) {\n\tret := m.ctrl.Call(m, \"Create\", listener)\n\tret0, _ := ret[0].(api.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockStudentRepository) Delete(arg0 gocql.UUID) *exception.AppError {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Delete\", arg0)\n\tret0, _ := ret[0].(*exception.AppError)\n\treturn ret0\n}", "func testDeleteExamList(t *testing.T) {\n\tif err := MainService.DeleteExamList(1); err != nil {\n\t\tt.Error(err)\n\t}\n}", "func (m *InterfacesClientMock) Delete(ctx context.Context, resourceGroupName string, networkInterfaceName string) *retry.Error {\n\targs := m.Called(resourceGroupName, networkInterfaceName)\n\tif args.Error(1) != nil {\n\t\treturn &retry.Error{RawError: args.Error(1)}\n\t}\n\treturn nil\n}", "func TestDeleteOrder(t *testing.T) {\n\n // ...\n\n}", "func (client JobClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusAccepted,http.StatusNoContent),\n autorest.ByClosing())\n result.Response = resp\n return\n }", "func (m *MockVirtualServiceSet) Delete(virtualService ezkube.ResourceId) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Delete\", virtualService)\n}", "func (suite *HandlerTestSuite) TestDeleteRecipe() {\n\n\trecipe1 := recipeForTest()\n\tsuite.repo.Recipes[recipe1.Id] = recipe1\n\trecipe2 := recipeForTest()\n\tsuite.repo.Recipes[recipe2.Id] = recipe2\n\n\trequest := apiGatewayRequestForTest(http.MethodDelete, nil, &recipe2.Id)\n\tresponse, err := 
suite.handler.handle(context.Background(), request)\n\tsuite.Nil(err)\n\tsuite.assertSuccessfulResponse(response)\n\n\t_, recipe1Exists := suite.repo.Recipes[recipe1.Id]\n\tsuite.True(recipe1Exists)\n\t_, recipe2Exists := suite.repo.Recipes[recipe2.Id]\n\tsuite.False(recipe2Exists)\n\n\tnotExistingId := utils.NewId()\n\trequest2 := apiGatewayRequestForTest(http.MethodDelete, nil, &notExistingId)\n\tresponse2, err := suite.handler.handle(context.Background(), request2)\n\tsuite.NotNil(err)\n\tsuite.assertResponseStatusCode(response2, http.StatusInternalServerError)\n\n\trequest3 := apiGatewayRequestForTest(http.MethodDelete, nil, nil)\n\tresponse3, err := suite.handler.handle(context.Background(), request3)\n\tsuite.NotNil(err)\n\tsuite.assertResponseStatusCode(response3, http.StatusBadRequest)\n}", "func (s *systemtestSuite) TestServiceAddDeleteServiceVxlan(c *C) {\n\ts.testServiceAddDeleteService(c, \"vxlan\")\n}", "func (client BaseClient) DeleteExpectationResponder(resp *http.Response) (result String, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (m *MockEnvoyFilterSet) Delete(envoyFilter ezkube.ResourceId) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Delete\", envoyFilter)\n}" ]
[ "0.7035906", "0.6353265", "0.6340077", "0.6139648", "0.61341786", "0.59926474", "0.5990956", "0.594307", "0.58960354", "0.5882571", "0.5869912", "0.58653843", "0.5841051", "0.5776364", "0.5721302", "0.567146", "0.56694627", "0.5627602", "0.56106", "0.5603022", "0.56028306", "0.5578928", "0.5500649", "0.5494471", "0.5483365", "0.54791784", "0.5478358", "0.5466615", "0.5446076", "0.542633", "0.5414385", "0.5410177", "0.54090554", "0.54074496", "0.5397762", "0.5382825", "0.5368461", "0.5364777", "0.53587925", "0.53517354", "0.53487253", "0.5340964", "0.5331395", "0.53292745", "0.5316819", "0.53142005", "0.53114355", "0.5292873", "0.5292077", "0.52888316", "0.5287256", "0.52812856", "0.52783626", "0.52763", "0.5271328", "0.5266862", "0.5260473", "0.5253315", "0.525082", "0.52432334", "0.52272844", "0.5226514", "0.5220107", "0.5216569", "0.52145505", "0.52129567", "0.52125883", "0.5204537", "0.518567", "0.5180789", "0.517825", "0.51781034", "0.51770794", "0.51701117", "0.51701117", "0.51701117", "0.51701117", "0.51701117", "0.51701117", "0.51701117", "0.51701117", "0.51698637", "0.5167491", "0.5165483", "0.5154727", "0.5152801", "0.5151632", "0.51511705", "0.51449513", "0.514261", "0.5135457", "0.5134015", "0.51335764", "0.5132892", "0.5128047", "0.51272154", "0.5125356", "0.51216245", "0.5121478", "0.51153237" ]
0.697801
1
DeleteListener indicates an expected call of DeleteListener
func (mr *MockLoadBalanceMockRecorder) DeleteListener(region, listener interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteListener", reflect.TypeOf((*MockLoadBalance)(nil).DeleteListener), region, listener) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (l *Listener) delete(rOpts *ReconcileOptions) error {\n\tin := elbv2.DeleteListenerInput{\n\t\tListenerArn: l.CurrentListener.ListenerArn,\n\t}\n\n\tif err := awsutil.ALBsvc.RemoveListener(in); err != nil {\n\t\trOpts.Eventf(api.EventTypeWarning, \"ERROR\", \"Error deleting %v listener: %s\", *l.CurrentListener.Port, err.Error())\n\t\tl.logger.Errorf(\"Failed Listener deletion. ARN: %s: %s\",\n\t\t\t*l.CurrentListener.ListenerArn, err.Error())\n\t\treturn err\n\t}\n\n\tl.deleted = true\n\treturn nil\n}", "func Delete(c *golangsdk.ServiceClient, id string) (r DeleteResult) {\n\turl := resourceURL(c, id)\n\t//fmt.Printf(\"Delete listener url: %s.\\n\", url)\n\t_, r.Err = c.Delete(url, &golangsdk.RequestOpts{\n\t\tOkCodes: []int{204},\n\t})\n\treturn\n}", "func (l *Listeners) Delete(id xid.ID) {\n\n\tl.Lock()\n\tdelete(l.listeners, id)\n\tl.Unlock()\n}", "func DeleteListener(t *testing.T, client *gophercloud.ServiceClient, lbID, listenerID string) {\n\tt.Logf(\"Attempting to delete listener %s\", listenerID)\n\n\tif err := listeners.Delete(client, listenerID).ExtractErr(); err != nil {\n\t\tif _, ok := err.(gophercloud.ErrDefault404); !ok {\n\t\t\tt.Fatalf(\"Unable to delete listener: %v\", err)\n\t\t}\n\t}\n\n\tif err := WaitForLoadBalancerState(client, lbID, \"ACTIVE\"); err != nil {\n\t\tt.Fatalf(\"Timed out waiting for loadbalancer to become active: %s\", err)\n\t}\n\n\tt.Logf(\"Successfully deleted listener %s\", listenerID)\n}", "func (m *MockLoadBalance) DeleteListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (mr *MockListenerMockRecorder) Delete(listenerKey, checksum interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Delete\", reflect.TypeOf((*MockListener)(nil).Delete), listenerKey, checksum)\n}", "func (scm *QuitChanMap) DeleteListener(peerAddress string, origin string, 
messageID uint32) {\n\tpmi, _ := scm.peersMap.LoadOrStore(peerAddress, &sync.Map{})\n\tpm := pmi.(*sync.Map)\n\tomi, _ := pm.LoadOrStore(origin, &sync.Map{})\n\tom := omi.(*sync.Map)\n\tom.Delete(messageID)\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteMultiListeners), region, lbID, listeners)\n}", "func (d dummyFuncs) RemoveListener(ch *chan ModifiedFile) {\n\tlog.Warning(\"dummy RemoveListener() is called. It does nothing\")\n\treturn\n}", "func (d *Delegate) BeforeJobDeleted(spec job.Job) {}", "func (d Delegate) BeforeJobDeleted(spec job.Job) {\n}", "func (m *MockLoadBalance) DeleteSegmentListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteSegmentListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (c *FakeListeners) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {\n\t_, err := c.Fake.\n\t\tInvokes(testing.NewDeleteAction(listenersResource, c.ns, name), &networkextensionv1.Listener{})\n\n\treturn err\n}", "func (ctrler CtrlDefReactor) OnSnapshotRestoreDelete(obj *SnapshotRestore) error {\n\tlog.Info(\"OnSnapshotRestoreDelete is not implemented\")\n\treturn nil\n}", "func (m *MockListener) Delete(listenerKey api.ListenerKey, checksum api.Checksum) error {\n\tret := m.ctrl.Call(m, \"Delete\", listenerKey, checksum)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteSegmentListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteSegmentListener\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteSegmentListener), region, listener)\n}", "func (h *Hookbot) Del(l Listener) {\n\tclose(l.dead)\n\th.delListener 
<- l\n}", "func (s) TestListenerResourceDeletionOnServerNotIgnored(t *testing.T) {\n\tmgmtServer := startManagementServer(t)\n\tnodeID := uuid.New().String()\n\tbs := generateBootstrapContents(t, mgmtServer.Address, false, nodeID)\n\txdsR := xdsResolverBuilder(t, bs)\n\tresources, lis := resourceWithListenerForGRPCServer(t, nodeID)\n\tupdateCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis)\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for the listener to move to \"serving\" mode.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Test timed out waiting for a mode change update.\")\n\tcase mode := <-updateCh:\n\t\tif mode != connectivity.ServingModeServing {\n\t\t\tt.Fatalf(\"Listener received new mode %v, want %v\", mode, connectivity.ServingModeServing)\n\t\t}\n\t}\n\n\t// Create a ClientConn and make a successful RPCs.\n\tcc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial local test server: %v\", err)\n\t}\n\tdefer cc.Close()\n\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := mgmtServer.Update(ctx, e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*listenerpb.Listener{}, // empty listener resource\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatalf(\"timed out waiting for a mode change update: %v\", err)\n\tcase mode := <-updateCh:\n\t\tif mode != connectivity.ServingModeNotServing {\n\t\t\tt.Fatalf(\"listener received new mode %v, want %v\", mode, connectivity.ServingModeNotServing)\n\t\t}\n\t}\n}", "func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {\n\tif r.DeleteFunc != nil {\n\t\tr.DeleteFunc(obj)\n\t}\n}", "func ExampleELB_DeleteLoadBalancerListeners_shared00() {\n\tsvc := 
elb.New(session.New())\n\tinput := &elb.DeleteLoadBalancerListenersInput{\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t\tLoadBalancerPorts: []*int64{\n\t\t\taws.Int64(80),\n\t\t},\n\t}\n\n\tresult, err := svc.DeleteLoadBalancerListeners(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (_m *Callbacks) SubscriptionDeleted(id *fftypes.UUID) {\n\t_m.Called(id)\n}", "func (ctrler CtrlDefReactor) OnNodeDelete(obj *Node) error {\n\tlog.Info(\"OnNodeDelete is not implemented\")\n\treturn nil\n}", "func (sm *stateMachine) OnDelete(key string) {\n\tif !sm.running.Load() {\n\t\tsm.logger.Warn(\"state machine is stopped\",\n\t\t\tlogger.String(\"type\", sm.stateMachineType.String()))\n\t\treturn\n\t}\n\tsm.logger.Info(\"discovery state removed\",\n\t\tlogger.String(\"type\", sm.stateMachineType.String()),\n\t\tlogger.String(\"key\", key))\n\tif sm.onDeleteFn != nil {\n\t\tsm.onDeleteFn(key)\n\t}\n}", "func (m *MockLoadBalance) DeleteMultiListeners(region, lbID string, listeners []*v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (ctrler CtrlDefReactor) OnClusterDelete(obj *Cluster) error {\n\tlog.Info(\"OnClusterDelete is not implemented\")\n\treturn nil\n}", "func (rh *ruleHandler) internalDelete(name string) {\n\t// deletes relevant discoverer delegate\n\tif delegate, exists := rh.d.delegates[name]; exists {\n\t\tdelegate.handler.DeleteMissing(nil)\n\t\tdelete(rh.d.delegates, name)\n\t}\n}", "func 
(_ *updateDebugPredicate) Delete(_ event.DeleteEvent) bool {\n\treturn true\n}", "func (e *ObservableEditableBuffer) deleted(q0, q1 OffsetTuple) {\n\te.treatasclean = false\n\tfor observer := range e.observers {\n\t\tobserver.Deleted(q0, q1)\n\t}\n}", "func (r *Router) RemoveListener(l Listener) { r.listeners.Delete(l) }", "func (ctrler CtrlDefReactor) OnDistributedServiceCardDelete(obj *DistributedServiceCard) error {\n\tlog.Info(\"OnDistributedServiceCardDelete is not implemented\")\n\treturn nil\n}", "func (ctrler CtrlDefReactor) OnConfigurationSnapshotDelete(obj *ConfigurationSnapshot) error {\n\tlog.Info(\"OnConfigurationSnapshotDelete is not implemented\")\n\treturn nil\n}", "func (c *k8sClient) OnDelete(obj interface{}) {\n\tselect {\n\tcase c.eventCh <- obj:\n\tdefault:\n\t}\n}", "func DeleteTrigger(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"Received a DELETE. Working. \")\n}", "func (ctrler CtrlDefReactor) OnDSCProfileDelete(obj *DSCProfile) error {\n\tlog.Info(\"OnDSCProfileDelete is not implemented\")\n\treturn nil\n}", "func (m *SGController) onSgDeleted(sg *v1alpha1.Statefulguardian) {\n\tglog.Infof(\"Cluster %s deleted\", sg.Name)\n\texecCont:= NewExecController(sg)\n execCont.ClusterQuit(m.ctx)\n\tmetrics.IncEventCounter(sgsDeletedCount)\n\tmetrics.DecEventGauge(sgsTotalCount)\n\tglog.Infof(\"Delete statefulset\")\n\tm.statefulSetControl.DeleteStatefulSet(sg)\n}", "func TestPodDeletionEvent(t *testing.T) {\n\tf := func(path cmp.Path) bool {\n\t\tswitch path.String() {\n\t\t// These fields change at runtime, so ignore it\n\t\tcase \"LastTimestamp\", \"FirstTimestamp\", \"ObjectMeta.Name\":\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tt.Run(\"emitPodDeletionEvent\", func(t *testing.T) {\n\t\tcontroller := &NoExecuteTaintManager{}\n\t\trecorder := testutil.NewFakeRecorder()\n\t\tcontroller.recorder = recorder\n\t\tcontroller.emitPodDeletionEvent(types.NamespacedName{\n\t\t\tName: \"test\",\n\t\t\tNamespace: 
\"test\",\n\t\t})\n\t\twant := []*v1.Event{\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t},\n\t\t\t\tInvolvedObject: v1.ObjectReference{\n\t\t\t\t\tKind: \"Pod\",\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t\tReason: \"TaintManagerEviction\",\n\t\t\t\tType: \"Normal\",\n\t\t\t\tCount: 1,\n\t\t\t\tMessage: \"Marking for deletion Pod test/test\",\n\t\t\t\tSource: v1.EventSource{Component: \"nodeControllerTest\"},\n\t\t\t},\n\t\t}\n\t\tif diff := cmp.Diff(want, recorder.Events, cmp.FilterPath(f, cmp.Ignore())); len(diff) > 0 {\n\t\t\tt.Errorf(\"emitPodDeletionEvent() returned data (-want,+got):\\n%s\", diff)\n\t\t}\n\t})\n\n\tt.Run(\"emitCancelPodDeletionEvent\", func(t *testing.T) {\n\t\tcontroller := &NoExecuteTaintManager{}\n\t\trecorder := testutil.NewFakeRecorder()\n\t\tcontroller.recorder = recorder\n\t\tcontroller.emitCancelPodDeletionEvent(types.NamespacedName{\n\t\t\tName: \"test\",\n\t\t\tNamespace: \"test\",\n\t\t})\n\t\twant := []*v1.Event{\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t},\n\t\t\t\tInvolvedObject: v1.ObjectReference{\n\t\t\t\t\tKind: \"Pod\",\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t\tReason: \"TaintManagerEviction\",\n\t\t\t\tType: \"Normal\",\n\t\t\t\tCount: 1,\n\t\t\t\tMessage: \"Cancelling deletion of Pod test/test\",\n\t\t\t\tSource: v1.EventSource{Component: \"nodeControllerTest\"},\n\t\t\t},\n\t\t}\n\t\tif diff := cmp.Diff(want, recorder.Events, cmp.FilterPath(f, cmp.Ignore())); len(diff) > 0 {\n\t\t\tt.Errorf(\"emitPodDeletionEvent() returned data (-want,+got):\\n%s\", diff)\n\t\t}\n\t})\n}", "func TestRemoveEventWithRemovedResourceReference(t *testing.T) {\n\trunTest(t, func(s *Session) {\n\t\tc := s.Connect()\n\t\tsubscribeToTestCollectionParent(t, s, c, false)\n\n\t\t// Send event on collection and validate 
client event\n\t\ts.ResourceEvent(\"test.collection\", \"custom\", common.CustomEvent())\n\t\tc.GetEvent(t).Equals(t, \"test.collection.custom\", common.CustomEvent())\n\n\t\t// Send event on collection and validate client event\n\t\ts.ResourceEvent(\"test.collection.parent\", \"remove\", json.RawMessage(`{\"idx\":1}`))\n\t\tc.GetEvent(t).Equals(t, \"test.collection.parent.remove\", json.RawMessage(`{\"idx\":1}`))\n\n\t\t// Send event on collection and validate client event is not sent to client\n\t\ts.ResourceEvent(\"test.collection\", \"custom\", common.CustomEvent())\n\t\tc.AssertNoEvent(t, \"test.collection\")\n\t})\n}", "func onDataSourceDelete(ctx context.Context, deletedSource string) {\n\n\t// TODO - find a way to delete datasources - surely a config watch\n\n\tlog.Logger(ctx).Info(\"Sync = Send Event Server-wide for \" + deletedSource)\n\tcl := defaults.NewClient()\n\tcl.Publish(ctx, cl.NewPublication(common.TOPIC_DATASOURCE_EVENT, &object.DataSourceEvent{\n\t\tType: object.DataSourceEvent_DELETE,\n\t\tName: deletedSource,\n\t}))\n\n}", "func (signup *EventSignup) OnDeleted(container *ioccontainer.Container) error {\n\terr := signup.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar eventRepository EventRepository\n\tcontainer.Make(&eventRepository)\n\n\tevent, err := eventRepository.GetEventByID(signup.EventID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn signup.sendNotification(event, \"member_signed_out\", container)\n}", "func assertDeleted(t *testing.T, cl client.Client, thing client.Object) {\n\tt.Helper()\n\tif err := cl.Delete(context.TODO(), thing); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn\n\t\t}\n\t\tt.Fatalf(\"Failed to delete %s: %v\", thing.GetName(), err)\n\t} else {\n\t\tt.Logf(\"Deleted %s\", thing.GetName())\n\t}\n}", "func (c *grafana) MarkDeleted() {}", "func Test_DeviceService_Remove_Success(t *testing.T) {\n\th := TestHelper{}\n\trep := new(mocks.IDeviceRepository)\n\trepAuth := 
new(mocks.IDeviceAuthRepository)\n\ts := h.CreateTestDeviceService(rep, repAuth)\n\n\tip := \"127.0.0.1\"\n\trep.On(\"Remove\", ip).Return(nil)\n\n\terr := s.Remove(ip)\n\tassert.NoError(t, err)\n}", "func (s *BasePlSqlParserListener) ExitOn_delete_clause(ctx *On_delete_clauseContext) {}", "func Delete(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) {\n\t// Add your code here:\n\t// * Make API calls (use req.Session)\n\t// * Mutate the model\n\t// * Check/set any callback context (req.CallbackContext / response.CallbackContext)\n\n\t// Construct a new handler.ProgressEvent and return it\n\tresponse := handler.ProgressEvent{\n\t\tOperationStatus: handler.Success,\n\t\tMessage: \"Delete complete\",\n\t\tResourceModel: currentModel,\n\t}\n\n\treturn response, nil\n\n\t// Not implemented, return an empty handler.ProgressEvent\n\t// and an error\n\treturn handler.ProgressEvent{}, errors.New(\"Not implemented: Delete\")\n}", "func (l *Notifier) Deleted(c *config.KdnConfig, msg string) error {\n\tc.Logger.Infof(\"Deleted: %s\", msg)\n\treturn nil\n}", "func (s) TestListenerResourceDeletionOnServerIgnored(t *testing.T) {\n\tmgmtServer := startManagementServer(t)\n\tnodeID := uuid.New().String()\n\tbs := generateBootstrapContents(t, mgmtServer.Address, true, nodeID)\n\txdsR := xdsResolverBuilder(t, bs)\n\tresources, lis := resourceWithListenerForGRPCServer(t, nodeID)\n\tmodeChangeCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis)\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for the server to update to ServingModeServing mode.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Test timed out waiting for a server to change to ServingModeServing.\")\n\tcase mode := <-modeChangeCh:\n\t\tif mode != connectivity.ServingModeServing {\n\t\t\tt.Fatalf(\"Server switched to mode %v, 
want %v\", mode, connectivity.ServingModeServing)\n\t\t}\n\t}\n\n\t// Create a ClientConn and make a successful RPCs.\n\tcc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial local test server: %v\", err)\n\t}\n\tdefer cc.Close()\n\n\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Update without a listener resource.\n\tif err := mgmtServer.Update(ctx, e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*listenerpb.Listener{},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Perform RPCs every 100 ms for 1s and verify that the serving mode does not\n\t// change on gRPC server.\n\ttimer := time.NewTimer(500 * time.Millisecond)\n\tticker := time.NewTicker(50 * time.Millisecond)\n\tt.Cleanup(ticker.Stop)\n\tfor {\n\t\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\treturn\n\t\tcase mode := <-modeChangeCh:\n\t\t\tt.Fatalf(\"Server switched to mode: %v when no switch was expected\", mode)\n\t\tcase <-ticker.C:\n\t\t}\n\t}\n}", "func (ctrler CtrlDefReactor) OnVersionDelete(obj *Version) error {\n\tlog.Info(\"OnVersionDelete is not implemented\")\n\treturn nil\n}", "func (mr *MockInternalServerMockRecorder) TlsCbDelete(arg0, arg1 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"TlsCbDelete\", reflect.TypeOf((*MockInternalServer)(nil).TlsCbDelete), arg0, arg1)\n}", "func (ctrler CtrlDefReactor) OnHostDelete(obj *Host) error {\n\tlog.Info(\"OnHostDelete is not implemented\")\n\treturn nil\n}", "func (c *Controller) onDelete(obj interface{}) {\n\tfmt.Println(\"onDelete called\")\n\tc.dequeue(obj)\n}", "func (s *MetalLBSpeaker) OnDeleteService(svc *slim_corev1.Service) error {\n\tif s.shutDown() {\n\t\treturn ErrShutDown\n\t}\n\tvar (\n\t\tsvcID = k8s.ParseServiceID(svc)\n\t\tl = 
log.WithFields(logrus.Fields{\n\t\t\t\"component\": \"MetalLBSpeaker.OnDeleteService\",\n\t\t\t\"service-id\": svcID,\n\t\t})\n\t\tmeta = fence.Meta{}\n\t)\n\n\ts.Lock()\n\tdelete(s.services, svcID)\n\ts.Unlock()\n\n\tif err := meta.FromObjectMeta(&svc.ObjectMeta); err != nil {\n\t\tl.WithError(err).Error(\"failed to parse event metadata\")\n\t}\n\n\tl.Debug(\"adding event to queue\")\n\t// Passing nil as the service will force the MetalLB speaker to withdraw\n\t// the BGP announcement.\n\ts.queue.Add(svcEvent{\n\t\tMeta: meta,\n\t\top: Delete,\n\t\tid: svcID,\n\t\tsvc: nil,\n\t\teps: nil,\n\t})\n\treturn nil\n}", "func (ctrler CtrlDefReactor) OnTenantDelete(obj *Tenant) error {\n\tlog.Info(\"OnTenantDelete is not implemented\")\n\treturn nil\n}", "func (mr *MockInternalServerMockRecorder) ProxyrCbDelete(arg0, arg1 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ProxyrCbDelete\", reflect.TypeOf((*MockInternalServer)(nil).ProxyrCbDelete), arg0, arg1)\n}", "func (mr *MockAllMockRecorder) Listener() *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Listener\", reflect.TypeOf((*MockAll)(nil).Listener))\n}", "func (c *Controller) OnDelete(del common.Cluster) {\n\tblog.Infof(\"cluster %+v delete\", del)\n\tif _, ok := c.reconcilerMap[del.ClusterID]; ok {\n\t\tblog.Infof(\"delete del reconciler for %+v\", del)\n\t\t// call cancel function\n\t\tc.cancelFuncMap[del.ClusterID]()\n\t\tdelete(c.cancelFuncMap, del.ClusterID)\n\t\tdelete(c.reconcilerMap, del.ClusterID)\n\t} else {\n\t\tblog.Infof(\"no reconciler for cluster %+v, need to delete\", del)\n\t}\n}", "func (o *Subscriber) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tif o == nil {\n\t\treturn 0, errors.New(\"models: no Subscriber provided for delete\")\n\t}\n\n\tif err := o.doBeforeDeleteHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\n\targs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), 
subscriberPrimaryKeyMapping)\n\tsql := \"DELETE FROM `subscribers` WHERE `id`=?\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, args...)\n\t}\n\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to delete from subscribers\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by delete for subscribers\")\n\t}\n\n\tif err := o.doAfterDeleteHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn rowsAff, nil\n}", "func (mr *MockDBStorageMockRecorder) DeleteCallback(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteCallback\", reflect.TypeOf((*MockDBStorage)(nil).DeleteCallback), arg0, arg1)\n}", "func (proxier *Proxier) OnServiceDelete(service *api.Service) {\n\tnamespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}\n\tif proxier.serviceChanges.Update(&namespacedName, service, nil) && proxier.isInitialized() {\n//\t\tproxier.syncRunner.Run()\n\t\tproxier.syncProxyRules()\n\t}\n}", "func (c *FakeListeners) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {\n\taction := testing.NewDeleteCollectionAction(listenersResource, c.ns, listOpts)\n\n\t_, err := c.Fake.Invokes(action, &networkextensionv1.ListenerList{})\n\treturn err\n}", "func (c *ResourcesHandler) Delete(event.DeleteEvent, workqueue.RateLimitingInterface) {}", "func (ml *ManagedListener) Delete(pipe *pipe.Pipe) {\n\tdefer trace.Tracer.ScopedTrace(\"MapRm\", *pipe)()\n\tpipe.State = share.Closed\n\tdefer ml.Monitor()()\n\tdelete(ml.Pipes, pipe)\n\tml.Active = uint64(len(ml.Pipes))\n}", "func (s *sequencePubSub) Delete(seq sequenceId) {\n\ts.reg <- sequenceReg{seq, sequenceObserver{-1, nil}}\n}", "func (s *consumerCRUD) Delete(arg 
...crud.Arg) (crud.Arg, error) {\n\tevent := eventFromArg(arg[0])\n\tconsumer := consumerFromStuct(event)\n\terr := s.client.Consumers.Delete(nil, consumer.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn consumer, nil\n}", "func (e *EventHandlerFuncs) OnDelete(table string, row Model) {\n\tif e.DeleteFunc != nil {\n\t\te.DeleteFunc(table, row)\n\t}\n}", "func (e *emitter) RemoveListener(id string) {\n\te.listeners.Delete(id)\n}", "func (m *ManagerMock) Delete(ctx context.Context, s *hub.Subscription) error {\n\targs := m.Called(ctx, s)\n\treturn args.Error(0)\n}", "func (*listener) OnDisconnect() {}", "func TestDelete(t *testing.T) {\n\tlocalStore := NewEventLocalStore()\n\n\teventTest1 := &entities.Event{ID: \"id1\"}\n\n\terr := localStore.Create(eventTest1)\n\tassert.NoError(t, err)\n\n\terr = localStore.Delete(\"id1\")\n\tassert.NoError(t, err)\n\n\t// If Event Not Found\n\terr = localStore.Delete(\"\")\n\tassert.Error(t, err)\n\tassert.Equal(t, err, event.ErrEventNotFound)\n}", "func (cli *FakeConfigAgentClient) DeleteOperationCalledCnt() int {\n\treturn int(atomic.LoadInt32(&cli.deleteOperationCalledCnt))\n}", "func (self *Mediator) OnRemove() {\n\n}", "func TestNodeDeleted(t *testing.T) {\n\tpod0 := &v1.Pod{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tNamespace: \"default\",\n\t\t\tName: \"pod0\",\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tNodeName: \"node0\",\n\t\t},\n\t\tStatus: v1.PodStatus{\n\t\t\tConditions: []v1.PodCondition{\n\t\t\t\t{\n\t\t\t\t\tType: v1.PodReady,\n\t\t\t\t\tStatus: v1.ConditionTrue,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tpod1 := &v1.Pod{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tNamespace: \"default\",\n\t\t\tName: \"pod1\",\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tNodeName: \"node0\",\n\t\t},\n\t\tStatus: v1.PodStatus{\n\t\t\tConditions: []v1.PodCondition{\n\t\t\t\t{\n\t\t\t\t\tType: v1.PodReady,\n\t\t\t\t\tStatus: v1.ConditionTrue,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfnh := &testutil.FakeNodeHandler{\n\t\tExisting: 
[]*v1.Node{\n\t\t\t{\n\t\t\t\tObjectMeta: v1.ObjectMeta{\n\t\t\t\t\tName: \"node0\",\n\t\t\t\t\tCreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t\tStatus: v1.NodeStatus{\n\t\t\t\t\tConditions: []v1.NodeCondition{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tType: v1.NodeReady,\n\t\t\t\t\t\t\tStatus: v1.ConditionUnknown,\n\t\t\t\t\t\t\tLastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),\n\t\t\t\t\t\t\tLastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tClientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*pod0, *pod1}}),\n\t\tDeleteWaitChan: make(chan struct{}),\n\t}\n\n\tfactory := informers.NewSharedInformerFactory(fnh, nil, controller.NoResyncPeriodFunc())\n\n\teventBroadcaster := record.NewBroadcaster()\n\tcloudNodeController := &CloudNodeController{\n\t\tkubeClient: fnh,\n\t\tnodeInformer: factory.Nodes(),\n\t\tcloud: &fakecloud.FakeCloud{Err: cloudprovider.InstanceNotFound},\n\t\tnodeMonitorPeriod: 5 * time.Second,\n\t\trecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: \"controllermanager\"}),\n\t}\n\teventBroadcaster.StartLogging(glog.Infof)\n\n\tcloudNodeController.Run()\n\n\tselect {\n\tcase <-fnh.DeleteWaitChan:\n\tcase <-time.After(wait.ForeverTestTimeout):\n\t\tt.Errorf(\"Timed out waiting %v for node to be deleted\", wait.ForeverTestTimeout)\n\t}\n\tif len(fnh.DeletedNodes) != 1 || fnh.DeletedNodes[0].Name != \"node0\" {\n\t\tt.Errorf(\"Node was not deleted\")\n\t}\n}", "func (mr *MockHooksMockRecorder) OnDelete(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"OnDelete\", reflect.TypeOf((*MockHooks)(nil).OnDelete), arg0)\n}", "func (mgr *endpointManager) OnDeleteNode(node *slim_corev1.Node,\n\tswg *lock.StoppableWaitGroup) error {\n\n\treturn nil\n}", "func (client ThreatIntelligenceIndicatorClient) DeleteResponder(resp *http.Response) (result 
autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func (srv *Server) trackListener(ln *net.Listener, add bool) bool {\n\tsrv.mu.Lock()\n\tdefer srv.mu.Unlock()\n\tif add {\n\t\tif srv.shuttingDown() {\n\t\t\treturn false\n\t\t}\n\t\tif srv.listeners == nil {\n\t\t\tsrv.listeners = make(map[*net.Listener]struct{})\n\t\t}\n\t\tsrv.listeners[ln] = struct{}{}\n\t\treturn true\n\t}\n\tdelete(srv.listeners, ln)\n\treturn true\n}", "func AddListener_DeAuth(fn func(string)) {\n\t_deAuthListeners = append(_deAuthListeners, fn)\n}", "func (mr *MockAuthCheckerClientMockRecorder) Delete(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Delete\", reflect.TypeOf((*MockAuthCheckerClient)(nil).Delete), varargs...)\n}", "func (client AppsClient) DeleteResponder(resp *http.Response) (result OperationStatus, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (ctrler CtrlDefReactor) OnBucketDelete(obj *Bucket) error {\n\tlog.Info(\"OnBucketDelete is not implemented\")\n\treturn nil\n}", "func (s) TestSuccessCaseDeletedRoute(t *testing.T) {\n\trh, fakeClient, ch := setupTests()\n\n\trh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true})\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\t// Will start two watches.\n\tif err := waitForFuncWithNames(ctx, fakeClient.WaitForWatchRouteConfig, route1, route2); err != nil {\n\t\tt.Fatalf(\"Error while waiting for names: %v\", err)\n\t}\n\n\t// 
Update the RDSHandler with route names which deletes a route name to\n\t// watch. This should trigger the RDSHandler to cancel the watch for the\n\t// deleted route name to watch.\n\trh.updateRouteNamesToWatch(map[string]bool{route1: true})\n\t// This should delete the watch for route2.\n\trouteNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsClient.CancelRDS failed with error %v\", err)\n\t}\n\tif routeNameDeleted != route2 {\n\t\tt.Fatalf(\"xdsClient.CancelRDS called for route %v, want %v\", routeNameDeleted, route2)\n\t}\n\n\trdsUpdate := xdsresource.RouteConfigUpdate{}\n\t// Invoke callback with the xds client with a certain route update. Due to\n\t// this route update updating every route name that rds handler handles,\n\t// this should write to the update channel to send to the listener.\n\tfakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil)\n\trhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate}\n\tselect {\n\tcase rhu := <-ch:\n\t\tif diff := cmp.Diff(rhu.updates, rhuWant); diff != \"\" {\n\t\t\tt.Fatalf(\"got unexpected route update, diff (-got, +want): %v\", diff)\n\t\t}\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Timed out waiting for update from update channel.\")\n\t}\n\n\trh.close()\n\trouteNameDeleted, err = fakeClient.WaitForCancelRouteConfigWatch(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsClient.CancelRDS failed with error: %v\", err)\n\t}\n\tif routeNameDeleted != route1 {\n\t\tt.Fatalf(\"xdsClient.CancelRDS called for route %v, want %v\", routeNameDeleted, route1)\n\t}\n}", "func (e *Envoy) RemoveListener(name string, wg *completion.WaitGroup) {\n\te.xds.removeListener(name, wg)\n}", "func (e *Event) Delete(c echo.Context, id int) error {\n\tevent, err := e.udb.View(e.db, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// if err := e.rbac.IsLowerRole(c, event.Role.AccessLevel); err != nil {\n\t// \treturn err\n\t// }\n\treturn e.udb.Delete(e.db, event)\n}", "func (i 
*IpScheduler) OnDelete(del interface{}) {\n\tdelPod, ok := del.(*v1.Pod)\n\tif !ok {\n\t\treturn\n\t}\n\tif delPod.GetNamespace() == \"bcs-system\" {\n\t\treturn\n\t}\n\tblog.Infof(\"pod %s/%s is deletd\", delPod.GetName(), delPod.GetNamespace())\n\ti.CacheLock.Lock()\n\ti.NodeIPCache.DeleteResource(cache.GetMetaKey(delPod.GetName(), delPod.GetNamespace()))\n\ti.CacheLock.Unlock()\n}", "func (ctrler CtrlDefReactor) OnLicenseDelete(obj *License) error {\n\tlog.Info(\"OnLicenseDelete is not implemented\")\n\treturn nil\n}", "func (s *Basememcached_protocolListener) ExitDelete_command(ctx *Delete_commandContext) {}", "func (s *ServerSuite) TestSrvRTMOnDelete(c *C) {\n\te1 := testutils.NewResponder(\"Hi, I'm endpoint 1\")\n\tdefer e1.Close()\n\n\tb := MakeBatch(Batch{Addr: \"localhost:11300\", Route: `Path(\"/\")`, URL: e1.URL})\n\tc.Assert(s.mux.Init(b.Snapshot()), IsNil)\n\tc.Assert(s.mux.Start(), IsNil)\n\tdefer s.mux.Stop(true)\n\n\t// When: an existing backend server is removed and added again.\n\tfor i := 0; i < 3; i++ {\n\t\tc.Assert(GETResponse(c, b.FrontendURL(\"/\")), Equals, \"Hi, I'm endpoint 1\")\n\t}\n\tc.Assert(s.mux.DeleteServer(b.SK), IsNil)\n\tc.Assert(s.mux.UpsertServer(b.BK, b.S), IsNil)\n\tfor i := 0; i < 4; i++ {\n\t\tc.Assert(GETResponse(c, b.FrontendURL(\"/\")), Equals, \"Hi, I'm endpoint 1\")\n\t}\n\n\t// Then: total count includes only metrics after the server was re-added.\n\trts, err := s.mux.ServerStats(b.SK)\n\tc.Assert(err, IsNil)\n\tc.Assert(rts.Counters.Total, Equals, int64(4))\n}", "func (mr *MockProviderMockRecorder) OnServiceDelete(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"OnServiceDelete\", reflect.TypeOf((*MockProvider)(nil).OnServiceDelete), arg0)\n}", "func (client StorageTargetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, 
http.StatusAccepted, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func Deleted(c *routing.Context, msg string, service string) error {\n\tResponse(c, `{\"error\": false, \"msg\": \"`+msg+`\"}`, 200, service, \"application/json\")\n\treturn nil\n}", "func TestDelete(t *testing.T) {\n\tRunWithInstance(func(instance *Instance) {\n\t\tInsertFixtures(instance, []EntryFixture{\n\t\t\t{Name: \"int\", Value: \"2891\", ValueType: 1},\n\t\t\t{Name: \"string\", Value: \"hello world!\", ValueType: 3},\n\t\t})\n\n\t\tif err := instance.Delete(\"int\"); err != nil {\n\t\t\tt.Error(\"Instance.Delete: got error:\\n\", err)\n\t\t}\n\n\t\tif err := panicked(func() { instance.MustDelete(\"string\") }); err != nil {\n\t\t\tt.Error(\"Instance.MustDelete: got panic:\\n\", err)\n\t\t}\n\n\t\tif err := instance.Delete(\"foo\"); err == nil {\n\t\t\tt.Error(\"Instance.Delete: expected error with non-existent entry\")\n\t\t} else if _, ok := err.(*ErrNoEntry); !ok {\n\t\t\tt.Error(\"Instance.Delete: expected error of type *ErrNoEntry\")\n\t\t}\n\n\t\tif err := panicked(func() { instance.MustDelete(\"foo\") }); err == nil {\n\t\t\tt.Error(\"Instance.MustDelete: expected panic with non-existent entry\")\n\t\t}\n\t})\n}", "func (s *systemtestSuite) TestServiceAddDeleteServiceVxlan(c *C) {\n\ts.testServiceAddDeleteService(c, \"vxlan\")\n}", "func (r *ScanRequest) Delete(*cloudformationevt.Event, *runtime.Context) error {\n return nil\n}", "func (mr *MockInternalClientMockRecorder) TlsCbDelete(ctx, in interface{}, opts ...interface{}) *gomock.Call {\n\tvarargs := append([]interface{}{ctx, in}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"TlsCbDelete\", reflect.TypeOf((*MockInternalClient)(nil).TlsCbDelete), varargs...)\n}", "func (mr *MockWatcherMockRecorder) Delete() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Delete\", reflect.TypeOf((*MockWatcher)(nil).Delete))\n}", 
"func (client JobClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusAccepted,http.StatusNoContent),\n autorest.ByClosing())\n result.Response = resp\n return\n }", "func (e *EventMapper) DeleteEvent() {\n\n}", "func TestDelete(t *testing.T) {\n\n\tt.Run(\"Successful delete test record\", func(t *testing.T) {\n\t\ttableName := \"sls_rtc_connections\"\n\t\ttable := newTable(tableName)\n\t\tpk := pkPrefixRoom + \"test2\"\n\n\t\terr := table.delete(pk)\n\t\tassert.Nil(t, err)\n\t\tfetched, _ := table.find(pk)\n\t\tassert.Nil(t, fetched)\n\t})\n}", "func (mr *MockProviderMockRecorder) OnEndpointsDelete(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"OnEndpointsDelete\", reflect.TypeOf((*MockProvider)(nil).OnEndpointsDelete), arg0)\n}", "func (e *logSrcEmitter) RemoveListener(id string) {\n\te.listeners.Delete(id)\n}", "func deleteOp(ctx context.Context, c *messaging.Client, fb wasabee.FirebaseCmd) error {\n\tdata := map[string]string{\n\t\t\"gid\": string(fb.Gid),\n\t\t\"msg\": fb.Msg,\n\t\t\"cmd\": fb.Cmd.String(),\n\t}\n\n\ttokens, err := fb.Gid.FirebaseTokens()\n\tif err != nil {\n\t\twasabee.Log.Error(err)\n\t\treturn err\n\t}\n\tgenericMulticast(ctx, c, data, tokens)\n\treturn nil\n}" ]
[ "0.6834911", "0.64514387", "0.6440485", "0.62711996", "0.6267627", "0.62583685", "0.60565895", "0.6054767", "0.6035084", "0.5932007", "0.5926271", "0.5908957", "0.5864843", "0.5832359", "0.5828633", "0.5824492", "0.5766709", "0.5691311", "0.5686258", "0.5611051", "0.5608319", "0.55701953", "0.5566638", "0.5563843", "0.5524359", "0.5512598", "0.54628754", "0.54386336", "0.54384226", "0.543087", "0.54263335", "0.5421899", "0.54102814", "0.5402306", "0.5399889", "0.5391828", "0.539146", "0.53890514", "0.53741986", "0.53649956", "0.53414047", "0.5339842", "0.5335173", "0.53262484", "0.5324084", "0.5298143", "0.5293297", "0.5279585", "0.52689373", "0.52528393", "0.52477616", "0.5240923", "0.5210905", "0.5202377", "0.51953226", "0.5193558", "0.51783764", "0.51704365", "0.51637155", "0.5162864", "0.5161598", "0.51607853", "0.51578677", "0.5156243", "0.51312983", "0.5124337", "0.5118763", "0.51172745", "0.51163954", "0.51071966", "0.5107038", "0.51023644", "0.5101874", "0.5100523", "0.50987166", "0.50982404", "0.50831926", "0.50818425", "0.5074162", "0.50720423", "0.5066363", "0.5064754", "0.50620645", "0.50586784", "0.50574046", "0.5052376", "0.50509214", "0.50495636", "0.504807", "0.50398105", "0.50323015", "0.5026779", "0.50267047", "0.50242233", "0.5021366", "0.5012844", "0.5004713", "0.50046134", "0.50008595", "0.49947682" ]
0.6725229
1
EnsureMultiListeners mocks base method
func (m *MockLoadBalance) EnsureMultiListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EnsureMultiListeners", region, lbID, listeners) ret0, _ := ret[0].(map[string]string) ret1, _ := ret[1].(error) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) EnsureMultiSegmentListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiSegmentListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoadBalance) DeleteMultiListeners(region, lbID string, listeners []*v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockEventLogger) AppendCheckMulti(assumedVersion uint64, events ...eventlog.EventData) (uint64, uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{assumedVersion}\n\tfor _, a := range events {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AppendCheckMulti\", varargs...)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(uint64)\n\tret2, _ := ret[2].(time.Time)\n\tret3, _ := ret[3].(error)\n\treturn ret0, ret1, ret2, ret3\n}", "func (m *MockEventLogger) AppendMulti(events ...eventlog.EventData) (uint64, uint64, uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range events {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AppendMulti\", varargs...)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(uint64)\n\tret2, _ := ret[2].(uint64)\n\tret3, _ := ret[3].(time.Time)\n\tret4, _ := ret[4].(error)\n\treturn ret0, ret1, ret2, ret3, ret4\n}", "func TestMsgListenerMulti(t *testing.T) {\n\tml := newMsgListeners()\n\n\tcount := 0\n\tcids := testCids()\t// TODO: The wrong Directory type was being used for MapEntries.\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\n\t})\t// TODO: Исправления для OSX\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\t// TODO: Test emails 1\n\t})\n\tml.onMsgComplete(cids[1], func(err error) 
{\n\t\tcount++\n\t})\n\n\tml.fireMsgComplete(cids[0], nil)\n\trequire.Equal(t, 2, count)\n\n\tml.fireMsgComplete(cids[1], nil)\n\trequire.Equal(t, 3, count)\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiListeners), region, lbID, listeners)\n}", "func TestActiveMultiEvent_Deactivate(t *testing.T) {\r\n\tnumber := 10\r\n\tvar events []*ActiveEvent\r\n\tvar mock []*mockUnixHelper\r\n\r\n\tfor i := 0; i < number; i++ {\r\n\t\tunixMock := &mockUnixHelper{}\r\n\t\tnewActive := &ActiveEvent{FileDescriptor: i, unix: unixMock}\r\n\t\tunixMock.On(\"close\", i).Return(nil).Once()\r\n\t\tevents = append(events, newActive)\r\n\t\tmock = append(mock, unixMock)\r\n\t}\r\n\r\n\tnewActiveMulti := ActiveMultiEvent{events: events}\r\n\tnewActiveMulti.Deactivate()\r\n\r\n\trequire.Nil(t, newActiveMulti.events)\r\n\tfor _, event := range events {\r\n\t\trequire.Nil(t, event)\r\n\t}\r\n\tfor _, m := range mock {\r\n\t\tm.AssertExpectations(t)\r\n\t}\r\n}", "func (m *MockUsecase) ListenEvents(userID int) (chan map[string]interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListenEvents\", userID)\n\tret0, _ := ret[0].(chan map[string]interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockIRandomPresenter) OnListReceived(arg0 []aggregates.Topic) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnListReceived\", arg0)\n}", "func (m *MockCallback) OnRemoveAll() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemoveAll\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestResilientMultiWriter(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\twriters []io.Writer\n\t}{\n\t\t{\n\t\t\tname: \"All valid writers\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: 
false,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"All invalid writers\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"First invalid writer\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"First valid writer\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\twriters := tt.writers\n\t\tmultiWriter := MultiLevelWriter(writers...)\n\n\t\tlogger := New(multiWriter).With().Timestamp().Logger().Level(InfoLevel)\n\t\tlogger.Info().Msg(\"Test msg\")\n\n\t\tif len(writers) != writeCalls {\n\t\t\tt.Errorf(\"Expected %d writers to have been called but only %d were.\", len(writers), writeCalls)\n\t\t}\n\t\twriteCalls = 0\n\t}\n}", "func (m *MockListener) Index(filters ...ListenerFilter) (api.Listeners, error) {\n\tvarargs := []interface{}{}\n\tfor _, a := range filters {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Index\", varargs...)\n\tret0, _ := ret[0].(api.Listeners)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestMultipleRegisterCalls(t *testing.T) {\n\tRegister(\"multiple-register-driver-1\")\n\trequire.PanicsWithError(t, \"Register called twice for driver multiple-register-driver-1\", func() {\n\t\tRegister(\"multiple-register-driver-1\")\n\t})\n\n\t// Should be no error.\n\tRegister(\"multiple-register-driver-2\")\n}", "func (m *MockMetrics) MultiCreateSuccessResponseCounter() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"MultiCreateSuccessResponseCounter\")\n}", "func (m *MockProvider) OnEndpointsAdd(arg0 
*v1.Endpoints) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsAdd\", arg0)\n}", "func (_m *ELBv2APIClient) DescribeListeners(_a0 context.Context, _a1 *elasticloadbalancingv2.DescribeListenersInput, _a2 ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DescribeListenersOutput, error) {\n\t_va := make([]interface{}, len(_a2))\n\tfor _i := range _a2 {\n\t\t_va[_i] = _a2[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _a0, _a1)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 *elasticloadbalancingv2.DescribeListenersOutput\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *elasticloadbalancingv2.DescribeListenersInput, ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DescribeListenersOutput, error)); ok {\n\t\treturn rf(_a0, _a1, _a2...)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, *elasticloadbalancingv2.DescribeListenersInput, ...func(*elasticloadbalancingv2.Options)) *elasticloadbalancingv2.DescribeListenersOutput); ok {\n\t\tr0 = rf(_a0, _a1, _a2...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*elasticloadbalancingv2.DescribeListenersOutput)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *elasticloadbalancingv2.DescribeListenersInput, ...func(*elasticloadbalancingv2.Options)) error); ok {\n\t\tr1 = rf(_a0, _a1, _a2...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func TestDefaultBrokerWithManyTriggers(t *testing.T) {\n\tclient := setup(t, true)\n\tdefer tearDown(client)\n\n\t// Label namespace so that it creates the default broker.\n\tif err := client.LabelNamespace(map[string]string{\"knative-eventing-injection\": \"enabled\"}); err != nil {\n\t\tt.Fatalf(\"Error annotating namespace: %v\", err)\n\t}\n\n\t// Wait for default broker ready.\n\tif err := client.WaitForResourceReady(defaultBrokerName, common.BrokerTypeMeta); err != nil {\n\t\tt.Fatalf(\"Error waiting for default broker to become ready: %v\", 
err)\n\t}\n\n\t// These are the event types and sources that triggers will listen to, as well as the selectors\n\t// to set in the subscriber and services pods.\n\teventsToReceive := []eventReceiver{\n\t\t{eventTypeAndSource{Type: any, Source: any}, newSelector()},\n\t\t{eventTypeAndSource{Type: eventType1, Source: any}, newSelector()},\n\t\t{eventTypeAndSource{Type: any, Source: eventSource1}, newSelector()},\n\t\t{eventTypeAndSource{Type: eventType1, Source: eventSource1}, newSelector()},\n\t}\n\n\t// Create subscribers.\n\tfor _, event := range eventsToReceive {\n\t\tsubscriberName := name(\"dumper\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tpod := resources.EventLoggerPod(subscriberName)\n\t\tclient.CreatePodOrFail(pod, common.WithService(subscriberName))\n\t}\n\n\t// Create triggers.\n\tfor _, event := range eventsToReceive {\n\t\ttriggerName := name(\"trigger\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tsubscriberName := name(\"dumper\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tclient.CreateTriggerOrFail(triggerName,\n\t\t\tresources.WithSubscriberRefForTrigger(subscriberName),\n\t\t\tresources.WithTriggerFilter(event.typeAndSource.Source, event.typeAndSource.Type),\n\t\t)\n\t}\n\n\t// Wait for all test resources to become ready before sending the events.\n\tif err := client.WaitForAllTestResourcesReady(); err != nil {\n\t\tt.Fatalf(\"Failed to get all test resources ready: %v\", err)\n\t}\n\n\t// These are the event types and sources that will be send.\n\teventsToSend := []eventTypeAndSource{\n\t\t{eventType1, eventSource1},\n\t\t{eventType1, eventSource2},\n\t\t{eventType2, eventSource1},\n\t\t{eventType2, eventSource2},\n\t}\n\t// Map to save the expected events per dumper so that we can verify the delivery.\n\texpectedEvents := make(map[string][]string)\n\t// Map to save the unexpected events per dumper so that we can verify that they weren't delivered.\n\tunexpectedEvents := 
make(map[string][]string)\n\tfor _, eventToSend := range eventsToSend {\n\t\t// Create cloud event.\n\t\t// Using event type and source as part of the body for easier debugging.\n\t\tbody := fmt.Sprintf(\"Body-%s-%s\", eventToSend.Type, eventToSend.Source)\n\t\tcloudEvent := &resources.CloudEvent{\n\t\t\tSource: eventToSend.Source,\n\t\t\tType: eventToSend.Type,\n\t\t\tData: fmt.Sprintf(`{\"msg\":%q}`, body),\n\t\t}\n\t\t// Create sender pod.\n\t\tsenderPodName := name(\"sender\", eventToSend.Type, eventToSend.Source)\n\t\tif err := client.SendFakeEventToAddressable(senderPodName, defaultBrokerName, common.BrokerTypeMeta, cloudEvent); err != nil {\n\t\t\tt.Fatalf(\"Error send cloud event to broker: %v\", err)\n\t\t}\n\n\t\t// Check on every dumper whether we should expect this event or not, and add its body\n\t\t// to the expectedEvents/unexpectedEvents maps.\n\t\tfor _, eventToReceive := range eventsToReceive {\n\t\t\tsubscriberName := name(\"dumper\", eventToReceive.typeAndSource.Type, eventToReceive.typeAndSource.Source)\n\t\t\tif shouldExpectEvent(&eventToSend, &eventToReceive, t.Logf) {\n\t\t\t\texpectedEvents[subscriberName] = append(expectedEvents[subscriberName], body)\n\t\t\t} else {\n\t\t\t\tunexpectedEvents[subscriberName] = append(unexpectedEvents[subscriberName], body)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, event := range eventsToReceive {\n\t\tsubscriberName := name(\"dumper\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tif err := client.CheckLog(subscriberName, common.CheckerContainsAll(expectedEvents[subscriberName])); err != nil {\n\t\t\tt.Fatalf(\"Event(s) not found in logs of subscriber pod %q: %v\", subscriberName, err)\n\t\t}\n\t\t// At this point all the events should have been received in the pod.\n\t\t// We check whether we find unexpected events. 
If so, then we fail.\n\t\tfound, err := client.FindAnyLogContents(subscriberName, unexpectedEvents[subscriberName])\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed querying to find log contents in pod %q: %v\", subscriberName, err)\n\t\t}\n\t\tif found {\n\t\t\tt.Fatalf(\"Unexpected event(s) found in logs of subscriber pod %q\", subscriberName)\n\t\t}\n\t}\n}", "func (m *MockAll) Listener() Listener {\n\tret := m.ctrl.Call(m, \"Listener\")\n\tret0, _ := ret[0].(Listener)\n\treturn ret0\n}", "func (m *MockLoadBalance) EnsureListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar memLog base.InMemLogger\n\tmem := vfs.NewMem()\n\trequire.NoError(t, mem.MkdirAll(\"ext\", 0755))\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(t *testing.T, td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tmemLog.Reset()\n\t\t\tlel := MakeLoggingEventListener(&memLog)\n\t\t\tflushBegin, flushEnd := lel.FlushBegin, lel.FlushEnd\n\t\t\tlel.FlushBegin = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushBegin(info)\n\t\t\t}\n\t\t\tlel.FlushEnd = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushEnd(info)\n\t\t\t}\n\t\t\topts := &Options{\n\t\t\t\tFS: vfs.WithLogging(mem, memLog.Infof),\n\t\t\t\tFormatMajorVersion: internalFormatNewest,\n\t\t\t\tEventListener: &lel,\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tL0CompactionThreshold: 10,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t}\n\t\t\t// The table stats collector runs asynchronously and its\n\t\t\t// timing is less predictable. It increments nextJobID, which\n\t\t\t// can make these tests flaky. 
The TableStatsLoaded event is\n\t\t\t// tested separately in TestTableStats.\n\t\t\topts.private.disableTableStats = true\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", opts)\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tt := time.Now()\n\t\t\td.timeNow = func() time.Time {\n\t\t\t\tt = t.Add(time.Second)\n\t\t\t\treturn t\n\t\t\t}\n\t\t\td.testingAlwaysWaitForCleanup = true\n\t\t\treturn memLog.String()\n\n\t\tcase \"close\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"flush\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"compact\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\"), false); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"checkpoint\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Checkpoint(\"checkpoint\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"disable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\td.mu.Lock()\n\t\t\td.disableFileDeletions()\n\t\t\td.mu.Unlock()\n\t\t\treturn memLog.String()\n\n\t\tcase \"enable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\tfunc() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\tmemLog.Infof(\"%v\", r)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\td.mu.Lock()\n\t\t\t\tdefer d.mu.Unlock()\n\t\t\t\td.enableFileDeletions()\n\t\t\t}()\n\t\t\td.TestOnlyWaitForCleaning()\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest\":\n\t\t\tmemLog.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := 
sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest-flushable\":\n\t\t\tmemLog.Reset()\n\n\t\t\t// Prevent flushes during this test to ensure determinism.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = true\n\t\t\td.mu.Unlock()\n\n\t\t\tb := d.NewBatch()\n\t\t\tif err := b.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Apply(b, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\twriteTable := func(name string, key byte) error {\n\t\t\t\tf, err := mem.Create(name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tw := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t\t})\n\t\t\t\tif err := w.Add(base.MakeInternalKey([]byte{key}, 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.Close(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\ttableA, tableB := \"ext/a\", \"ext/b\"\n\t\t\tif err := writeTable(tableA, 'a'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := writeTable(tableB, 'b'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{tableA, tableB}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\n\t\t\t// Re-enable flushes, to allow the subsequent flush to proceed.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = false\n\t\t\td.mu.Unlock()\n\t\t\tif err := d.Flush(); err != nil 
{\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"metrics\":\n\t\t\t// The asynchronous loading of table stats can change metrics, so\n\t\t\t// wait for all the tables' stats to be loaded.\n\t\t\td.mu.Lock()\n\t\t\td.waitTableStats()\n\t\t\td.mu.Unlock()\n\n\t\t\treturn d.Metrics().String()\n\n\t\tcase \"sstables\":\n\t\t\tvar buf bytes.Buffer\n\t\t\ttableInfos, _ := d.SSTables()\n\t\t\tfor i, level := range tableInfos {\n\t\t\t\tif len(level) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(&buf, \"%d:\\n\", i)\n\t\t\t\tfor _, m := range level {\n\t\t\t\t\tfmt.Fprintf(&buf, \" %d:[%s-%s]\\n\",\n\t\t\t\t\t\tm.FileNum, m.Smallest.UserKey, m.Largest.UserKey)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func (m *MockListener) Create(listener api.Listener) (api.Listener, error) {\n\tret := m.ctrl.Call(m, \"Create\", listener)\n\tret0, _ := ret[0].(api.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestCheckEvents(t *testing.T) {\n\ttestNamespace := \"test_namespace\"\n\tcha := make(chan *events.Envelope)\n\terrorsCh := make(chan error)\n\tme := &mockEvt{\n\t\tmockSubscribe: func(ctx context.Context, filter ...string) (ch <-chan *events.Envelope, errs <-chan error) {\n\t\t\treturn cha, errorsCh\n\t\t},\n\t}\n\titf := &fake.MockedContainerdClient{\n\t\tMockEvents: func() containerd.EventService {\n\t\t\treturn containerd.EventService(me)\n\t\t},\n\t\tMockNamespaces: func(ctx context.Context) ([]string, error) {\n\t\t\treturn []string{testNamespace}, nil\n\t\t},\n\t\tMockContainers: func(namespace string) ([]containerd.Container, error) {\n\t\t\treturn nil, nil\n\t\t},\n\t}\n\t// Test the basic listener\n\tsub := createEventSubscriber(\"subscriberTest1\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttp := &containerdevents.TaskPaused{\n\t\tContainerID: \"42\",\n\t}\n\n\tvp, err := 
typeurl.MarshalAny(tp)\n\tassert.NoError(t, err)\n\n\ten := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/paused\",\n\t\tEvent: vp,\n\t}\n\tcha <- &en\n\n\ttimeout := time.NewTimer(2 * time.Second)\n\tticker := time.NewTicker(5 * time.Millisecond)\n\tcondition := false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tev := sub.Flush(time.Now().Unix())\n\tassert.Len(t, ev, 1)\n\tassert.Equal(t, ev[0].Topic, \"/tasks/paused\")\n\terrorsCh <- fmt.Errorf(\"chan breaker\")\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting for error\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Test the multiple events one unsupported\n\tsub = createEventSubscriber(\"subscriberTest2\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttk := &containerdevents.TaskOOM{\n\t\tContainerID: \"42\",\n\t}\n\tvk, err := typeurl.MarshalAny(tk)\n\tassert.NoError(t, err)\n\n\tek := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/oom\",\n\t\tEvent: vk,\n\t}\n\n\tnd := &containerdevents.NamespaceDelete{\n\t\tName: \"k10s.io\",\n\t}\n\tvnd, err := typeurl.MarshalAny(nd)\n\tassert.NoError(t, err)\n\n\tevnd := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/namespaces/delete\",\n\t\tEvent: vnd,\n\t}\n\n\tcha <- &ek\n\tcha <- &evnd\n\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\tev2 := 
sub.Flush(time.Now().Unix())\n\tfmt.Printf(\"\\n\\n 2/ Flush %v\\n\\n\", ev2)\n\tassert.Len(t, ev2, 1)\n\tassert.Equal(t, ev2[0].Topic, \"/tasks/oom\")\n}", "func (m *MockListener) Modify(listener api.Listener) (api.Listener, error) {\n\tret := m.ctrl.Call(m, \"Modify\", listener)\n\tret0, _ := ret[0].(api.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockConn) Multi(ops ...interface{}) ([]zk.MultiResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range ops {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Multi\", varargs...)\n\tret0, _ := ret[0].([]zk.MultiResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockListener) Get(listenerKey api.ListenerKey) (api.Listener, error) {\n\tret := m.ctrl.Call(m, \"Get\", listenerKey)\n\tret0, _ := ret[0].(api.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (suite *AddCommandTestSuite) TestExecuteWhenMultipleTracksFound() {\n\n}", "func TestCmdConfigChangeEvents(t *testing.T) {\n\tdefer cleanTestArtifacts(t)\n\tvar err error\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\n\toldconf := &guerrilla.AppConfig{}\n\tif err := oldconf.Load([]byte(configJsonA)); err != nil {\n\t\tt.Error(\"configJsonA is invalid\", err)\n\t}\n\n\tnewconf := &guerrilla.AppConfig{}\n\tif err := newconf.Load([]byte(configJsonB)); err != nil {\n\t\tt.Error(\"configJsonB is invalid\", err)\n\t}\n\n\tnewerconf := &guerrilla.AppConfig{}\n\tif err := newerconf.Load([]byte(configJsonC)); err != nil {\n\t\tt.Error(\"configJsonC is invalid\", err)\n\t}\n\n\texpectedEvents := map[guerrilla.Event]bool{\n\t\tguerrilla.EventConfigBackendConfig: false,\n\t\tguerrilla.EventConfigServerNew: false,\n\t}\n\tmainlog, err = getTestLog()\n\tif err != nil 
{\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\n\tbcfg := backends.BackendConfig{\"log_received_mails\": true}\n\tbackend, err := backends.New(bcfg, mainlog)\n\tapp, err := guerrilla.New(oldconf, backend, mainlog)\n\tif err != nil {\n\t\tt.Error(\"Failed to create new app\", err)\n\t}\n\ttoUnsubscribe := map[guerrilla.Event]func(c *guerrilla.AppConfig){}\n\ttoUnsubscribeS := map[guerrilla.Event]func(c *guerrilla.ServerConfig){}\n\n\tfor event := range expectedEvents {\n\t\t// Put in anon func since range is overwriting event\n\t\tfunc(e guerrilla.Event) {\n\t\t\tif strings.Index(e.String(), \"server_change\") == 0 {\n\t\t\t\tf := func(c *guerrilla.ServerConfig) {\n\t\t\t\t\texpectedEvents[e] = true\n\t\t\t\t}\n\t\t\t\t_ = app.Subscribe(e, f)\n\t\t\t\ttoUnsubscribeS[e] = f\n\t\t\t} else {\n\t\t\t\tf := func(c *guerrilla.AppConfig) {\n\t\t\t\t\texpectedEvents[e] = true\n\t\t\t\t}\n\t\t\t\t_ = app.Subscribe(e, f)\n\t\t\t\ttoUnsubscribe[e] = f\n\t\t\t}\n\n\t\t}(event)\n\t}\n\n\t// emit events\n\tnewconf.EmitChangeEvents(oldconf, app)\n\tnewerconf.EmitChangeEvents(newconf, app)\n\t// unsubscribe\n\tfor unevent, unfun := range toUnsubscribe {\n\t\t_ = app.Unsubscribe(unevent, unfun)\n\t}\n\tfor unevent, unfun := range toUnsubscribeS {\n\t\t_ = app.Unsubscribe(unevent, unfun)\n\t}\n\n\tfor event, val := range expectedEvents {\n\t\tif val == false {\n\t\t\tt.Error(\"Did not fire config change event:\", event)\n\t\t\tt.FailNow()\n\t\t}\n\t}\n\n}", "func (m *MockProvider) OnEndpointsUpdate(arg0, arg1 *v1.Endpoints) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsUpdate\", arg0, arg1)\n}", "func (_m *MockNetwork) AppendSubscriber() chan []net.IP {\n\tret := _m.ctrl.Call(_m, \"AppendSubscriber\")\n\tret0, _ := ret[0].(chan []net.IP)\n\treturn ret0\n}", "func (m *MockMultiClusterRoleEventWatcher) AddEventHandler(ctx context.Context, h controller.MultiClusterRoleEventHandler, predicates ...predicate.Predicate) error {\n\tm.ctrl.T.Helper()\n\tvarargs 
:= []interface{}{ctx, h}\n\tfor _, a := range predicates {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AddEventHandler\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockStream) AddEventListener(streamEventListener types.StreamEventListener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddEventListener\", streamEventListener)\n}", "func TestMultiHandlers(t *testing.T) {\n\t// New logger for output stderr\n\tlogStd := New(\"\", LevelDebug, os.Stderr)\n\tAddHandler(logStd)\n\n\t// New logger for output file\n\tfw, err := FileWriter(\"logs/test.log\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlogFile := New(\"\", LevelDebug, fw)\n\tAddHandler(logFile)\n\n\t// Test\n\tDebug(\"MultiHandlers: Debug\")\n\tInfo(\"MultiHandlers: Info\")\n\tWarn(\"MultiHandlers: Warn\")\n\tError(\"MultiHandlers: Error\")\n\t//Panic(\"MultiHandlers: Panic\")\n\t//Fatal(\"MultiHandlers: Fatal\")\n}", "func TestEnd2End(t *testing.T) {\n\n\n\t//\tmg := mailgunprovider.New(utilities.GetLogger(\"MG\"),mailgunprovider.BitLabConfig(\"09fe27\"),NewMockFailureStrategy());\n\t//\taz := amazonsesprovider.New(utilities.GetLogger(\"MG\"),amazonsesprovider.BitLabConfig(\"09fe27\"),NewMockFailureStrategy());\n\t// sg := sendgridprovider.New(utilities.GetLogger(\"MG\"),sendgridprovider.BitLabConfig(\"09fe27\"),NewMockFailureStrategy());\n\tvar provider = loopbackprovider.New(utilities.GetLogger(\"loop1\"), mtacontainer.NewThressHoldFailureStrategy(12));\n\n\tscheduler := mtacontainer.NewRoundRobinScheduler([]mtacontainer.MTAProvider{provider});\n\n\tcontainer := mtacontainer.New(scheduler);\n\n\tmail1 := FreshTestMail(provider, \"[email protected]\");\n\tmail2 := FreshTestMail(provider, \"[email protected]\");\n\tmail3 := FreshTestMail(provider, \"[email protected]\");\n\n\tcontainer.GetOutgoing() <- mail1;\n\tcontainer.GetOutgoing() <- mail2;\n\tcontainer.GetOutgoing() <- mail3;\n\n\tgo func() {\n\t\t<-container.GetIncoming()\n\t}();\n\n\ti := 0;\n\tfor 
{\n\t\tselect {\n\t\tcase e := <-container.GetEvent():\n\t\t\tlog.Println(\"Reading event from container: \" + e.GetError().Error());\n\t\t\tif i == 2 {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\ti = i + 1;\n\t\t}\n\t}\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiSegmentListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiSegmentListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiSegmentListeners), region, lbID, listeners)\n}", "func (m *MockUsecase) StopListenEvents(userID int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"StopListenEvents\", userID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestSplitListenersToDiffProtocol(t *testing.T) {\n\ttestListeners := []*networkextensionv1.Listener{\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8000,\n\t\t\t\tProtocol: \"HTTP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8001,\n\t\t\t\tProtocol: \"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8002,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8003,\n\t\t\t\tProtocol: \"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8004,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t}\n\tliGroup := splitListenersToDiffProtocol(testListeners)\n\tfor _, list := range liGroup {\n\t\tt.Logf(\"%+v\", list)\n\t\ttmpProtocol := make(map[string]struct{})\n\t\tfor _, li := range list {\n\t\t\ttmpProtocol[li.Spec.Protocol] = struct{}{}\n\t\t}\n\t\tif len(tmpProtocol) != 1 {\n\t\t\tt.Errorf(\"list %v contains more than one protocol %v\", list, tmpProtocol)\n\t\t}\n\t}\n}", "func TestProcessEventHandling(t *testing.T) {\n\tctx := context.Background()\n\n\tclient := mocks.NewEventMonitoringModuleClient(t)\n\tstream := 
mocks.NewEventMonitoringModule_GetProcessEventsClient(t)\n\tclient.On(\"GetProcessEvents\", ctx, &api.GetProcessEventParams{TimeoutSeconds: 1}).Return(stream, nil)\n\n\tevents := make([]*model.ProcessEvent, 0)\n\tevents = append(events, model.NewMockedExecEvent(time.Now().Add(-10*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}))\n\tevents = append(events, model.NewMockedExitEvent(time.Now().Add(-9*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}, 0))\n\tevents = append(events, model.NewMockedExecEvent(time.Now().Add(-5*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"invalid-path\"}))\n\tevents = append(events, model.NewMockedExitEvent(time.Now().Add(-5*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"invalid-path\"}, 2))\n\n\tfor _, e := range events {\n\t\tdata, err := e.MarshalMsg(nil)\n\t\trequire.NoError(t, err)\n\n\t\tstream.On(\"Recv\").Once().Return(&api.ProcessEventMessage{Data: data}, nil)\n\t}\n\tstream.On(\"Recv\").Return(nil, io.EOF)\n\n\trcvMessage := make(chan bool)\n\ti := 0\n\thandler := func(e *model.ProcessEvent) {\n\t\tif i > len(events)-1 {\n\t\t\tt.Error(\"should not have received more process events\")\n\t\t}\n\n\t\tAssertProcessEvents(t, events[i], e)\n\t\t// all message have been consumed\n\t\tif i == len(events)-1 {\n\t\t\tclose(rcvMessage)\n\t\t}\n\n\t\ti++\n\t}\n\tl, err := NewSysProbeListener(nil, client, handler)\n\trequire.NoError(t, err)\n\tl.Run()\n\n\t<-rcvMessage\n\tl.Stop()\n\tclient.AssertExpectations(t)\n\tstream.AssertExpectations(t)\n}", "func TestLifecycleManyAddons(t *testing.T) {\n\trunLifecycleTestAWS(&LifecycleTestOptions{\n\t\tt: t,\n\t\tSrcDir: \"many-addons\",\n\t\tClusterName: \"minimal.example.com\",\n\t})\n}", "func TestMultiLoggers(t *testing.T) {\n\t// New logger for output stderr\n\tlogStd := New(\"\", LevelDebug, os.Stderr)\n\tlogStd.Info(\"a stderr logger\")\n\n\t// New logger for output file\n\tfw, err := FileWriter(\"logs/test.log\")\n\tif err != nil 
{\n\t\tpanic(err)\n\t\treturn\n\t}\n\tlogFile := New(\"\", LevelDebug, fw)\n\tlogFile.Info(\"a file logger\")\n}", "func (m *MockDynamicCertPrivate) AddListener(arg0 dynamiccertificates.Listener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddListener\", arg0)\n}", "func TestMultiReporterError(t *testing.T) {\n\tr1 := ReporterFunc(func(ctx context.Context, level string, err error) error {\n\t\treturn errors.New(\"boom 1\")\n\t})\n\n\tr2 := ReporterFunc(func(ctx context.Context, level string, err error) error {\n\t\treturn errors.New(\"boom 2\")\n\t})\n\n\th := MultiReporter{r1, r2}\n\n\tctx := WithReporter(context.Background(), h)\n\terr := Report(ctx, errBoom)\n\n\tif _, ok := err.(*MultiError); !ok {\n\t\tt.Fatal(\"Expected a MultiError to be returned\")\n\t}\n}", "func (m *MockMultiClusterRoleEventHandler) UpdateMultiClusterRole(old, new *v1alpha1.MultiClusterRole) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateMultiClusterRole\", old, new)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestIntegrationNewListener(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration test in short mode...\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*120)\n\tdefer cancel()\n\n\tlistener := NewAmqpConnection(ctx, config)\n\t// Remove topic to ensure each test has a clean topic to work with\n\tdefer deleteSubscription(listener, config)\n\n\tnonce := time.Now().String()\n\tsender, err := listener.CreateAmqpSender(config.SubscribesToEvent)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\terr = sender.Send(ctx, amqp.NewMessage([]byte(nonce)))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tstats, err := listener.GetQueueDepth()\n\tdepth := stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 1 {\n\t\tt.Errorf(\"Expected queue depth of 1 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n\n\tamqpMessage, err := 
listener.Receiver.Receive(ctx)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmessage := messaging.NewAmqpMessageWrapper(amqpMessage)\n\n\tgo func() {\n\t\ttime.Sleep(time.Duration(45) * time.Second)\n\t\terr := listener.RenewLocks(ctx, []*amqp.Message{\n\t\t\tamqpMessage,\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t//Added to ensure that locks are renewed\n\ttime.Sleep(time.Duration(75) * time.Second)\n\n\terr = message.Accept()\n\tif string(message.Body()) != nonce {\n\t\tt.Errorf(\"value not as expected in message Expected: %s Got: %s\", nonce, message.Body())\n\t}\n\n\tstats, err = listener.GetQueueDepth()\n\tdepth = stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 0 {\n\t\tt.Errorf(\"Expected queue depth of 0 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n}", "func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...any) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func (m *MockHealthCheck) SetListener(arg0 discovery.LegacyHealthCheckStatsListener, arg1 bool) {\n\tm.ctrl.Call(m, \"SetListener\", arg0, arg1)\n}", "func (m *MockProc) OnSvcAllHostReplace(arg0 []*host.Host) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnSvcAllHostReplace\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockServersService) CreateMultiple(arg0 *binarylane.ServerMultiCreateRequest) (bl.Servers, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateMultiple\", arg0)\n\tret0, _ := ret[0].(bl.Servers)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *LogPollerWrapper) LatestEvents() ([]types.OracleRequest, []types.OracleResponse, error) {\n\tret := _m.Called()\n\n\tvar r0 []types.OracleRequest\n\tvar r1 
[]types.OracleResponse\n\tvar r2 error\n\tif rf, ok := ret.Get(0).(func() ([]types.OracleRequest, []types.OracleResponse, error)); ok {\n\t\treturn rf()\n\t}\n\tif rf, ok := ret.Get(0).(func() []types.OracleRequest); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]types.OracleRequest)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func() []types.OracleResponse); ok {\n\t\tr1 = rf()\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).([]types.OracleResponse)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(2).(func() error); ok {\n\t\tr2 = rf()\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...interface{}) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func (m *MockHooks) OnUpdate(existing, new proto.Message) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnUpdate\", existing, new)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockProvider) OnEndpointsSynced() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsSynced\")\n}", "func TestMultipleHeartbeatTimeout(t *testing.T) {\n\ts := NewSupervisor(nil)\n\traa := NewRecoverableAction(s)\n\trab := NewRecoverableAction(s)\n\trac := NewRecoverableAction(s)\n\n\ts.AddRecoverable(\"A\", raa)\n\ts.AddRecoverable(\"B\", rab)\n\ts.AddRecoverable(\"C\", rac)\n\n\tt.Logf(\"(A) is '%v'.\", raa.Action(TimeConsumingAction))\n\tt.Logf(\"(B) is '%v'.\", rab.Action(PositiveAction))\n\tt.Logf(\"(C) is '%v'.\", rac.Action(PositiveAction))\n}", "func TestMultipleFeed(t *testing.T) {\n\tvar (\n\t\tnumSubs = 10\n\t\tintEvents = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}\n\t\tstringEvents = []interface{}{\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", 
\"nine\"}\n\t\tintSubs = make([]*testSubscriber, 0, numSubs)\n\t\tstringSubs = make([]*testSubscriber, 0, numSubs)\n\t\tstopper = NewStopper()\n\t)\n\n\tintFeed := &Feed{}\n\tstringFeed := &Feed{}\n\n\tfor i := 0; i < numSubs; i++ {\n\t\t// int subscriber\n\t\tts := newSubscriber(intFeed)\n\t\tintSubs = append(intSubs, ts)\n\t\tstopper.RunWorker(ts.readAll)\n\n\t\t// string subscriber\n\t\tts = newSubscriber(stringFeed)\n\t\tstringSubs = append(stringSubs, ts)\n\t\tstopper.RunWorker(ts.readAll)\n\t}\n\n\tfor i := 0; i < len(intEvents); i++ {\n\t\tintFeed.Publish(intEvents[i])\n\t\tstringFeed.Publish(stringEvents[i])\n\t}\n\tintFeed.Close()\n\tstringFeed.Close()\n\tstopper.Stop()\n\n\t// Wait for stopper to finish, meaning all publishers have ceased.\n\tselect {\n\tcase <-stopper.IsStopped():\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatalf(\"stopper failed to complete after 5 seconds.\")\n\t}\n\n\tfor i, ts := range intSubs {\n\t\tif a, e := ts.received, intEvents; !reflect.DeepEqual(a, e) {\n\t\t\tt.Errorf(\"int subscriber %d received incorrect events %v, expected %v\", i, a, e)\n\t\t}\n\t}\n\tfor i, ts := range stringSubs {\n\t\tif a, e := ts.received, stringEvents; !reflect.DeepEqual(a, e) {\n\t\t\tt.Errorf(\"int subscriber %d received incorrect events %v, expected %v\", i, a, e)\n\t\t}\n\t}\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteMultiListeners), region, lbID, listeners)\n}", "func (m *MockCallback) OnRemove(arg0 int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemove\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockStreamReceiveListener) OnReceive(ctx context.Context, headers api.HeaderMap, data buffer.IoBuffer, trailers api.HeaderMap) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnReceive\", 
ctx, headers, data, trailers)\n}", "func (s *Suite) TestMaxRetriesMulti() {\n\tsts := SimpleTestSetup{\n\t\tNamespaceName: \"TestMaxRetries\",\n\t\tWorkerName: \"worker\",\n\t\tWorkSpecName: \"spec\",\n\t\tWorkSpecData: map[string]interface{}{\n\t\t\t\"max_getwork\": 2,\n\t\t\t\"max_retries\": 1,\n\t\t},\n\t}\n\tsts.SetUp(s)\n\tdefer sts.TearDown(s)\n\n\tfor _, name := range []string{\"a\", \"b\", \"c\", \"d\"} {\n\t\t_, err := sts.AddWorkUnit(name)\n\t\tif !s.NoError(err) {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Now we should be able to request work and get both a and b\n\treq := coordinate.AttemptRequest{\n\t\tNumberOfWorkUnits: 10,\n\t}\n\tattempts, err := sts.Worker.RequestAttempts(req)\n\tif s.NoError(err) {\n\t\tif s.Len(attempts, 2) {\n\t\t\ts.Equal(\"a\", attempts[0].WorkUnit().Name())\n\t\t\ts.Equal(\"b\", attempts[1].WorkUnit().Name())\n\t\t}\n\t}\n\n\t// Let the first work unit finish and the second time out\n\ts.Clock.Add(1 * time.Minute)\n\terr = attempts[0].Finish(nil)\n\ts.NoError(err)\n\n\ts.Clock.Add(1 * time.Hour)\n\n\t// Now get two more work units. 
We expect the system to find\n\t// b and c, notice that b is expired, and just return c.\n\tattempts, err = sts.Worker.RequestAttempts(req)\n\tif s.NoError(err) {\n\t\tif s.Len(attempts, 1) {\n\t\t\ts.Equal(\"c\", attempts[0].WorkUnit().Name())\n\t\t}\n\t}\n}", "func (s *HealthCheckSuite) TestMultipleRoutersOnSameService(c *check.C) {\n\tfile := s.adaptFile(c, \"fixtures/healthcheck/multiple-routers-one-same-service.toml\", struct {\n\t\tServer1 string\n\t}{s.whoami1IP})\n\tdefer os.Remove(file)\n\n\tcmd, display := s.traefikCmd(withConfigFile(file))\n\tdefer display(c)\n\terr := cmd.Start()\n\tc.Assert(err, checker.IsNil)\n\tdefer s.killCmd(cmd)\n\n\t// wait for traefik\n\terr = try.GetRequest(\"http://127.0.0.1:8080/api/rawdata\", 60*time.Second, try.BodyContains(\"Host(`test.localhost`)\"))\n\tc.Assert(err, checker.IsNil)\n\n\t// Set whoami health to 200 to be sure to start with the wanted status\n\tclient := &http.Client{}\n\tstatusOkReq, err := http.NewRequest(http.MethodPost, \"http://\"+s.whoami1IP+\"/health\", bytes.NewBuffer([]byte(\"200\")))\n\tc.Assert(err, checker.IsNil)\n\t_, err = client.Do(statusOkReq)\n\tc.Assert(err, checker.IsNil)\n\n\t// check healthcheck on web1 entrypoint\n\thealthReqWeb1, err := http.NewRequest(http.MethodGet, \"http://127.0.0.1:8000/health\", nil)\n\tc.Assert(err, checker.IsNil)\n\thealthReqWeb1.Host = \"test.localhost\"\n\terr = try.Request(healthReqWeb1, 1*time.Second, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n\n\t// check healthcheck on web2 entrypoint\n\thealthReqWeb2, err := http.NewRequest(http.MethodGet, \"http://127.0.0.1:9000/health\", nil)\n\tc.Assert(err, checker.IsNil)\n\thealthReqWeb2.Host = \"test.localhost\"\n\n\terr = try.Request(healthReqWeb2, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n\n\t// Set whoami health to 500\n\tstatusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, \"http://\"+s.whoami1IP+\"/health\", 
bytes.NewBuffer([]byte(\"500\")))\n\tc.Assert(err, checker.IsNil)\n\t_, err = client.Do(statusInternalServerErrorReq)\n\tc.Assert(err, checker.IsNil)\n\n\t// Verify no backend service is available due to failing health checks\n\terr = try.Request(healthReqWeb1, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))\n\tc.Assert(err, checker.IsNil)\n\n\terr = try.Request(healthReqWeb2, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))\n\tc.Assert(err, checker.IsNil)\n\n\t// Change one whoami health to 200\n\tstatusOKReq1, err := http.NewRequest(http.MethodPost, \"http://\"+s.whoami1IP+\"/health\", bytes.NewBuffer([]byte(\"200\")))\n\tc.Assert(err, checker.IsNil)\n\t_, err = client.Do(statusOKReq1)\n\tc.Assert(err, checker.IsNil)\n\n\t// Verify health check\n\terr = try.Request(healthReqWeb1, 3*time.Second, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n\n\terr = try.Request(healthReqWeb2, 3*time.Second, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n}", "func (m *MockMachine) PublishLifecycleEvent(arg0 lifecycle.Type, arg1 ...lifecycle.Option) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0}\n\tfor _, a := range arg1 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"PublishLifecycleEvent\", varargs...)\n}", "func (m *MockSession) SetOnCloseCallbacks(arg0 []func()) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"SetOnCloseCallbacks\", arg0)\n}", "func TestIntegrationNewListener(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration test in short mode...\")\n\t}\n\n\t// pick a random name to prevent previous tests affecting this test\n\tconfig.ModuleName = helpers.RandomName(8)\n\n\trenewEvery := time.Second * 35\n\tprocessingTime := time.Second * 240\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*310)\n\tdefer cancel()\n\n\tlistener := NewAmqpConnection(ctx, config)\n\t// Remove topic to ensure each test has a clean topic to work with\n\tdefer 
deleteSubscription(listener, config)\n\n\tnonce := time.Now().String()\n\tsender, err := listener.CreateAmqpSender(config.SubscribesToEvent)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\terr = sender.Send(ctx, amqp.NewMessage([]byte(nonce)))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tstats, err := listener.GetQueueDepth()\n\tdepth := stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 1 {\n\t\tt.Errorf(\"Expected queue depth of 1 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n\n\tamqpMessage, err := listener.Receiver.Receive(ctx)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmessage := messaging.NewAmqpMessageWrapper(amqpMessage)\n\n\t// SUMMARY: Testing message lock renewal. By default SB messages's locks expire after 1min and the message is requeued\n\t// 1. Starts a loop renewing the message lock\n\t// 2. Block for more than 1min\n\t// 3. Accept the message (dequeuing it)\n\t// 4. Check the queue length is 0... 
if it's not we lost the lock and the message got put back on the queue.\n\trenewContext, cancel := context.WithCancel(ctx)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-renewContext.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(renewEvery)\n\t\t\t\terr := listener.RenewLocks(ctx, []*amqp.Message{\n\t\t\t\t\tamqpMessage,\n\t\t\t\t})\n\n\t\t\t\t// Report the test error if the context hasn't been cancelled.\n\t\t\t\tif err != nil {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-renewContext.Done():\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tt.Error(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\ttime.Sleep(processingTime)\n\tcancel()\n\n\terr = message.Accept()\n\tif string(message.Body()) != nonce {\n\t\tt.Errorf(\"value not as expected in message Expected: %s Got: %s\", nonce, message.Body())\n\t}\n\n\t// wait for the SB stats API to update\n\ttime.Sleep(time.Second * 30)\n\n\tstats, err = listener.GetQueueDepth()\n\tdepth = stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 0 {\n\t\tt.Errorf(\"Expected queue depth of 0 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n}", "func TestMultiRoutineAccess_logger(t *testing.T) {\n\t// storing address of *Logger instance for validation.\n\tmsgArray := make([]string, 0)\n\n\t// wait-group to make sure a certain number of go-routine(s)\n\t// has finished its task.\n\tvar wgroup sync.WaitGroup\n\n\tfor i := 0; i < 10; i++ {\n\t\t// updates the wait-group counter.\n\t\twgroup.Add(1)\n\n\t\tgo func(idx int) {\n\t\t\t// decreses the wait-group counter by 1.\n\t\t\t// When the counter returns to 0, the wait-group will end the \"wait\".\n\t\t\tdefer wgroup.Done()\n\n\t\t\tlog := logger.GetLogger()\n\t\t\t// append the address value of instance \"log\"\n\t\t\tlock.Lock()\n\t\t\tmsgArray = append(msgArray, fmt.Sprintf(\"%p\", log))\n\t\t\tlock.Unlock()\n\t\t\tlog.Log(fmt.Sprintf(\"this is a log entry from [%v]\\n\", 
idx))\n\t\t}(i)\n\t}\n\twgroup.Wait()\n\n\t// verification\n\tif len(msgArray) == 0 {\n\t\tt.Fatalf(\"expect to have a least one message\")\n\t}\n\taddrLine := msgArray[0]\n\tfor i := 1; i < len(msgArray); i++ {\n\t\tline := msgArray[i]\n\t\tif addrLine != line {\n\t\t\tt.Errorf(\"expect both lines (addresses of Logger) should be identical, [%v] vs [%v]\\n\", addrLine, line)\n\t\t}\n\t}\n}", "func (m *MockStreamEventListener) OnDestroyStream() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnDestroyStream\")\n}", "func (m *MockWaiter) WaitForAll(arg0 context.Context) []error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WaitForAll\", arg0)\n\tret0, _ := ret[0].([]error)\n\treturn ret0\n}", "func (m *MockEventsSourceProvider) Events(arg0 string, arg1 *cert.Info) (events.Client, events.EventsSource, error) {\n\tret := m.ctrl.Call(m, \"Events\", arg0, arg1)\n\tret0, _ := ret[0].(events.Client)\n\tret1, _ := ret[1].(events.EventsSource)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (m *MockResponseHandler) TargetList() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"TargetList\")\n}", "func testMultiSourceEndpoints(t *testing.T) {\n\tfoo := &endpoint.Endpoint{DNSName: \"foo\", Targets: endpoint.Targets{\"8.8.8.8\"}}\n\tbar := &endpoint.Endpoint{DNSName: \"bar\", Targets: endpoint.Targets{\"8.8.4.4\"}}\n\n\tfor _, tc := range []struct {\n\t\ttitle string\n\t\tnestedEndpoints [][]*endpoint.Endpoint\n\t\texpected []*endpoint.Endpoint\n\t}{\n\t\t{\n\t\t\t\"no child sources return no endpoints\",\n\t\t\tnil,\n\t\t\t[]*endpoint.Endpoint{},\n\t\t},\n\t\t{\n\t\t\t\"single empty child source returns no endpoints\",\n\t\t\t[][]*endpoint.Endpoint{{}},\n\t\t\t[]*endpoint.Endpoint{},\n\t\t},\n\t\t{\n\t\t\t\"single non-empty child source returns child's endpoints\",\n\t\t\t[][]*endpoint.Endpoint{{foo}},\n\t\t\t[]*endpoint.Endpoint{foo},\n\t\t},\n\t\t{\n\t\t\t\"multiple non-empty child sources returns merged children's endpoints\",\n\t\t\t[][]*endpoint.Endpoint{{foo}, 
{bar}},\n\t\t\t[]*endpoint.Endpoint{foo, bar},\n\t\t},\n\t} {\n\t\ttc := tc\n\t\tt.Run(tc.title, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\t// Prepare the nested mock sources.\n\t\t\tsources := make([]Source, 0, len(tc.nestedEndpoints))\n\n\t\t\t// Populate the nested mock sources.\n\t\t\tfor _, endpoints := range tc.nestedEndpoints {\n\t\t\t\tsrc := new(testutils.MockSource)\n\t\t\t\tsrc.On(\"Endpoints\").Return(endpoints, nil)\n\n\t\t\t\tsources = append(sources, src)\n\t\t\t}\n\n\t\t\t// Create our object under test and get the endpoints.\n\t\t\tsource := NewMultiSource(sources, nil)\n\n\t\t\t// Get endpoints from the source.\n\t\t\tendpoints, err := source.Endpoints(context.Background())\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Validate returned endpoints against desired endpoints.\n\t\t\tvalidateEndpoints(t, endpoints, tc.expected)\n\n\t\t\t// Validate that the nested sources were called.\n\t\t\tfor _, src := range sources {\n\t\t\t\tsrc.(*testutils.MockSource).AssertExpectations(t)\n\t\t\t}\n\t\t})\n\t}\n}", "func (m *MockCoinsMempool) AddMulti(item *primitives.TxMulti, state *primitives.CoinsState) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddMulti\", item, state)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockEventDao) UnfinishedEvents(target, targetID string, optTypes ...string) ([]*model.ServiceEvent, error) {\n\tvarargs := []interface{}{target, targetID}\n\tfor _, a := range optTypes {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"UnfinishedEvents\", varargs...)\n\tret0, _ := ret[0].([]*model.ServiceEvent)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func AssertLogutilEventsMatch(t testing.TB, expected []*logutilpb.Event, actual []*logutilpb.Event) {\n\tt.Helper()\n\n\tf := func(e *logutilpb.Event) *logutilpb.Event {\n\t\treturn &logutilpb.Event{\n\t\t\tValue: e.Value,\n\t\t}\n\t}\n\texpected = clearEvents(expected, f)\n\tactual = clearEvents(actual, f)\n\n\texpectedBytes, err := 
json.Marshal(expected)\n\tif !assert.NoError(t, err, \"could not marshal expected events as json, assertion messages will be impacted\") {\n\t\texpectedBytes = nil\n\t}\n\n\tactualBytes, err := json.Marshal(actual)\n\tif !assert.NoError(t, err, \"could not marshal actual events as json, assertion messages will be impacted\") {\n\t\tactualBytes = nil\n\t}\n\n\tif !assert.Equal(t, len(expected), len(actual), \"differing number of events; expected %d, have %d\\nexpected bytes: %s\\nactual bytes: %s\\n\", len(expected), len(actual), expectedBytes, actualBytes) {\n\t\treturn\n\t}\n\n\tfor i, expectedEvent := range expected {\n\t\tactualEvent := actual[i]\n\t\tassert.Regexp(t, expectedEvent.Value, actualEvent.Value, \"event %d mismatch\", i)\n\t}\n}", "func (m *MockGeneralRepository) GetEvents(arg0 []models.SubscriptionRequest, arg1, arg2 int64) ([]models.Event, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetEvents\", arg0, arg1, arg2)\n\tret0, _ := ret[0].([]models.Event)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func MockEvents() []optic.Event {\n\tevents := make([]optic.Event, 0)\n\tevents = append(events, TestRaw([]byte(\"raw\")))\n\tevents = append(events, TestMetric(1.0))\n\tevents = append(events, TestLogLine(\"logline\"))\n\treturn events\n}", "func (p *EventProber) ExpectEvents(ids []string) {\n\tp.appendIds(ids...)\n}", "func (m *MockEventLogger) Append(event eventlog.EventData) (uint64, uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Append\", event)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(uint64)\n\tret2, _ := ret[2].(time.Time)\n\tret3, _ := ret[3].(error)\n\treturn ret0, ret1, ret2, ret3\n}", "func (suite *TransportTestSuite) TestAlreadyListening() {\n\ttrans := suite.Transport\n\tinboundChan := make(chan message.Request, 1)\n\tsuite.Assert().NoError(trans.Listen(testService, inboundChan))\n\tsuite.Assert().Equal(ErrAlreadyListening, trans.Listen(testService, inboundChan))\n}", "func (m 
*MockStreamConnectionEventListener) OnGoAway() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnGoAway\")\n}", "func verifyReceiverLifecycle(t *testing.T, factory component.ReceiverFactory, getConfigFn getReceiverConfigFn) {\n\tctx := context.Background()\n\thost := newAssertNoErrorHost(t)\n\treceiverCreateSet := componenttest.NewNopReceiverCreateSettings()\n\n\tif getConfigFn == nil {\n\t\tgetConfigFn = factory.CreateDefaultConfig\n\t}\n\n\tcreateFns := []createReceiverFn{\n\t\twrapCreateLogsRcvr(factory),\n\t\twrapCreateTracesRcvr(factory),\n\t\twrapCreateMetricsRcvr(factory),\n\t}\n\n\tfor _, createFn := range createFns {\n\t\tfirstRcvr, err := createFn(ctx, receiverCreateSet, getConfigFn())\n\t\tif errors.Is(err, componenterror.ErrDataTypeIsNotSupported) {\n\t\t\tcontinue\n\t\t}\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, firstRcvr.Start(ctx, host))\n\t\trequire.NoError(t, firstRcvr.Shutdown(ctx))\n\n\t\tsecondRcvr, err := createFn(ctx, receiverCreateSet, getConfigFn())\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, secondRcvr.Start(ctx, host))\n\t\trequire.NoError(t, secondRcvr.Shutdown(ctx))\n\t}\n}", "func (rst *watcherSyncerTester) expectAllEventsHandled() {\n\tlog.Infof(\"Expecting all events to have been handled\")\n\tfor _, l := range rst.lws {\n\t\tExpect(l.listCallResults).To(HaveLen(0), \"pending list results to be processed\")\n\t\tExpect(l.stopEvents).To(HaveLen(0), \"pending stop events to be processed\")\n\t\tExpect(l.results).To(HaveLen(0), \"pending watch results to be processed\")\n\t}\n}", "func (m *MockMulticlusterValidatingWebhookConfigurationReconcileLoop) AddMulticlusterValidatingWebhookConfigurationReconciler(ctx context.Context, rec controller.MulticlusterValidatingWebhookConfigurationReconciler, predicates ...predicate.Predicate) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, rec}\n\tfor _, a := range predicates {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, 
\"AddMulticlusterValidatingWebhookConfigurationReconciler\", varargs...)\n}", "func (m *MockMultiClusterRoleEventHandler) GenericMultiClusterRole(obj *v1alpha1.MultiClusterRole) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GenericMultiClusterRole\", obj)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockHandler) V2GetEvents(clusterID, hostID, infraEnvID *strfmt.UUID, categories ...string) ([]*common.Event, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{clusterID, hostID, infraEnvID}\n\tfor _, a := range categories {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"V2GetEvents\", varargs...)\n\tret0, _ := ret[0].([]*common.Event)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockIByIdPresenter) OnReceived(arg0 aggregates.Topic) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnReceived\", arg0)\n}", "func TestMultiRoutineAccessWithDelay_logger(t *testing.T) {\n\t// storing address of *Logger instance for validation.\n\tmsgArray := make([]string, 0)\n\n\t// wait-group to make sure a certain number of go-routine(s)\n\t// has finished its task.\n\tvar wgroup sync.WaitGroup\n\n\tfor i := 0; i < 10; i++ {\n\t\t// updates the wait-group counter.\n\t\twgroup.Add(1)\n\n\t\tgo func(idx int) {\n\t\t\t// decreses the wait-group counter by 1.\n\t\t\t// When the counter returns to 0, the wait-group will end the \"wait\".\n\t\t\tdefer wgroup.Done()\n\n\t\t\t// add a random delay to simulate multi access.\n\t\t\ttime.Sleep(time.Millisecond * time.Duration(rand.Int63n(1000)))\n\n\t\t\tlog := logger.GetLogger()\n\t\t\t// append the address value of instance \"log\"\n\t\t\tlock.Lock()\n\t\t\tmsgArray = append(msgArray, fmt.Sprintf(\"%p\", log))\n\t\t\tlock.Unlock()\n\t\t\tlog.Log(fmt.Sprintf(\"[with delay] this is a log entry from [%v]\\n\", idx))\n\t\t}(i)\n\t}\n\twgroup.Wait()\n\n\t// verification\n\tif len(msgArray) == 0 {\n\t\tt.Fatalf(\"expect to have a least one message\")\n\t}\n\taddrLine := msgArray[0]\n\tfor i 
:= 1; i < len(msgArray); i++ {\n\t\tline := msgArray[i]\n\t\tif addrLine != line {\n\t\t\tt.Errorf(\"expect both lines (addresses of Logger) should be identical, [%v] vs [%v]\\n\", addrLine, line)\n\t\t}\n\t}\n}", "func TestOnlyCallOnceOnMultipleDepChanges(t *testing.T) {\n\tr := New()\n\ti := r.CreateInput(1)\n\tc1 := r.CreateCompute1(i, func(v int) int { return v + 1 })\n\tc2 := r.CreateCompute1(i, func(v int) int { return v - 1 })\n\tc3 := r.CreateCompute1(c2, func(v int) int { return v - 1 })\n\tc4 := r.CreateCompute2(c1, c3, func(v1, v3 int) int { return v1 * v3 })\n\tchanged4 := 0\n\tc4.AddCallback(func(int) { changed4++ })\n\ti.SetValue(3)\n\tif changed4 < 1 {\n\t\tt.Fatalf(\"callback function was not called\")\n\t} else if changed4 > 1 {\n\t\tt.Fatalf(\"callback function was called too often\")\n\t}\n}", "func (m *MockServerStreamConnectionEventListener) OnGoAway() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnGoAway\")\n}", "func (m *MockSnapshot) ApplyMultiCluster(ctx context.Context, multiClusterClient multicluster.Client, errHandler output.ErrorHandler) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"ApplyMultiCluster\", ctx, multiClusterClient, errHandler)\n}", "func TestProviderSendAndReceive(t *testing.T) {\n\t// Comment out following line to run the example\n\tt.SkipNow()\n\n\teventProviders, err := initializeEventProviders(\"test_data/providers0/eventDefinitions.yaml\")\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tb, err := yaml.Marshal(eventProviders)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tpayload := string(b)\n\tt.Logf(\"Processed eventDefinitions.yaml:\\n%s\", payload)\n\n\t// Start listening on all eventSources\n\tt.Log(\"Creating subscriptions for event sources\")\n\n/*\n\tfor _, eventSource := range eventProviders.EventSources {\n\t\tprovider := eventProviders.GetMessageProvider(eventSource.ProviderRef)\n\t\tt.Logf(\"Subscribing to event source '%s'\", eventSource.ProviderRef)\n\t\terr := provider.Subscribe(eventSource)\n\t\tif err != nil 
{\n\t\t\tt.Fatalf(\"unable to subscribe to eventSource '%s'\", eventSource.Name)\n\t\t}\n\t}\n*/\n\n\tvar wg sync.WaitGroup\n\n\t// Send the messages to the eventSources\n\tfor _, node := range eventProviders.EventDestinations {\n\t\tmsg, err := json.Marshal(map[string]string{\n\t\t\t\"msg\": fmt.Sprintf(\"Hello %s from %s...\", node.Name, node.ProviderRef),\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tt.Logf(\"Sending message to event destination '%s': %s\", node.Name, msg)\n\t\tprovider := eventProviders.GetMessageProvider(node.ProviderRef)\n\t\tif provider == nil {\n\t\t\tt.Fatalf(\"unable to find provider referenced by event destination '%s'\", node.Name)\n\t\t}\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tprovider.Send(node, []byte(msg), nil)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\n\t// And receive them here\n/*\n\tnumMessagesExpected := 2\n\tfor _, eventSource := range eventProviders.EventSources {\n\t\tprovider := eventProviders.GetMessageProvider(eventSource.ProviderRef)\n\n\t\t// Try maxAttempts times to receive messages before giving up\n\t\tfor i := 0; i < numMessagesExpected; i++ {\n\t\t\tt.Logf(\"Waiting for message %d / %d eventSource '%s'\", i + 1, numMessagesExpected, eventSource.Name)\n\t\t\tb, err := provider.Receive(eventSource)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"timed out waiting for a message from eventSource '%s'\", eventSource.Name)\n\t\t\t}\n\t\t\tt.Logf(\"Received message from eventSource '%s': %s\", eventSource.Name, b)\n\t\t}\n\t}\n*/\n}", "func TestConsumerTickerLoop(t *testing.T) {\n\texpect := shared.NewExpect(t)\n\tmockC := getMockConsumer()\n\tmockC.setState(PluginStateActive)\n\t// accept timeroff by abs( 8 ms)\n\tdelta := float64(8 * time.Millisecond)\n\tcounter := new(int32)\n\ttickerLoopTimeout := 20 * time.Millisecond\n\tvar timeRecorded time.Time\n\tonTimeOut := func() {\n\t\tif atomic.LoadInt32(counter) > 3 {\n\t\t\tmockC.setState(PluginStateDead)\n\t\t\treturn\n\t\t}\n\t\t//this was fired as soon as 
the ticker started. So ignore but save the time\n\t\tif atomic.LoadInt32(counter) == 0 {\n\t\t\ttimeRecorded = time.Now()\n\t\t\tatomic.AddInt32(counter, 1)\n\t\t\treturn\n\t\t}\n\t\tdiff := time.Now().Sub(timeRecorded)\n\t\tdeltaDiff := math.Abs(float64(tickerLoopTimeout - diff))\n\t\texpect.True(deltaDiff < delta)\n\t\ttimeRecorded = time.Now()\n\t\tatomic.AddInt32(counter, 1)\n\t\treturn\n\t}\n\n\tmockC.tickerLoop(tickerLoopTimeout, onTimeOut)\n\ttime.Sleep(2 * time.Second)\n\t// in anycase, the callback has to be called atleast once\n\texpect.Greater(atomic.LoadInt32(counter), int32(1))\n}", "func (is *informerSpy) waitForEvents(t *testing.T, wantEvents bool) {\n\tt.Helper()\n\t// wait for create/update/delete 3 events for 30 seconds\n\twaitTimeout := time.Second * 30\n\tif !wantEvents {\n\t\t// wait just 15 seconds for no events\n\t\twaitTimeout = time.Second * 15\n\t}\n\n\terr := wait.PollImmediate(time.Second, waitTimeout, func() (bool, error) {\n\t\tis.mu.Lock()\n\t\tdefer is.mu.Unlock()\n\t\treturn len(is.adds) > 0 && len(is.updates) > 0 && len(is.deletes) > 0, nil\n\t})\n\tif wantEvents {\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"wanted events, but got error: %v\", err)\n\t\t}\n\t} else {\n\t\tif !errors.Is(err, wait.ErrWaitTimeout) {\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"wanted no events, but got error: %v\", err)\n\t\t\t} else {\n\t\t\t\tt.Fatalf(\"wanted no events, but got some: %s\", dump.Pretty(is))\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *MockProvider) OnServiceAdd(arg0 *v1.Service) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceAdd\", arg0)\n}", "func TestControllerHandleEvents(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\taddServices []*corev1.Service\n\t\tupdateServices []string\n\t\tdelServices []string\n\t\texpAddedServices []string\n\t\texpDeletedServices []string\n\t}{\n\t\t{\n\t\t\tname: \"If a controller is watching services it should react to the service change events.\",\n\t\t\taddServices: 
[]*corev1.Service{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc1\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc2\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tupdateServices: []string{\"svc1\"},\n\t\t\tdelServices: []string{\"svc1\", \"svc2\"},\n\t\t\texpAddedServices: []string{\"svc1\", \"svc2\", \"svc1\"},\n\t\t\texpDeletedServices: []string{\"svc1\", \"svc2\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\tassert := assert.New(t)\n\t\t\tresync := 30 * time.Second\n\t\t\tstopC := make(chan struct{})\n\t\t\tvar gotAddedServices []string\n\t\t\tvar gotDeletedServices []string\n\n\t\t\t// Create the kubernetes client.\n\t\t\tk8scli, _, _, err := cli.GetK8sClients(\"\")\n\n\t\t\trequire.NoError(err, \"kubernetes client is required\")\n\n\t\t\t// Prepare the environment on the cluster.\n\t\t\tprep := prepare.New(k8scli, t)\n\t\t\tprep.SetUp()\n\t\t\tdefer prep.TearDown()\n\n\t\t\t// Create the reitrever.\n\t\t\trt := &retrieve.Resource{\n\t\t\t\tListerWatcher: cache.NewListWatchFromClient(k8scli.CoreV1().RESTClient(), \"services\", prep.Namespace().Name, fields.Everything()),\n\t\t\t\tObject: &corev1.Service{},\n\t\t\t}\n\n\t\t\t// Call times are the number of times the handler should be called before sending the termination signal.\n\t\t\tstopCallTimes := len(test.addServices) + len(test.updateServices) + len(test.delServices)\n\t\t\tcalledTimes := 0\n\t\t\tvar mx sync.Mutex\n\n\t\t\t// Create the handler.\n\t\t\thl := 
&handler.HandlerFunc{\n\t\t\t\tAddFunc: func(_ context.Context, obj runtime.Object) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\tsvc := obj.(*corev1.Service)\n\t\t\t\t\tgotAddedServices = append(gotAddedServices, svc.Name)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tDeleteFunc: func(_ context.Context, id string) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\t// Ignore namespace.\n\t\t\t\t\tid = strings.Split(id, \"/\")[1]\n\t\t\t\t\tgotDeletedServices = append(gotDeletedServices, id)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// Create a Pod controller.\n\t\t\tctrl := controller.NewSequential(resync, hl, rt, nil, log.Dummy)\n\t\t\trequire.NotNil(ctrl, \"controller is required\")\n\t\t\tgo ctrl.Run(stopC)\n\n\t\t\t// Create the required services.\n\t\t\tfor _, svc := range test.addServices {\n\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Create(svc)\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\tfor _, svc := range test.updateServices {\n\t\t\t\torigSvc, err := k8scli.CoreV1().Services(prep.Namespace().Name).Get(svc, metav1.GetOptions{})\n\t\t\t\tif assert.NoError(err) {\n\t\t\t\t\t// Change something\n\t\t\t\t\torigSvc.Spec.Ports = append(origSvc.Spec.Ports, corev1.ServicePort{Name: \"updateport\", Port: 9876})\n\t\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Update(origSvc)\n\t\t\t\t\tassert.NoError(err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Delete the required services.\n\t\t\tfor _, svc := range test.delServices {\n\t\t\t\terr := k8scli.CoreV1().Services(prep.Namespace().Name).Delete(svc, &metav1.DeleteOptions{})\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * 
time.Second)\n\t\t\t}\n\n\t\t\t// Wait until we have finished.\n\t\t\tselect {\n\t\t\t// Timeout.\n\t\t\tcase <-time.After(20 * time.Second):\n\t\t\t// Finished.\n\t\t\tcase <-stopC:\n\t\t\t}\n\n\t\t\t// Check.\n\t\t\tassert.Equal(test.expAddedServices, gotAddedServices)\n\t\t\tassert.Equal(test.expDeletedServices, gotDeletedServices)\n\t\t})\n\t}\n}", "func TestProcessEventFiltering(t *testing.T) {\n\trawEvents := make([]*model.ProcessEvent, 0)\n\thandlers := make([]EventHandler, 0)\n\n\t// The listener should drop unexpected events and not call the EventHandler for it\n\trawEvents = append(rawEvents, model.NewMockedForkEvent(time.Now(), 23, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}))\n\n\t// Verify that expected events are correctly consumed\n\trawEvents = append(rawEvents, model.NewMockedExecEvent(time.Now(), 23, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}))\n\thandlers = append(handlers, func(e *model.ProcessEvent) {\n\t\trequire.Equal(t, model.Exec, e.EventType)\n\t\trequire.Equal(t, uint32(23), e.Pid)\n\t})\n\n\trawEvents = append(rawEvents, model.NewMockedExitEvent(time.Now(), 23, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}, 0))\n\thandlers = append(handlers, func(e *model.ProcessEvent) {\n\t\trequire.Equal(t, model.Exit, e.EventType)\n\t\trequire.Equal(t, uint32(23), e.Pid)\n\t})\n\n\t// To avoid race conditions, all handlers should be assigned during the creation of SysProbeListener\n\tcalledHandlers := 0\n\thandler := func(e *model.ProcessEvent) {\n\t\thandlers[calledHandlers](e)\n\t\tcalledHandlers++\n\t}\n\n\tl, err := NewSysProbeListener(nil, nil, handler)\n\trequire.NoError(t, err)\n\n\tfor _, e := range rawEvents {\n\t\tdata, err := e.MarshalMsg(nil)\n\t\trequire.NoError(t, err)\n\t\tl.consumeData(data)\n\t}\n\tassert.Equal(t, len(handlers), calledHandlers)\n}", "func TestProviderListenAndSend(t *testing.T) {\n\t// Comment out following line to run the example\n\tt.SkipNow()\n\n\teventProviders, err := 
initializeEventProviders(\"test_data/providers0/eventDefinitions.yaml\")\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tb, err := yaml.Marshal(eventProviders)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tpayload := string(b)\n\tt.Logf(\"Processed eventDefinitions.yaml:\\n%s\", payload)\n\n\t// Start listening on all eventSources\n\tt.Log(\"Creating subscriptions for event sources\")\n\n\t/*\n\tfor _, eventSource := range eventProviders.EventSources {\n\t\tprovider := eventProviders.GetMessageProvider(eventSource.ProviderRef)\n\t\tt.Logf(\"Subscribing to event source '%s'\", eventSource.ProviderRef)\n\t\techoMessage := func(data []byte) {\n\t\t\tt.Logf(\"Message body: %s\", data)\n\t\t}\n\t\tgo provider.ListenAndServe(eventSource, echoMessage)\n\t}\n\t*/\n\n\t// Give the listener a bit of time to start up before sending messages\n\ttime.Sleep(1 * time.Second)\n\n\tvar wg sync.WaitGroup\n\tfor _, node := range eventProviders.EventDestinations {\n\t\tmsg, err := json.Marshal(map[string]string{\n\t\t\t\"msg\": fmt.Sprintf(\"Hello %s from %s...\", node.Name, node.ProviderRef),\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tt.Logf(\"Sending message to event destination '%s': %s\", node.Name, msg)\n\t\tprovider := eventProviders.GetMessageProvider(node.ProviderRef)\n\t\tif provider == nil {\n\t\t\tt.Fatalf(\"unable to find provider referenced by event destination '%s'\", node.Name)\n\t\t}\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tprovider.Send(node, []byte(msg), nil)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}", "func TestEventsMgrRestart(t *testing.T) {\n\tti := tInfo{}\n\tAssertOk(t, ti.setup(t), \"failed to setup test\")\n\tdefer ti.teardown()\n\n\tnumRecorders := 3\n\n\tstopEventRecorders := make(chan struct{})\n\twg := new(sync.WaitGroup)\n\twg.Add(numRecorders + 1) // +1 for events manager restart go routine\n\n\t// uuid to make each source unique\n\tcomponentID := uuid.NewV4().String()\n\ttotalEventsSentBySrc := make([]int, numRecorders)\n\n\t// 
create recorder events directory\n\trecorderEventsDir, err := ioutil.TempDir(\"\", \"\")\n\tAssertOk(t, err, \"failed to create recorder events directory\")\n\tdefer os.RemoveAll(recorderEventsDir)\n\n\tfor i := 0; i < numRecorders; i++ {\n\t\tgo func(i int) {\n\t\t\tevtsRecorder, err := recorder.NewRecorder(&recorder.Config{\n\t\t\t\tComponent: fmt.Sprintf(\"%v-%v\", componentID, i),\n\t\t\t\tEvtsProxyURL: ti.evtProxyServices.EvtsProxy.RPCServer.GetListenURL(),\n\t\t\t\tBackupDir: recorderEventsDir}, ti.logger)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to create recorder for source %v\", i)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tti.recorders.Lock()\n\t\t\tti.recorders.list = append(ti.recorders.list, evtsRecorder)\n\t\t\tti.recorders.Unlock()\n\n\t\t\tticker := time.NewTicker(100 * time.Millisecond)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-stopEventRecorders:\n\t\t\t\t\twg.Done()\n\t\t\t\t\treturn\n\t\t\t\tcase <-ticker.C:\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test event - 1\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test event - 2\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_STOPPED, \"test event - 3\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// restart events manager\n\tgo func() {\n\t\tevtsMgrURL := ti.evtsMgr.RPCServer.GetListenURL()\n\n\t\tfor i := 0; i < 3; i++ {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tti.evtsMgr.Stop()\n\n\t\t\t// manager won't be able to accept any events for 1s; all the elastic writes will be denied\n\t\t\t// and all the events will be buffered at the writer for this time\n\t\t\ttime.Sleep(1 * time.Second)\n\n\t\t\t// exporters should be able to release all the holding events from the buffer\n\t\t\tevtsMgr, _, err := testutils.StartEvtsMgr(evtsMgrURL, ti.mockResolver, ti.logger, ti.esClient, nil)\n\t\t\tAssertOk(t, err, \"failed to start events 
manager, err: %v\", err)\n\t\t\tti.evtsMgr = evtsMgr\n\t\t}\n\n\t\ttime.Sleep(1 * time.Second)\n\n\t\t// stop all the recorders\n\t\tclose(stopEventRecorders)\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\t//total events sent by all the recorders\n\ttotalEventsSent := 0\n\tfor _, val := range totalEventsSentBySrc {\n\t\ttotalEventsSent += val\n\t}\n\n\tlog.Infof(\"total events sent: %v\", totalEventsSent)\n\n\t// total number of events received at elastic should match the total events sent\n\t// query all the events received from this source.component\n\tquery := es.NewRegexpQuery(\"source.component.keyword\", fmt.Sprintf(\"%v-.*\", componentID))\n\tti.assertElasticUniqueEvents(t, query, true, 3*numRecorders, \"60s\")\n\tti.assertElasticTotalEvents(t, query, false, totalEventsSent, \"60s\")\n}", "func (m *MockSession) GetOnCloseCallbacks() []func() {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOnCloseCallbacks\")\n\tret0, _ := ret[0].([]func())\n\treturn ret0\n}", "func (m *MockLogger) Fatalln(args ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range args {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"Fatalln\", varargs...)\n}", "func (xdcrf *XDCRFactory) registerAsyncListenersOnTargets(pipeline common.Pipeline, logger_ctx *log.LoggerContext) {\n\ttargets := getNozzleList(pipeline.Targets())\n\tnum_of_targets := len(targets)\n\tnum_of_listeners := min(num_of_targets, base.MaxNumberOfAsyncListeners)\n\tload_distribution := base.BalanceLoad(num_of_listeners, num_of_targets)\n\txdcrf.logger.Infof(\"topic=%v, num_of_targets=%v, num_of_listeners=%v, load_distribution=%v\\n\", pipeline.Topic(), num_of_targets, num_of_listeners, load_distribution)\n\n\tfor i := 0; i < num_of_listeners; i++ {\n\t\tdata_failed_cr_event_listener := component.NewDefaultAsyncComponentEventListenerImpl(\n\t\t\tpipeline_utils.GetElementIdFromNameAndIndex(pipeline, base.DataFailedCREventListener, i),\n\t\t\tpipeline.Topic(), 
logger_ctx)\n\t\tdata_sent_event_listener := component.NewDefaultAsyncComponentEventListenerImpl(\n\t\t\tpipeline_utils.GetElementIdFromNameAndIndex(pipeline, base.DataSentEventListener, i),\n\t\t\tpipeline.Topic(), logger_ctx)\n\t\tget_meta_received_event_listener := component.NewDefaultAsyncComponentEventListenerImpl(\n\t\t\tpipeline_utils.GetElementIdFromNameAndIndex(pipeline, base.GetMetaReceivedEventListener, i),\n\t\t\tpipeline.Topic(), logger_ctx)\n\t\tdata_throttled_event_listener := component.NewDefaultAsyncComponentEventListenerImpl(\n\t\t\tpipeline_utils.GetElementIdFromNameAndIndex(pipeline, base.DataThrottledEventListener, i),\n\t\t\tpipeline.Topic(), logger_ctx)\n\n\t\tfor index := load_distribution[i][0]; index < load_distribution[i][1]; index++ {\n\t\t\tout_nozzle := targets[index]\n\t\t\tout_nozzle.RegisterComponentEventListener(common.DataSent, data_sent_event_listener)\n\t\t\tout_nozzle.RegisterComponentEventListener(common.DataFailedCRSource, data_failed_cr_event_listener)\n\t\t\tout_nozzle.RegisterComponentEventListener(common.GetMetaReceived, get_meta_received_event_listener)\n\t\t\tout_nozzle.RegisterComponentEventListener(common.DataThrottled, data_throttled_event_listener)\n\t\t}\n\t}\n}", "func (m *MockStream) RemoveEventListener(streamEventListener types.StreamEventListener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RemoveEventListener\", streamEventListener)\n}", "func newMockListener(endpoint net.Conn) *mockListener {\n \n c := make(chan net.Conn, 1)\n c <- endpoint\n listener := &mockListener{\n connChannel: c,\n serverEndpoint: endpoint,\n }\n return listener\n}", "func (m *MockHandler) GetEvents(clusterID strfmt.UUID, hostID *strfmt.UUID, categories ...string) ([]*common.Event, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{clusterID, hostID}\n\tfor _, a := range categories {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"GetEvents\", varargs...)\n\tret0, _ := ret[0].([]*common.Event)\n\tret1, _ := 
ret[1].(error)\n\treturn ret0, ret1\n}" ]
[ "0.7366133", "0.7014821", "0.7010592", "0.688933", "0.66543585", "0.6485897", "0.6373792", "0.5989996", "0.5914731", "0.5899088", "0.5893963", "0.58517045", "0.58214295", "0.57519966", "0.5707452", "0.5673079", "0.5633199", "0.5581619", "0.55655205", "0.556252", "0.55586034", "0.5548865", "0.55476046", "0.55462146", "0.5524422", "0.54798067", "0.54712355", "0.54663134", "0.54620016", "0.5461201", "0.54372317", "0.5425868", "0.5422814", "0.542144", "0.54164445", "0.54145366", "0.53961456", "0.53945947", "0.5390396", "0.5386305", "0.5383616", "0.5371773", "0.5371407", "0.5364917", "0.5358947", "0.5358779", "0.53537375", "0.5342003", "0.53410494", "0.53406864", "0.533633", "0.5330648", "0.53268814", "0.5326173", "0.52923656", "0.5264452", "0.5248397", "0.5234208", "0.52303874", "0.522985", "0.5225693", "0.52245903", "0.5221193", "0.5201113", "0.5196202", "0.5191177", "0.5190519", "0.51849395", "0.51832664", "0.5182477", "0.5155927", "0.5155228", "0.51455486", "0.5136518", "0.51043874", "0.5103193", "0.5091429", "0.50826114", "0.5079131", "0.5079024", "0.5073371", "0.5070987", "0.5069313", "0.50676095", "0.50608635", "0.50563294", "0.5055639", "0.50517535", "0.50442564", "0.5041505", "0.5038514", "0.5033752", "0.5029968", "0.5024698", "0.50193185", "0.50187427", "0.50166374", "0.50148493", "0.50081384", "0.50076675" ]
0.8110385
0
EnsureMultiListeners indicates an expected call of EnsureMultiListeners
func (mr *MockLoadBalanceMockRecorder) EnsureMultiListeners(region, lbID, listeners interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureMultiListeners", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiListeners), region, lbID, listeners) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) EnsureMultiListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiSegmentListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiSegmentListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiSegmentListeners), region, lbID, listeners)\n}", "func (m *MockLoadBalance) EnsureMultiSegmentListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiSegmentListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestMsgListenerMulti(t *testing.T) {\n\tml := newMsgListeners()\n\n\tcount := 0\n\tcids := testCids()\t// TODO: The wrong Directory type was being used for MapEntries.\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\n\t})\t// TODO: Исправления для OSX\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\t// TODO: Test emails 1\n\t})\n\tml.onMsgComplete(cids[1], func(err error) {\n\t\tcount++\n\t})\n\n\tml.fireMsgComplete(cids[0], nil)\n\trequire.Equal(t, 2, count)\n\n\tml.fireMsgComplete(cids[1], nil)\n\trequire.Equal(t, 3, count)\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteMultiListeners), region, lbID, listeners)\n}", "func (m *MockLoadBalance) DeleteMultiListeners(region, lbID string, listeners []*v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret 
:= m.ctrl.Call(m, \"DeleteMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockEventLogger) AppendCheckMulti(assumedVersion uint64, events ...eventlog.EventData) (uint64, uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{assumedVersion}\n\tfor _, a := range events {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AppendCheckMulti\", varargs...)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(uint64)\n\tret2, _ := ret[2].(time.Time)\n\tret3, _ := ret[3].(error)\n\treturn ret0, ret1, ret2, ret3\n}", "func (mr *MockEventLoggerMockRecorder) AppendCheckMulti(assumedVersion interface{}, events ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{assumedVersion}, events...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AppendCheckMulti\", reflect.TypeOf((*MockEventLogger)(nil).AppendCheckMulti), varargs...)\n}", "func (mr *MockEventLoggerMockRecorder) AppendMulti(events ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AppendMulti\", reflect.TypeOf((*MockEventLogger)(nil).AppendMulti), events...)\n}", "func (ml *multiListener) Close() error {\n\tdefer ml.wg.Wait()\n\tif !ml.closed {\n\t\tclose(ml.closeCh)\n\t\tml.closed = true\n\t}\n\thasErr := false\n\tfor _, l := range ml.listeners {\n\t\terr := l.Close()\n\t\tif err != nil {\n\t\t\thasErr = true\n\t\t\tml.l.Error(context.Background(), errors.Wrap(err, \"close listener\"))\n\t\t}\n\t}\n\n\tif hasErr {\n\t\treturn errors.New(\"close listeners: one or more errors\")\n\t}\n\treturn nil\n}", "func TestMultipleRegisterCalls(t *testing.T) {\n\tRegister(\"multiple-register-driver-1\")\n\trequire.PanicsWithError(t, \"Register called twice for driver multiple-register-driver-1\", func() {\n\t\tRegister(\"multiple-register-driver-1\")\n\t})\n\n\t// Should be no 
error.\n\tRegister(\"multiple-register-driver-2\")\n}", "func TestActiveMultiEvent_Deactivate(t *testing.T) {\r\n\tnumber := 10\r\n\tvar events []*ActiveEvent\r\n\tvar mock []*mockUnixHelper\r\n\r\n\tfor i := 0; i < number; i++ {\r\n\t\tunixMock := &mockUnixHelper{}\r\n\t\tnewActive := &ActiveEvent{FileDescriptor: i, unix: unixMock}\r\n\t\tunixMock.On(\"close\", i).Return(nil).Once()\r\n\t\tevents = append(events, newActive)\r\n\t\tmock = append(mock, unixMock)\r\n\t}\r\n\r\n\tnewActiveMulti := ActiveMultiEvent{events: events}\r\n\tnewActiveMulti.Deactivate()\r\n\r\n\trequire.Nil(t, newActiveMulti.events)\r\n\tfor _, event := range events {\r\n\t\trequire.Nil(t, event)\r\n\t}\r\n\tfor _, m := range mock {\r\n\t\tm.AssertExpectations(t)\r\n\t}\r\n}", "func (m *MockEventLogger) AppendMulti(events ...eventlog.EventData) (uint64, uint64, uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range events {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AppendMulti\", varargs...)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(uint64)\n\tret2, _ := ret[2].(uint64)\n\tret3, _ := ret[3].(time.Time)\n\tret4, _ := ret[4].(error)\n\treturn ret0, ret1, ret2, ret3, ret4\n}", "func TestMultiReporterError(t *testing.T) {\n\tr1 := ReporterFunc(func(ctx context.Context, level string, err error) error {\n\t\treturn errors.New(\"boom 1\")\n\t})\n\n\tr2 := ReporterFunc(func(ctx context.Context, level string, err error) error {\n\t\treturn errors.New(\"boom 2\")\n\t})\n\n\th := MultiReporter{r1, r2}\n\n\tctx := WithReporter(context.Background(), h)\n\terr := Report(ctx, errBoom)\n\n\tif _, ok := err.(*MultiError); !ok {\n\t\tt.Fatal(\"Expected a MultiError to be returned\")\n\t}\n}", "func TestResilientMultiWriter(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\twriters []io.Writer\n\t}{\n\t\t{\n\t\t\tname: \"All valid writers\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: 
false,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"All invalid writers\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"First invalid writer\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"First valid writer\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\twriters := tt.writers\n\t\tmultiWriter := MultiLevelWriter(writers...)\n\n\t\tlogger := New(multiWriter).With().Timestamp().Logger().Level(InfoLevel)\n\t\tlogger.Info().Msg(\"Test msg\")\n\n\t\tif len(writers) != writeCalls {\n\t\t\tt.Errorf(\"Expected %d writers to have been called but only %d were.\", len(writers), writeCalls)\n\t\t}\n\t\twriteCalls = 0\n\t}\n}", "func TestOnlyCallOnceOnMultipleDepChanges(t *testing.T) {\n\tr := New()\n\ti := r.CreateInput(1)\n\tc1 := r.CreateCompute1(i, func(v int) int { return v + 1 })\n\tc2 := r.CreateCompute1(i, func(v int) int { return v - 1 })\n\tc3 := r.CreateCompute1(c2, func(v int) int { return v - 1 })\n\tc4 := r.CreateCompute2(c1, c3, func(v1, v3 int) int { return v1 * v3 })\n\tchanged4 := 0\n\tc4.AddCallback(func(int) { changed4++ })\n\ti.SetValue(3)\n\tif changed4 < 1 {\n\t\tt.Fatalf(\"callback function was not called\")\n\t} else if changed4 > 1 {\n\t\tt.Fatalf(\"callback function was called too often\")\n\t}\n}", "func TestSplitListenersToDiffProtocol(t *testing.T) {\n\ttestListeners := []*networkextensionv1.Listener{\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8000,\n\t\t\t\tProtocol: 
\"HTTP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8001,\n\t\t\t\tProtocol: \"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8002,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8003,\n\t\t\t\tProtocol: \"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8004,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t}\n\tliGroup := splitListenersToDiffProtocol(testListeners)\n\tfor _, list := range liGroup {\n\t\tt.Logf(\"%+v\", list)\n\t\ttmpProtocol := make(map[string]struct{})\n\t\tfor _, li := range list {\n\t\t\ttmpProtocol[li.Spec.Protocol] = struct{}{}\n\t\t}\n\t\tif len(tmpProtocol) != 1 {\n\t\t\tt.Errorf(\"list %v contains more than one protocol %v\", list, tmpProtocol)\n\t\t}\n\t}\n}", "func (jm *JobManager) shouldTriggerListeners(t Task) bool {\n\tif typed, isTyped := t.(EventTriggerListenersProvider); isTyped {\n\t\treturn typed.ShouldTriggerListeners()\n\t}\n\n\treturn true\n}", "func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...any) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...interface{}) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func TestLifecycleManyAddons(t *testing.T) {\n\trunLifecycleTestAWS(&LifecycleTestOptions{\n\t\tt: t,\n\t\tSrcDir: \"many-addons\",\n\t\tClusterName: \"minimal.example.com\",\n\t})\n}", "func checkListeners(cli kube.CLIClient, 
namespace string) (diag.Messages, error) {\n\tpods, err := cli.Kube().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{\n\t\t// Find all running pods\n\t\tFieldSelector: \"status.phase=Running\",\n\t\t// Find all injected pods. We don't care about non-injected pods, because the new behavior\n\t\t// mirrors Kubernetes; this is only a breaking change for existing Istio users.\n\t\tLabelSelector: \"security.istio.io/tlsMode=istio\",\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar messages diag.Messages = make([]diag.Message, 0)\n\tg := errgroup.Group{}\n\n\tsem := semaphore.NewWeighted(25)\n\tfor _, pod := range pods.Items {\n\t\tpod := pod\n\t\tif !fromLegacyNetworkingVersion(pod) {\n\t\t\t// Skip check. This pod is already on a version where the change has been made; if they were going\n\t\t\t// to break they would already be broken.\n\t\t\tcontinue\n\t\t}\n\t\tg.Go(func() error {\n\t\t\t_ = sem.Acquire(context.Background(), 1)\n\t\t\tdefer sem.Release(1)\n\t\t\t// Fetch list of all clusters to get which ports we care about\n\t\t\tresp, err := cli.EnvoyDo(context.Background(), pod.Name, pod.Namespace, \"GET\", \"config_dump?resource=dynamic_active_clusters&mask=cluster.name\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"failed to get config dump: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tports, err := extractInboundPorts(resp)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"failed to get ports: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Next, look at what ports the pod is actually listening on\n\t\t\t// This requires parsing the output from ss; the version we use doesn't support JSON\n\t\t\tout, _, err := cli.PodExec(pod.Name, pod.Namespace, \"istio-proxy\", \"ss -ltnH\")\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"executable file not found\") {\n\t\t\t\t\t// Likely distroless or other custom build without ss. 
Nothing we can do here...\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfmt.Println(\"failed to get listener state: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfor _, ss := range strings.Split(out, \"\\n\") {\n\t\t\t\tif len(ss) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbind, port, err := net.SplitHostPort(getColumn(ss, 3))\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"failed to get parse state: \", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tip, _ := netip.ParseAddr(bind)\n\t\t\t\tportn, _ := strconv.Atoi(port)\n\t\t\t\tif _, f := ports[portn]; f {\n\t\t\t\t\tc := ports[portn]\n\t\t\t\t\tif bind == \"\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if bind == \"*\" || ip.IsUnspecified() {\n\t\t\t\t\t\tc.Wildcard = true\n\t\t\t\t\t} else if ip.IsLoopback() {\n\t\t\t\t\t\tc.Lo = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tc.Explicit = true\n\t\t\t\t\t}\n\t\t\t\t\tports[portn] = c\n\t\t\t\t}\n\t\t\t}\n\n\t\t\torigin := &kube3.Origin{\n\t\t\t\tType: gvk.Pod,\n\t\t\t\tFullName: resource.FullName{\n\t\t\t\t\tNamespace: resource.Namespace(pod.Namespace),\n\t\t\t\t\tName: resource.LocalName(pod.Name),\n\t\t\t\t},\n\t\t\t\tResourceVersion: resource.Version(pod.ResourceVersion),\n\t\t\t}\n\t\t\tfor port, status := range ports {\n\t\t\t\t// Binding to localhost no longer works out of the box on Istio 1.10+, give them a warning.\n\t\t\t\tif status.Lo {\n\t\t\t\t\tmessages.Add(msg.NewLocalhostListener(&resource.Instance{Origin: origin}, fmt.Sprint(port)))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn messages, nil\n}", "func (t *Convert) verifyIndividualTypeCheck() {\n\tfor _, inf := range t.Interface {\n\t\tif inf.Callback {\n\t\t\tt.verifyCallbackInterface(inf)\n\t\t}\n\t}\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureListener\", 
reflect.TypeOf((*MockLoadBalance)(nil).EnsureListener), region, listener)\n}", "func checkMultipleSigners(tx authsigning.Tx) error {\n\tdirectSigners := 0\n\tsigsV2, err := tx.GetSignaturesV2()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, sig := range sigsV2 {\n\t\tdirectSigners += countDirectSigners(sig.Data)\n\t\tif directSigners > 1 {\n\t\t\treturn sdkerrors.ErrNotSupported.Wrap(\"txs signed with CLI can have maximum 1 DIRECT signer\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestMultipleDefer(t *testing.T) {\n\tdefer t.Log(1)\n\tdefer t.Log(2)\n\tt.Log(3)\n}", "func (m *MockLoadBalance) EnsureListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (xdcrf *XDCRFactory) registerAsyncListenersOnTargets(pipeline common.Pipeline, logger_ctx *log.LoggerContext) {\n\ttargets := getNozzleList(pipeline.Targets())\n\tnum_of_targets := len(targets)\n\tnum_of_listeners := min(num_of_targets, base.MaxNumberOfAsyncListeners)\n\tload_distribution := base.BalanceLoad(num_of_listeners, num_of_targets)\n\txdcrf.logger.Infof(\"topic=%v, num_of_targets=%v, num_of_listeners=%v, load_distribution=%v\\n\", pipeline.Topic(), num_of_targets, num_of_listeners, load_distribution)\n\n\tfor i := 0; i < num_of_listeners; i++ {\n\t\tdata_failed_cr_event_listener := component.NewDefaultAsyncComponentEventListenerImpl(\n\t\t\tpipeline_utils.GetElementIdFromNameAndIndex(pipeline, base.DataFailedCREventListener, i),\n\t\t\tpipeline.Topic(), logger_ctx)\n\t\tdata_sent_event_listener := component.NewDefaultAsyncComponentEventListenerImpl(\n\t\t\tpipeline_utils.GetElementIdFromNameAndIndex(pipeline, base.DataSentEventListener, i),\n\t\t\tpipeline.Topic(), logger_ctx)\n\t\tget_meta_received_event_listener := 
component.NewDefaultAsyncComponentEventListenerImpl(\n\t\t\tpipeline_utils.GetElementIdFromNameAndIndex(pipeline, base.GetMetaReceivedEventListener, i),\n\t\t\tpipeline.Topic(), logger_ctx)\n\t\tdata_throttled_event_listener := component.NewDefaultAsyncComponentEventListenerImpl(\n\t\t\tpipeline_utils.GetElementIdFromNameAndIndex(pipeline, base.DataThrottledEventListener, i),\n\t\t\tpipeline.Topic(), logger_ctx)\n\n\t\tfor index := load_distribution[i][0]; index < load_distribution[i][1]; index++ {\n\t\t\tout_nozzle := targets[index]\n\t\t\tout_nozzle.RegisterComponentEventListener(common.DataSent, data_sent_event_listener)\n\t\t\tout_nozzle.RegisterComponentEventListener(common.DataFailedCRSource, data_failed_cr_event_listener)\n\t\t\tout_nozzle.RegisterComponentEventListener(common.GetMetaReceived, get_meta_received_event_listener)\n\t\t\tout_nozzle.RegisterComponentEventListener(common.DataThrottled, data_throttled_event_listener)\n\t\t}\n\t}\n}", "func TestMultipleHeartbeatTimeout(t *testing.T) {\n\ts := NewSupervisor(nil)\n\traa := NewRecoverableAction(s)\n\trab := NewRecoverableAction(s)\n\trac := NewRecoverableAction(s)\n\n\ts.AddRecoverable(\"A\", raa)\n\ts.AddRecoverable(\"B\", rab)\n\ts.AddRecoverable(\"C\", rac)\n\n\tt.Logf(\"(A) is '%v'.\", raa.Action(TimeConsumingAction))\n\tt.Logf(\"(B) is '%v'.\", rab.Action(PositiveAction))\n\tt.Logf(\"(C) is '%v'.\", rac.Action(PositiveAction))\n}", "func verifyReceiverLifecycle(t *testing.T, factory component.ReceiverFactory, getConfigFn getReceiverConfigFn) {\n\tctx := context.Background()\n\thost := newAssertNoErrorHost(t)\n\treceiverCreateSet := componenttest.NewNopReceiverCreateSettings()\n\n\tif getConfigFn == nil {\n\t\tgetConfigFn = factory.CreateDefaultConfig\n\t}\n\n\tcreateFns := []createReceiverFn{\n\t\twrapCreateLogsRcvr(factory),\n\t\twrapCreateTracesRcvr(factory),\n\t\twrapCreateMetricsRcvr(factory),\n\t}\n\n\tfor _, createFn := range createFns {\n\t\tfirstRcvr, err := createFn(ctx, 
receiverCreateSet, getConfigFn())\n\t\tif errors.Is(err, componenterror.ErrDataTypeIsNotSupported) {\n\t\t\tcontinue\n\t\t}\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, firstRcvr.Start(ctx, host))\n\t\trequire.NoError(t, firstRcvr.Shutdown(ctx))\n\n\t\tsecondRcvr, err := createFn(ctx, receiverCreateSet, getConfigFn())\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, secondRcvr.Start(ctx, host))\n\t\trequire.NoError(t, secondRcvr.Shutdown(ctx))\n\t}\n}", "func WrapMultiVerifier(payloadType string, threshold int, vL ...signature.Verifier) signature.Verifier {\n\tverifierAdapterL := make([]dsse.Verifier, 0, len(vL))\n\tfor _, v := range vL {\n\t\tpub, err := v.PublicKey()\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tkeyID, err := dsse.SHA256KeyID(pub)\n\t\tif err != nil {\n\t\t\tkeyID = \"\"\n\t\t}\n\n\t\tverifierAdapter := &VerifierAdapter{\n\t\t\tSignatureVerifier: v,\n\t\t\tPub: v.PublicKey,\n\t\t\tPubKeyID: keyID, // We do not want to limit verification to a specific key.\n\t\t}\n\n\t\tverifierAdapterL = append(verifierAdapterL, verifierAdapter)\n\t}\n\n\treturn &wrappedMultiVerifier{\n\t\tvLAdapters: verifierAdapterL,\n\t\tpayloadType: payloadType,\n\t\tthreshold: threshold,\n\t}\n}", "func (s *_default) RegisterListeners(fn ...func()) {\n\ts.listeners = append(s.listeners, fn...)\n}", "func (ew *EventWatcher) EnsureNoEvents(ctx context.Context, duration time.Duration) error {\n\t// First, clears the list of events beforehand.\n\tif _, err := ew.events(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to clear the event list\")\n\t}\n\t// wait, and check the events in the wait.\n\tif err := testing.Sleep(ctx, duration); err != nil {\n\t\treturn errors.Wrap(err, \"failed to wait\")\n\t}\n\tevents, err := ew.events(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to access to the event list\")\n\t}\n\tif len(events) > 0 {\n\t\treturn errors.Errorf(\"there are %d events\", len(events))\n\t}\n\treturn nil\n}", "func 
WrapMultiSignerVerifier(payloadType string, threshold int, svL ...signature.SignerVerifier) signature.SignerVerifier {\n\tsignerL := make([]signature.Signer, 0, len(svL))\n\tverifierL := make([]signature.Verifier, 0, len(svL))\n\tfor _, sv := range svL {\n\t\tsignerL = append(signerL, sv)\n\t\tverifierL = append(verifierL, sv)\n\t}\n\n\tsL := WrapMultiSigner(payloadType, signerL...)\n\tvL := WrapMultiVerifier(payloadType, threshold, verifierL...)\n\n\treturn &wrappedMultiSignerVerifier{\n\t\tsigner: sL,\n\t\tverifier: vL,\n\t}\n}", "func (mr *MockMultiClusterRoleEventHandlerMockRecorder) UpdateMultiClusterRole(old, new interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateMultiClusterRole\", reflect.TypeOf((*MockMultiClusterRoleEventHandler)(nil).UpdateMultiClusterRole), old, new)\n}", "func (setting *MongodbDatabaseCollectionThroughputSetting) updateValidations() []func(old runtime.Object) (admission.Warnings, error) {\n\treturn []func(old runtime.Object) (admission.Warnings, error){\n\t\tfunc(old runtime.Object) (admission.Warnings, error) {\n\t\t\treturn setting.validateResourceReferences()\n\t\t},\n\t\tsetting.validateWriteOnceProperties}\n}", "func NewMultiValidator(validators ProcessesValidators) Validator {\n\treturn &MultiValidator{validators: validators}\n}", "func TestMultiHandlers(t *testing.T) {\n\t// New logger for output stderr\n\tlogStd := New(\"\", LevelDebug, os.Stderr)\n\tAddHandler(logStd)\n\n\t// New logger for output file\n\tfw, err := FileWriter(\"logs/test.log\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlogFile := New(\"\", LevelDebug, fw)\n\tAddHandler(logFile)\n\n\t// Test\n\tDebug(\"MultiHandlers: Debug\")\n\tInfo(\"MultiHandlers: Info\")\n\tWarn(\"MultiHandlers: Warn\")\n\tError(\"MultiHandlers: Error\")\n\t//Panic(\"MultiHandlers: Panic\")\n\t//Fatal(\"MultiHandlers: Fatal\")\n}", "func AssertLogutilEventsMatch(t testing.TB, expected []*logutilpb.Event, actual 
[]*logutilpb.Event) {\n\tt.Helper()\n\n\tf := func(e *logutilpb.Event) *logutilpb.Event {\n\t\treturn &logutilpb.Event{\n\t\t\tValue: e.Value,\n\t\t}\n\t}\n\texpected = clearEvents(expected, f)\n\tactual = clearEvents(actual, f)\n\n\texpectedBytes, err := json.Marshal(expected)\n\tif !assert.NoError(t, err, \"could not marshal expected events as json, assertion messages will be impacted\") {\n\t\texpectedBytes = nil\n\t}\n\n\tactualBytes, err := json.Marshal(actual)\n\tif !assert.NoError(t, err, \"could not marshal actual events as json, assertion messages will be impacted\") {\n\t\tactualBytes = nil\n\t}\n\n\tif !assert.Equal(t, len(expected), len(actual), \"differing number of events; expected %d, have %d\\nexpected bytes: %s\\nactual bytes: %s\\n\", len(expected), len(actual), expectedBytes, actualBytes) {\n\t\treturn\n\t}\n\n\tfor i, expectedEvent := range expected {\n\t\tactualEvent := actual[i]\n\t\tassert.Regexp(t, expectedEvent.Value, actualEvent.Value, \"event %d mismatch\", i)\n\t}\n}", "func (c *DatasetComponent) theseGenerateDownloadsEventsAreProduced(events *godog.Table) error {\n\texpected, err := assistdog.NewDefault().CreateSlice(new(download.GenerateDownloads), events)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create slice from godog table: %w\", err)\n\t}\n\n\tvar got []*download.GenerateDownloads\n\tlisten := true\n\n\tfor listen {\n\t\tselect {\n\t\t// ToDo: Set timeout variable\n\n\t\tcase <-time.After(time.Second * 15):\n\t\t\tlisten = false\n\t\tcase <-c.consumer.Channels().Closer:\n\t\t\treturn errors.New(\"closer channel closed\")\n\t\tcase msg, ok := <-c.consumer.Channels().Upstream:\n\t\t\tif !ok {\n\t\t\t\treturn errors.New(\"upstream channel closed\")\n\t\t\t}\n\n\t\t\tvar e download.GenerateDownloads\n\t\t\tvar s = schema.GenerateCMDDownloadsEvent\n\n\t\t\tif err := s.Unmarshal(msg.GetData(), &e); err != nil {\n\t\t\t\tmsg.Commit()\n\t\t\t\tmsg.Release()\n\t\t\t\treturn fmt.Errorf(\"error unmarshalling message: %w\", 
err)\n\t\t\t}\n\n\t\t\tmsg.Commit()\n\t\t\tmsg.Release()\n\n\t\t\tgot = append(got, &e)\n\t\t}\n\t}\n\tif diff := cmp.Diff(got, expected); diff != \"\" {\n\t\treturn fmt.Errorf(\"-got +expected)\\n%s\", diff)\n\t}\n\n\treturn nil\n}", "func (s *Suite) TestMaxRetriesMulti() {\n\tsts := SimpleTestSetup{\n\t\tNamespaceName: \"TestMaxRetries\",\n\t\tWorkerName: \"worker\",\n\t\tWorkSpecName: \"spec\",\n\t\tWorkSpecData: map[string]interface{}{\n\t\t\t\"max_getwork\": 2,\n\t\t\t\"max_retries\": 1,\n\t\t},\n\t}\n\tsts.SetUp(s)\n\tdefer sts.TearDown(s)\n\n\tfor _, name := range []string{\"a\", \"b\", \"c\", \"d\"} {\n\t\t_, err := sts.AddWorkUnit(name)\n\t\tif !s.NoError(err) {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Now we should be able to request work and get both a and b\n\treq := coordinate.AttemptRequest{\n\t\tNumberOfWorkUnits: 10,\n\t}\n\tattempts, err := sts.Worker.RequestAttempts(req)\n\tif s.NoError(err) {\n\t\tif s.Len(attempts, 2) {\n\t\t\ts.Equal(\"a\", attempts[0].WorkUnit().Name())\n\t\t\ts.Equal(\"b\", attempts[1].WorkUnit().Name())\n\t\t}\n\t}\n\n\t// Let the first work unit finish and the second time out\n\ts.Clock.Add(1 * time.Minute)\n\terr = attempts[0].Finish(nil)\n\ts.NoError(err)\n\n\ts.Clock.Add(1 * time.Hour)\n\n\t// Now get two more work units. 
We expect the system to find\n\t// b and c, notice that b is expired, and just return c.\n\tattempts, err = sts.Worker.RequestAttempts(req)\n\tif s.NoError(err) {\n\t\tif s.Len(attempts, 1) {\n\t\t\ts.Equal(\"c\", attempts[0].WorkUnit().Name())\n\t\t}\n\t}\n}", "func AssertNoLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...any) {\n\tt.Helper()\n\n\tif container == nil {\n\t\treturn\n\t}\n\n\tassert.Equal(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func ExampleELB_CreateLoadBalancerListeners_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerListenersInput{\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t}\n\n\tresult, err := svc.CreateLoadBalancerListeners(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateListenerException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateListenerException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an 
error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func TestCmdConfigChangeEvents(t *testing.T) {\n\tdefer cleanTestArtifacts(t)\n\tvar err error\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\n\toldconf := &guerrilla.AppConfig{}\n\tif err := oldconf.Load([]byte(configJsonA)); err != nil {\n\t\tt.Error(\"configJsonA is invalid\", err)\n\t}\n\n\tnewconf := &guerrilla.AppConfig{}\n\tif err := newconf.Load([]byte(configJsonB)); err != nil {\n\t\tt.Error(\"configJsonB is invalid\", err)\n\t}\n\n\tnewerconf := &guerrilla.AppConfig{}\n\tif err := newerconf.Load([]byte(configJsonC)); err != nil {\n\t\tt.Error(\"configJsonC is invalid\", err)\n\t}\n\n\texpectedEvents := map[guerrilla.Event]bool{\n\t\tguerrilla.EventConfigBackendConfig: false,\n\t\tguerrilla.EventConfigServerNew: false,\n\t}\n\tmainlog, err = getTestLog()\n\tif err != nil {\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\n\tbcfg := backends.BackendConfig{\"log_received_mails\": true}\n\tbackend, err := backends.New(bcfg, mainlog)\n\tapp, err := guerrilla.New(oldconf, backend, mainlog)\n\tif err != nil {\n\t\tt.Error(\"Failed to create new app\", err)\n\t}\n\ttoUnsubscribe := map[guerrilla.Event]func(c *guerrilla.AppConfig){}\n\ttoUnsubscribeS := map[guerrilla.Event]func(c *guerrilla.ServerConfig){}\n\n\tfor event := range expectedEvents {\n\t\t// Put in anon func since range is overwriting event\n\t\tfunc(e guerrilla.Event) {\n\t\t\tif strings.Index(e.String(), \"server_change\") == 0 {\n\t\t\t\tf := func(c *guerrilla.ServerConfig) {\n\t\t\t\t\texpectedEvents[e] = true\n\t\t\t\t}\n\t\t\t\t_ = app.Subscribe(e, f)\n\t\t\t\ttoUnsubscribeS[e] = f\n\t\t\t} else {\n\t\t\t\tf := func(c *guerrilla.AppConfig) {\n\t\t\t\t\texpectedEvents[e] = true\n\t\t\t\t}\n\t\t\t\t_ = 
app.Subscribe(e, f)\n\t\t\t\ttoUnsubscribe[e] = f\n\t\t\t}\n\n\t\t}(event)\n\t}\n\n\t// emit events\n\tnewconf.EmitChangeEvents(oldconf, app)\n\tnewerconf.EmitChangeEvents(newconf, app)\n\t// unsubscribe\n\tfor unevent, unfun := range toUnsubscribe {\n\t\t_ = app.Unsubscribe(unevent, unfun)\n\t}\n\tfor unevent, unfun := range toUnsubscribeS {\n\t\t_ = app.Unsubscribe(unevent, unfun)\n\t}\n\n\tfor event, val := range expectedEvents {\n\t\tif val == false {\n\t\t\tt.Error(\"Did not fire config change event:\", event)\n\t\t\tt.FailNow()\n\t\t}\n\t}\n\n}", "func AssertNoLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...interface{}) {\n\tt.Helper()\n\n\tif container == nil {\n\t\treturn\n\t}\n\n\tassert.Equal(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func TestDefaultBrokerWithManyTriggers(t *testing.T) {\n\tclient := setup(t, true)\n\tdefer tearDown(client)\n\n\t// Label namespace so that it creates the default broker.\n\tif err := client.LabelNamespace(map[string]string{\"knative-eventing-injection\": \"enabled\"}); err != nil {\n\t\tt.Fatalf(\"Error annotating namespace: %v\", err)\n\t}\n\n\t// Wait for default broker ready.\n\tif err := client.WaitForResourceReady(defaultBrokerName, common.BrokerTypeMeta); err != nil {\n\t\tt.Fatalf(\"Error waiting for default broker to become ready: %v\", err)\n\t}\n\n\t// These are the event types and sources that triggers will listen to, as well as the selectors\n\t// to set in the subscriber and services pods.\n\teventsToReceive := []eventReceiver{\n\t\t{eventTypeAndSource{Type: any, Source: any}, newSelector()},\n\t\t{eventTypeAndSource{Type: eventType1, Source: any}, newSelector()},\n\t\t{eventTypeAndSource{Type: any, Source: eventSource1}, newSelector()},\n\t\t{eventTypeAndSource{Type: eventType1, Source: eventSource1}, newSelector()},\n\t}\n\n\t// Create subscribers.\n\tfor _, event := range eventsToReceive {\n\t\tsubscriberName := name(\"dumper\", 
event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tpod := resources.EventLoggerPod(subscriberName)\n\t\tclient.CreatePodOrFail(pod, common.WithService(subscriberName))\n\t}\n\n\t// Create triggers.\n\tfor _, event := range eventsToReceive {\n\t\ttriggerName := name(\"trigger\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tsubscriberName := name(\"dumper\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tclient.CreateTriggerOrFail(triggerName,\n\t\t\tresources.WithSubscriberRefForTrigger(subscriberName),\n\t\t\tresources.WithTriggerFilter(event.typeAndSource.Source, event.typeAndSource.Type),\n\t\t)\n\t}\n\n\t// Wait for all test resources to become ready before sending the events.\n\tif err := client.WaitForAllTestResourcesReady(); err != nil {\n\t\tt.Fatalf(\"Failed to get all test resources ready: %v\", err)\n\t}\n\n\t// These are the event types and sources that will be send.\n\teventsToSend := []eventTypeAndSource{\n\t\t{eventType1, eventSource1},\n\t\t{eventType1, eventSource2},\n\t\t{eventType2, eventSource1},\n\t\t{eventType2, eventSource2},\n\t}\n\t// Map to save the expected events per dumper so that we can verify the delivery.\n\texpectedEvents := make(map[string][]string)\n\t// Map to save the unexpected events per dumper so that we can verify that they weren't delivered.\n\tunexpectedEvents := make(map[string][]string)\n\tfor _, eventToSend := range eventsToSend {\n\t\t// Create cloud event.\n\t\t// Using event type and source as part of the body for easier debugging.\n\t\tbody := fmt.Sprintf(\"Body-%s-%s\", eventToSend.Type, eventToSend.Source)\n\t\tcloudEvent := &resources.CloudEvent{\n\t\t\tSource: eventToSend.Source,\n\t\t\tType: eventToSend.Type,\n\t\t\tData: fmt.Sprintf(`{\"msg\":%q}`, body),\n\t\t}\n\t\t// Create sender pod.\n\t\tsenderPodName := name(\"sender\", eventToSend.Type, eventToSend.Source)\n\t\tif err := client.SendFakeEventToAddressable(senderPodName, defaultBrokerName, common.BrokerTypeMeta, 
cloudEvent); err != nil {\n\t\t\tt.Fatalf(\"Error send cloud event to broker: %v\", err)\n\t\t}\n\n\t\t// Check on every dumper whether we should expect this event or not, and add its body\n\t\t// to the expectedEvents/unexpectedEvents maps.\n\t\tfor _, eventToReceive := range eventsToReceive {\n\t\t\tsubscriberName := name(\"dumper\", eventToReceive.typeAndSource.Type, eventToReceive.typeAndSource.Source)\n\t\t\tif shouldExpectEvent(&eventToSend, &eventToReceive, t.Logf) {\n\t\t\t\texpectedEvents[subscriberName] = append(expectedEvents[subscriberName], body)\n\t\t\t} else {\n\t\t\t\tunexpectedEvents[subscriberName] = append(unexpectedEvents[subscriberName], body)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, event := range eventsToReceive {\n\t\tsubscriberName := name(\"dumper\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tif err := client.CheckLog(subscriberName, common.CheckerContainsAll(expectedEvents[subscriberName])); err != nil {\n\t\t\tt.Fatalf(\"Event(s) not found in logs of subscriber pod %q: %v\", subscriberName, err)\n\t\t}\n\t\t// At this point all the events should have been received in the pod.\n\t\t// We check whether we find unexpected events. 
If so, then we fail.\n\t\tfound, err := client.FindAnyLogContents(subscriberName, unexpectedEvents[subscriberName])\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed querying to find log contents in pod %q: %v\", subscriberName, err)\n\t\t}\n\t\tif found {\n\t\t\tt.Fatalf(\"Unexpected event(s) found in logs of subscriber pod %q\", subscriberName)\n\t\t}\n\t}\n}", "func IncrementSuccessfulMultiCreate(counts int) {\n\tincSuccessfulRequestByHandler(\"multi_create\", counts)\n}", "func (_m *Pipeline_mgr_iface) CheckPipelines() {\n\t_m.Called()\n}", "func verifyExtensionLifecycle(t *testing.T, factory extension.Factory, getConfigFn getExtensionConfigFn) {\n\tctx := context.Background()\n\thost := newAssertNoErrorHost(t)\n\textCreateSet := extensiontest.NewNopCreateSettings()\n\n\tif getConfigFn == nil {\n\t\tgetConfigFn = factory.CreateDefaultConfig\n\t}\n\n\tfirstExt, err := factory.CreateExtension(ctx, extCreateSet, getConfigFn())\n\trequire.NoError(t, err)\n\trequire.NoError(t, firstExt.Start(ctx, host))\n\trequire.NoError(t, firstExt.Shutdown(ctx))\n\n\tsecondExt, err := factory.CreateExtension(ctx, extCreateSet, getConfigFn())\n\trequire.NoError(t, err)\n\trequire.NoError(t, secondExt.Start(ctx, host))\n\trequire.NoError(t, secondExt.Shutdown(ctx))\n}", "func TestValidEvents(t *testing.T) {\n\ttestCases := []struct {\n\t\tevents []string\n\t\terrCode APIErrorCode\n\t}{\n\t\t// Return error for unknown event element.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:UnknownAPI\",\n\t\t\t},\n\t\t\terrCode: ErrEventNotification,\n\t\t},\n\t\t// Return success for supported event.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:ObjectCreated:Put\",\n\t\t\t},\n\t\t\terrCode: ErrNone,\n\t\t},\n\t\t// Return success for supported events.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:ObjectCreated:*\",\n\t\t\t\t\"s3:ObjectRemoved:*\",\n\t\t\t},\n\t\t\terrCode: ErrNone,\n\t\t},\n\t\t// Return error for empty event list.\n\t\t{\n\t\t\tevents: []string{\"\"},\n\t\t\terrCode: 
// theseCantabularGeneratorDownloadsEventsAreProduced consumes Kafka
// messages until 15 seconds pass without one, decoding each into a
// CantabularGeneratorDownloads event, and then checks the collected
// events against the rows of the godog table.
func (c *DatasetComponent) theseCantabularGeneratorDownloadsEventsAreProduced(events *godog.Table) error {
	assist := assistdog.NewDefault()
	// Register a parser so table cells can populate []string fields.
	assist.RegisterParser([]string{}, arrayParser)
	expected, err := assist.CreateSlice(new(download.CantabularGeneratorDownloads), events)
	if err != nil {
		return fmt.Errorf("failed to create slice from godog table: %w", err)
	}

	var got []*download.CantabularGeneratorDownloads
	listen := true

	for listen {
		select {
		// ToDo: Set timeout variable
		case <-time.After(time.Second * 15):
			// No message within the window: stop listening and compare.
			listen = false
		case <-c.consumer.Channels().Closer:
			return errors.New("closer channel closed")
		case msg, ok := <-c.consumer.Channels().Upstream:
			if !ok {
				return errors.New("upstream channel closed")
			}

			var e download.CantabularGeneratorDownloads
			var s = schema.GenerateCantabularDownloadsEvent

			if err := s.Unmarshal(msg.GetData(), &e); err != nil {
				// Commit/release even on decode failure so the message
				// is not redelivered.
				msg.Commit()
				msg.Release()
				return fmt.Errorf("error unmarshalling message: %w", err)
			}

			msg.Commit()
			msg.Release()

			got = append(got, &e)
		}
	}
	// Compare everything received against the expected table rows.
	if diff := cmp.Diff(got, expected); diff != "" {
		return fmt.Errorf("-got +expected)\n%s", diff)
	}

	return nil
}
1, 2, 3, 5, 8, 13, 21, 34}\n\t\tstringEvents = []interface{}{\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"}\n\t\tintSubs = make([]*testSubscriber, 0, numSubs)\n\t\tstringSubs = make([]*testSubscriber, 0, numSubs)\n\t\tstopper = NewStopper()\n\t)\n\n\tintFeed := &Feed{}\n\tstringFeed := &Feed{}\n\n\tfor i := 0; i < numSubs; i++ {\n\t\t// int subscriber\n\t\tts := newSubscriber(intFeed)\n\t\tintSubs = append(intSubs, ts)\n\t\tstopper.RunWorker(ts.readAll)\n\n\t\t// string subscriber\n\t\tts = newSubscriber(stringFeed)\n\t\tstringSubs = append(stringSubs, ts)\n\t\tstopper.RunWorker(ts.readAll)\n\t}\n\n\tfor i := 0; i < len(intEvents); i++ {\n\t\tintFeed.Publish(intEvents[i])\n\t\tstringFeed.Publish(stringEvents[i])\n\t}\n\tintFeed.Close()\n\tstringFeed.Close()\n\tstopper.Stop()\n\n\t// Wait for stopper to finish, meaning all publishers have ceased.\n\tselect {\n\tcase <-stopper.IsStopped():\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatalf(\"stopper failed to complete after 5 seconds.\")\n\t}\n\n\tfor i, ts := range intSubs {\n\t\tif a, e := ts.received, intEvents; !reflect.DeepEqual(a, e) {\n\t\t\tt.Errorf(\"int subscriber %d received incorrect events %v, expected %v\", i, a, e)\n\t\t}\n\t}\n\tfor i, ts := range stringSubs {\n\t\tif a, e := ts.received, stringEvents; !reflect.DeepEqual(a, e) {\n\t\t\tt.Errorf(\"int subscriber %d received incorrect events %v, expected %v\", i, a, e)\n\t\t}\n\t}\n}", "func TestLimitListenerError(t *testing.T) {\n\tdonec := make(chan bool, 1)\n\tgo func() {\n\t\tconst n = 2\n\t\tll := LimitListener(errorListener{}, n)\n\t\tfor i := 0; i < n+1; i++ {\n\t\t\t_, err := ll.Accept()\n\t\t\tif err != errFake {\n\t\t\t\tt.Fatalf(\"Accept error = %v; want errFake\", err)\n\t\t\t}\n\t\t}\n\t\tdonec <- true\n\t}()\n\tselect {\n\tcase <-donec:\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timeout. 
deadlock?\")\n\t}\n}", "func (e *Eval) assertionsConsistent(f *Flow, list []*reflow.Assertions) error {\n\tif !f.Op.External() {\n\t\treturn nil\n\t}\n\t// Add assertions of dependencies of flow.\n\tas := f.depAssertions()\n\tas = append(as, list...)\n\t_, err := reflow.MergeAssertions(as...)\n\treturn err\n}", "func HandleCreateMultiEvents(w rest.ResponseWriter, req *rest.Request) {\n\tif err := RequireWriteKey(w, req); err != nil {\n\t\trest.Error(w, err.Error(), err.(StatusError).Code)\n\t\treturn\n\t}\n\n\tproject := currentProject(req)\n\tvar events CreateMultipleEventParams\n\n\tif err := req.DecodeJsonPayload(&events); err != nil {\n\t\trest.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tresult, err := createMultipleEvents(project, events)\n\n\tif err != nil {\n\t\trest.Error(w, err.Error(), http.StatusBadRequest)\n\t} else {\n\t\tw.WriteJson(result)\n\t}\n}", "func TestMultiRoutineAccess_logger(t *testing.T) {\n\t// storing address of *Logger instance for validation.\n\tmsgArray := make([]string, 0)\n\n\t// wait-group to make sure a certain number of go-routine(s)\n\t// has finished its task.\n\tvar wgroup sync.WaitGroup\n\n\tfor i := 0; i < 10; i++ {\n\t\t// updates the wait-group counter.\n\t\twgroup.Add(1)\n\n\t\tgo func(idx int) {\n\t\t\t// decreses the wait-group counter by 1.\n\t\t\t// When the counter returns to 0, the wait-group will end the \"wait\".\n\t\t\tdefer wgroup.Done()\n\n\t\t\tlog := logger.GetLogger()\n\t\t\t// append the address value of instance \"log\"\n\t\t\tlock.Lock()\n\t\t\tmsgArray = append(msgArray, fmt.Sprintf(\"%p\", log))\n\t\t\tlock.Unlock()\n\t\t\tlog.Log(fmt.Sprintf(\"this is a log entry from [%v]\\n\", idx))\n\t\t}(i)\n\t}\n\twgroup.Wait()\n\n\t// verification\n\tif len(msgArray) == 0 {\n\t\tt.Fatalf(\"expect to have a least one message\")\n\t}\n\taddrLine := msgArray[0]\n\tfor i := 1; i < len(msgArray); i++ {\n\t\tline := msgArray[i]\n\t\tif addrLine != line {\n\t\t\tt.Errorf(\"expect both lines 
// TestIntegrationValidationMultiEnvironment runs monaco in dry-run mode
// against the multi-environment fixture (environmentsFile/folder) and
// expects a zero exit status, i.e. the configuration validates cleanly.
func TestIntegrationValidationMultiEnvironment(t *testing.T) {

	statusCode := RunImpl([]string{
		"monaco",
		"--environments", environmentsFile,
		"--dry-run",
		folder,
	}, util.CreateTestFileSystem())

	assert.Equal(t, statusCode, 0)
}
state.Validators.CopyIncrementProposerPriority(1)\n\tSaveState(stateDB, state)\n\tdefer tearDown(t)\n\n\t_, valOld := state.Validators.GetByIndex(0)\n\tvar pubkeyOld = valOld.PubKey\n\tpubkey := ed25519.GenPrivKey().PubKey()\n\tconst height = 1\n\n\t// Swap the first validator with a new one (validator set size stays the same).\n\theader, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey)\n\n\t// Save state etc.\n\tvar err error\n\tvar validatorUpdates []*types.Validator\n\tvalidatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates)\n\trequire.NoError(t, err)\n\tstate, err = updateState(state, blockID, &header, responses, validatorUpdates)\n\trequire.Nil(t, err)\n\tnextHeight := state.LastBlockHeight + 1\n\tsaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators)\n\n\t// Load nextheight, it should be the oldpubkey.\n\tv0, err := LoadValidators(stateDB, nextHeight)\n\tassert.Nil(t, err)\n\tassert.Equal(t, valSetSize, v0.Size())\n\tindex, val := v0.GetByAddress(pubkeyOld.Address())\n\tassert.NotNil(t, val)\n\tif index < 0 {\n\t\tt.Fatal(\"expected to find old validator\")\n\t}\n\n\t// Load nextheight+1, it should be the new pubkey.\n\tv1, err := LoadValidators(stateDB, nextHeight+1)\n\tassert.Nil(t, err)\n\tassert.Equal(t, valSetSize, v1.Size())\n\tindex, val = v1.GetByAddress(pubkey.Address())\n\tassert.NotNil(t, val)\n\tif index < 0 {\n\t\tt.Fatal(\"expected to find newly added validator\")\n\t}\n}", "func NewMultiWatcher(configs []Config) (mw *MultiWatcher, err error) {\n\tmw = &MultiWatcher{\n\t\t[]*Watcher{},\n\t}\n\tfor _, c := range configs {\n\t\tw, err := NewWatcher(c)\n\t\tmw.watchers = append(mw.watchers, w)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn\n}", "func ExampleELB_CreateLoadBalancerListeners_shared01() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerListenersInput{\n\t\tListeners: 
[]*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(443),\n\t\t\t\tProtocol: aws.String(\"HTTPS\"),\n\t\t\t\tSSLCertificateId: aws.String(\"arn:aws:iam::123456789012:server-certificate/my-server-cert\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t}\n\n\tresult, err := svc.CreateLoadBalancerListeners(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateListenerException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateListenerException, aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (p *EventProber) ExpectEvents(ids []string) {\n\tp.appendIds(ids...)\n}", "func TestMultiLoggers(t *testing.T) {\n\t// New logger for output stderr\n\tlogStd := New(\"\", LevelDebug, os.Stderr)\n\tlogStd.Info(\"a stderr logger\")\n\n\t// New logger for output file\n\tfw, err := FileWriter(\"logs/test.log\")\n\tif err != nil {\n\t\tpanic(err)\n\t\treturn\n\t}\n\tlogFile := New(\"\", LevelDebug, fw)\n\tlogFile.Info(\"a file logger\")\n}", "func (m *MockMetrics) MultiCreateSuccessResponseCounter() 
// MultiCreateSuccessResponseCounter registers an expected call of
// MockMetrics.MultiCreateSuccessResponseCounter on the mock recorder.
// gomock-generated boilerplate; returns the *gomock.Call so callers can
// chain Times/Return expectations.
func (mr *MockMetricsMockRecorder) MultiCreateSuccessResponseCounter() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MultiCreateSuccessResponseCounter", reflect.TypeOf((*MockMetrics)(nil).MultiCreateSuccessResponseCounter))
}
messages {\n\t\t\t\tmessages[i] = fmt.Sprintf(\"partitionKey: %s, message for topic: %s, index: %03d, uniqueId: %s\", metadata[messageKey], topicName, i, uuid.New().String())\n\t\t\t}\n\n\t\t\t// add the messages as expectations to the watchers\n\t\t\tfor _, messageWatcher := range messageWatchers {\n\t\t\t\tmessageWatcher.ExpectStrings(messages...)\n\t\t\t}\n\n\t\t\t// get the sidecar (dapr) client\n\t\t\tclient := sidecar.GetClient(ctx, sidecarName)\n\n\t\t\t// publish messages\n\t\t\tctx.Logf(\"Publishing messages. sidecarName: %s, topicName: %s\", sidecarName, topicName)\n\n\t\t\tvar publishOptions dapr.PublishEventOption\n\n\t\t\tif metadata != nil {\n\t\t\t\tpublishOptions = dapr.PublishEventWithMetadata(metadata)\n\t\t\t}\n\n\t\t\tfor _, message := range messages {\n\t\t\t\tctx.Logf(\"Publishing: %q\", message)\n\t\t\t\tvar err error\n\n\t\t\t\tif publishOptions != nil {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message, publishOptions)\n\t\t\t\t} else {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message)\n\t\t\t\t}\n\t\t\t\trequire.NoError(ctx, err, \"SNSSQSMultipleSubsDifferentConsumerIDs - error publishing message\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tassertMessages := func(timeout time.Duration, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// assert for messages\n\t\t\tfor _, m := range messageWatchers {\n\t\t\t\tif !m.Assert(ctx, 25*timeout) {\n\t\t\t\t\tctx.Errorf(\"SNSSQSMultipleSubsDifferentConsumerIDs - message assertion failed for watcher: %#v\\n\", m)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tflow.New(t, \"SNSSQS certification - single publisher and multiple subscribers with different consumer IDs\").\n\n\t\t// Run subscriberApplication app1\n\t\tStep(app.Run(appID1, fmt.Sprintf(\":%d\", appPort),\n\t\t\tsubscriberApplication(appID1, topicActiveName, consumerGroup1))).\n\n\t\t// Run the Dapr sidecar with ConsumerID 
\"PUBSUB_AWS_SNSSQS_QUEUE_1\"\n\t\tStep(sidecar.Run(sidecarName1,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/consumer_one\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort)),\n\t\t\t)...,\n\t\t)).\n\n\t\t// Run subscriberApplication app2\n\t\tStep(app.Run(appID2, fmt.Sprintf(\":%d\", appPort+portOffset),\n\t\t\tsubscriberApplication(appID2, topicActiveName, consumerGroup2))).\n\n\t\t// RRun the Dapr sidecar with ConsumerID \"PUBSUB_AWS_SNSSQS_QUEUE_2\"\n\t\tStep(sidecar.Run(sidecarName2,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/consumer_two\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort+portOffset)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort+portOffset)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort+portOffset)),\n\t\t\t\tembedded.WithProfilePort(strconv.Itoa(runtime.DefaultProfilePort+portOffset)),\n\t\t\t)...,\n\t\t)).\n\t\tStep(\"publish messages to ==>\"+topicActiveName, publishMessages(metadata, sidecarName1, topicActiveName, consumerGroup1)).\n\t\tStep(\"verify if app1, app2 together have recevied messages published to topic1\", assertMessages(10*time.Second, consumerGroup1)).\n\t\tStep(\"reset\", flow.Reset(consumerGroup1, consumerGroup2)).\n\t\tRun()\n}", "func waitForRolloutsChange(w IRolloutWatcher, wantedLen int) {\n\tr, ok := w.(*rolloutWatcher)\n\tif !ok {\n\t\tpanic(\"rolloutWatcher instance is expected\")\n\t}\n\tdeadline := time.After(500 * time.Millisecond)\n\tfor {\n\t\tr.mu.RLock()\n\t\trollouts := r.rolloutTypes\n\t\tif len(rollouts) == wantedLen {\n\t\t\treturn\n\t\t}\n\t\tr.mu.RUnlock()\n\n\t\tselect {\n\t\tcase 
<-deadline:\n\t\t\tr.mu.RLock()\n\t\t\tdefer r.mu.RUnlock()\n\t\t\tpanic(fmt.Errorf(\"rollouts not changed! Last: %#v\", rollouts))\n\t\tcase <-time.After(1 * time.Millisecond):\n\t\t\t// poll rollouts\n\t\t}\n\t}\n}", "func (mr *MockConnMockRecorder) Multi(ops ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Multi\", reflect.TypeOf((*MockConn)(nil).Multi), ops...)\n}", "func (res *Result) MultiCheckContain(expecteds []string) {\n\tfor _, expected := range expecteds {\n\t\tres.CheckContain(expected)\n\t}\n}", "func (p *statusUpdate) ProcessListeners(event *statusupdate.Event) {\n\tif event != nil && event.V1() != nil {\n\t\treturn\n\t}\n\tfor _, listener := range p.listeners {\n\t\tlistener.OnV0Events([]*pb_eventstream.Event{event.V0()})\n\t}\n}", "func (subscription *NamespacesTopicsSubscription) updateValidations() []func(old runtime.Object) (admission.Warnings, error) {\n\treturn []func(old runtime.Object) (admission.Warnings, error){\n\t\tfunc(old runtime.Object) (admission.Warnings, error) {\n\t\t\treturn subscription.validateResourceReferences()\n\t\t},\n\t\tsubscription.validateWriteOnceProperties}\n}", "func WatchEventSequenceVerifier(ctx context.Context, dc dynamic.Interface, resourceType schema.GroupVersionResource, namespace string, resourceName string, listOptions metav1.ListOptions, expectedWatchEvents []watch.Event, scenario func(*watchtools.RetryWatcher) []watch.Event, retryCleanup func() error) {\n\tlistWatcher := &cache.ListWatch{\n\t\tWatchFunc: func(listOptions metav1.ListOptions) (watch.Interface, error) {\n\t\t\treturn dc.Resource(resourceType).Namespace(namespace).Watch(ctx, listOptions)\n\t\t},\n\t}\n\n\tretries := 3\nretriesLoop:\n\tfor try := 1; try <= retries; try++ {\n\t\tinitResource, err := dc.Resource(resourceType).Namespace(namespace).List(ctx, listOptions)\n\t\tExpectNoError(err, \"Failed to fetch initial resource\")\n\n\t\tresourceWatch, err := 
// WatchEventSequenceVerifier runs scenario against a fresh retry watcher
// and verifies that every expected watch event type appears (in order of
// first unmatched occurrence) among the actual events. The whole
// scenario is retried up to 3 times, with retryCleanup run between
// attempts; on the final attempt any missing events fail the test.
func WatchEventSequenceVerifier(ctx context.Context, dc dynamic.Interface, resourceType schema.GroupVersionResource, namespace string, resourceName string, listOptions metav1.ListOptions, expectedWatchEvents []watch.Event, scenario func(*watchtools.RetryWatcher) []watch.Event, retryCleanup func() error) {
	listWatcher := &cache.ListWatch{
		WatchFunc: func(listOptions metav1.ListOptions) (watch.Interface, error) {
			return dc.Resource(resourceType).Namespace(namespace).Watch(ctx, listOptions)
		},
	}

	retries := 3
retriesLoop:
	for try := 1; try <= retries; try++ {
		// List first so the watch can start from a known resource version.
		initResource, err := dc.Resource(resourceType).Namespace(namespace).List(ctx, listOptions)
		ExpectNoError(err, "Failed to fetch initial resource")

		resourceWatch, err := watchtools.NewRetryWatcher(initResource.GetResourceVersion(), listWatcher)
		ExpectNoError(err, "Failed to create a resource watch of %v in namespace %v", resourceType.Resource, namespace)

		// NOTE the test may need access to the events to see what's going on, such as a change in status
		actualWatchEvents := scenario(resourceWatch)
		errs := sets.NewString()
		gomega.Expect(len(expectedWatchEvents)).To(gomega.BeNumerically("<=", len(actualWatchEvents)), "Did not get enough watch events")

		totalValidWatchEvents := 0
		// Maps actual-event index -> matched expected-event index, so each
		// actual event is consumed by at most one expectation.
		foundEventIndexes := map[int]*int{}

		for watchEventIndex, expectedWatchEvent := range expectedWatchEvents {
			foundExpectedWatchEvent := false
		actualWatchEventsLoop:
			for actualWatchEventIndex, actualWatchEvent := range actualWatchEvents {
				if foundEventIndexes[actualWatchEventIndex] != nil {
					continue actualWatchEventsLoop
				}
				if actualWatchEvent.Type == expectedWatchEvent.Type {
					foundExpectedWatchEvent = true
					foundEventIndexes[actualWatchEventIndex] = &watchEventIndex
					break actualWatchEventsLoop
				}
			}
			if !foundExpectedWatchEvent {
				errs.Insert(fmt.Sprintf("Watch event %v not found", expectedWatchEvent.Type))
			}
			totalValidWatchEvents++
		}
		// Clean up between attempts regardless of match outcome.
		err = retryCleanup()
		ExpectNoError(err, "Error occurred when cleaning up resources")
		if errs.Len() > 0 && try < retries {
			fmt.Println("invariants violated:\n", strings.Join(errs.List(), "\n - "))
			continue retriesLoop
		}
		if errs.Len() > 0 {
			Failf("Unexpected error(s): %v", strings.Join(errs.List(), "\n - "))
		}
		ExpectEqual(totalValidWatchEvents, len(expectedWatchEvents), "Error: there must be an equal amount of total valid watch events (%d) and expected watch events (%d)", totalValidWatchEvents, len(expectedWatchEvents))
		break retriesLoop
	}
}
// TestSQLUpdateManyConcurrentConflictRollback fires 50 concurrent
// UpdateMany calls that all write version 1 of the root role for the
// same GUN. Exactly one must win; the rest must conflict and roll back.
func TestSQLUpdateManyConcurrentConflictRollback(t *testing.T) {
	dbStore, cleanup := sqldbSetup(t)
	defer cleanup()

	var gun data.GUN = "testGUN"
	concurrency := 50
	var wg sync.WaitGroup

	errCh := make(chan error)

	for i := 0; i < concurrency; i++ {
		// Each update carries a distinct payload byte so the writes differ
		// while targeting the same (gun, role, version) row.
		tufObj := SampleCustomTUFObj(gun, data.CanonicalRootRole, 1, []byte{byte(i)})
		updates := []MetaUpdate{MakeUpdate(tufObj)}
		wg.Add(1)
		go func() {
			defer wg.Done()
			errCh <- dbStore.UpdateMany(gun, updates)
		}()
	}

	// Close errCh once every worker has reported, ending the range below.
	go func() {
		wg.Wait()
		close(errCh)
	}()

	successes := 0
	for err := range errCh {
		if err == nil {
			successes++
		}
	}

	// Only a single concurrent update may succeed.
	require.Equal(t, 1, successes)
}
// TestAddOnceViewMany adds a single message and checks that only the
// first View returns it; a subsequent View yields empty id and hash.
func TestAddOnceViewMany(t *testing.T) {
	target := teaser.New()
	m1id := target.Add("msg1")

	{
		// First view delivers the message that was just added.
		nextHash, m1vid := target.View()
		assertMessageId(m1id, m1vid, t)
		assertMessageHash("", nextHash, t)
	}

	{
		// Second view finds nothing left to deliver.
		nextHash, nextvid := target.View()
		assertMessageId("", nextvid, t)
		assertMessageHash("", nextHash, t)
	}
}
func(metadata map[string]string, sidecarName string, topicName string, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// prepare the messages\n\t\t\tmessages := make([]string, numMessages)\n\t\t\tfor i := range messages {\n\t\t\t\tmessages[i] = fmt.Sprintf(\"partitionKey: %s, message for topic: %s, index: %03d, uniqueId: %s\", metadata[messageKey], topicName, i, uuid.New().String())\n\t\t\t}\n\n\t\t\t// add the messages as expectations to the watchers\n\t\t\tfor _, messageWatcher := range messageWatchers {\n\t\t\t\tmessageWatcher.ExpectStrings(messages...)\n\t\t\t}\n\n\t\t\t// get the sidecar (dapr) client\n\t\t\tclient := sidecar.GetClient(ctx, sidecarName)\n\n\t\t\t// publish messages\n\t\t\tctx.Logf(\"Publishing messages. sidecarName: %s, topicName: %s\", sidecarName, topicName)\n\n\t\t\tvar publishOptions dapr.PublishEventOption\n\n\t\t\tif metadata != nil {\n\t\t\t\tpublishOptions = dapr.PublishEventWithMetadata(metadata)\n\t\t\t}\n\n\t\t\tfor _, message := range messages {\n\t\t\t\tctx.Logf(\"Publishing: %q\", message)\n\t\t\t\tvar err error\n\n\t\t\t\tif publishOptions != nil {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message, publishOptions)\n\t\t\t\t} else {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message)\n\t\t\t\t}\n\t\t\t\trequire.NoError(ctx, err, \"SNSSQSMultipleSubsSameConsumerIDs - error publishing message\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tassertMessages := func(timeout time.Duration, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// assert for messages\n\t\t\tfor _, m := range messageWatchers {\n\t\t\t\tif !m.Assert(ctx, 25*timeout) {\n\t\t\t\t\tctx.Errorf(\"SNSSQSMultipleSubsSameConsumerIDs - message assertion failed for watcher: %#v\\n\", m)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tflow.New(t, \"SNSSQS certification - single publisher and multiple subscribers 
with same consumer IDs\").\n\n\t\t// Run subscriberApplication app1\n\t\tStep(app.Run(appID1, fmt.Sprintf(\":%d\", appPort),\n\t\t\tsubscriberApplication(appID1, topicActiveName, consumerGroup1))).\n\n\t\t// Run the Dapr sidecar with ConsumerID \"PUBSUB_AWS_SNSSQS_QUEUE_1\"\n\t\tStep(sidecar.Run(sidecarName1,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/consumer_one\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort)),\n\t\t\t)...,\n\t\t)).\n\n\t\t// Run subscriberApplication app2\n\t\tStep(app.Run(appID2, fmt.Sprintf(\":%d\", appPort+portOffset),\n\t\t\tsubscriberApplication(appID2, topicActiveName, consumerGroup2))).\n\n\t\t// Run the Dapr sidecar with ConsumerID \"PUBSUB_AWS_SNSSQS_QUEUE_2\"\n\t\tStep(sidecar.Run(sidecarName2,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/consumer_two\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort+portOffset)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort+portOffset)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort+portOffset)),\n\t\t\t\tembedded.WithProfilePort(strconv.Itoa(runtime.DefaultProfilePort+portOffset)),\n\t\t\t)...,\n\t\t)).\n\t\tStep(\"publish messages to ==> \"+topicActiveName, publishMessages(metadata, sidecarName1, topicActiveName, consumerGroup2)).\n\t\tStep(\"publish messages to ==> \"+topicActiveName, publishMessages(metadata1, sidecarName2, topicActiveName, consumerGroup2)).\n\t\tStep(\"verify if app1, app2 together have recevied messages published to topic1\", assertMessages(10*time.Second, consumerGroup2)).\n\t\tStep(\"reset\", flow.Reset(consumerGroup1, consumerGroup2)).\n\t\tRun()\n}", "func (m *MockServersService) 
// CreateMultiple mocks the ServersService.CreateMultiple method.
// gomock-generated boilerplate: records the call on the controller and
// returns the configured bl.Servers and error values.
func (m *MockServersService) CreateMultiple(arg0 *binarylane.ServerMultiCreateRequest) (bl.Servers, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateMultiple", arg0)
	ret0, _ := ret[0].(bl.Servers)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
{\n\t\tt.Errorf(\"Expected queue depth of 0 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n}", "func (is *informerSpy) waitForEvents(t *testing.T, wantEvents bool) {\n\tt.Helper()\n\t// wait for create/update/delete 3 events for 30 seconds\n\twaitTimeout := time.Second * 30\n\tif !wantEvents {\n\t\t// wait just 15 seconds for no events\n\t\twaitTimeout = time.Second * 15\n\t}\n\n\terr := wait.PollImmediate(time.Second, waitTimeout, func() (bool, error) {\n\t\tis.mu.Lock()\n\t\tdefer is.mu.Unlock()\n\t\treturn len(is.adds) > 0 && len(is.updates) > 0 && len(is.deletes) > 0, nil\n\t})\n\tif wantEvents {\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"wanted events, but got error: %v\", err)\n\t\t}\n\t} else {\n\t\tif !errors.Is(err, wait.ErrWaitTimeout) {\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"wanted no events, but got error: %v\", err)\n\t\t\t} else {\n\t\t\t\tt.Fatalf(\"wanted no events, but got some: %s\", dump.Pretty(is))\n\t\t\t}\n\t\t}\n\t}\n}", "func (s) TestHandlerTransport_HandleStreams_MultiWriteStatus(t *testing.T) {\n\ttestHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *Stream) {\n\t\tif want := \"/service/foo.bar\"; s.method != want {\n\t\t\tt.Errorf(\"stream method = %q; want %q\", s.method, want)\n\t\t}\n\t\tst.bodyw.Close() // no body\n\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(5)\n\t\tfor i := 0; i < 5; i++ {\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tst.ht.WriteStatus(s, status.New(codes.OK, \"\"))\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\t})\n}", "func SNSSQSMultiplePubSubsDifferentConsumerIDs(t *testing.T) {\n\tconsumerGroup1 := watcher.NewUnordered()\n\tconsumerGroup2 := watcher.NewUnordered()\n\n\t// Set the partition key on all messages so they are written to the same partition. 
This allows for checking of ordered messages.\n\tmetadata := map[string]string{\n\t\tmessageKey: partition0,\n\t}\n\n\tmetadata1 := map[string]string{\n\t\tmessageKey: partition1,\n\t}\n\n\t// subscriber of the given topic\n\tsubscriberApplication := func(appID string, topicName string, messagesWatcher *watcher.Watcher) app.SetupFn {\n\t\treturn func(ctx flow.Context, s common.Service) error {\n\t\t\t// Simulate periodic errors.\n\t\t\tsim := simulate.PeriodicError(ctx, 100)\n\t\t\t// Setup the /orders event handler.\n\t\t\treturn multierr.Combine(\n\t\t\t\ts.AddTopicEventHandler(&common.Subscription{\n\t\t\t\t\tPubsubName: pubsubName,\n\t\t\t\t\tTopic: topicName,\n\t\t\t\t\tRoute: \"/orders\",\n\t\t\t\t}, func(_ context.Context, e *common.TopicEvent) (retry bool, err error) {\n\t\t\t\t\tif err := sim(); err != nil {\n\t\t\t\t\t\treturn true, err\n\t\t\t\t\t}\n\n\t\t\t\t\t// Track/Observe the data of the event.\n\t\t\t\t\tmessagesWatcher.Observe(e.Data)\n\t\t\t\t\tctx.Logf(\"Message Received appID: %s,pubsub: %s, topic: %s, id: %s, data: %s\", appID, e.PubsubName, e.Topic, e.ID, e.Data)\n\t\t\t\t\treturn false, nil\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\t}\n\n\tpublishMessages := func(metadata map[string]string, sidecarName string, topicName string, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// prepare the messages\n\t\t\tmessages := make([]string, numMessages)\n\t\t\tfor i := range messages {\n\t\t\t\tmessages[i] = fmt.Sprintf(\"partitionKey: %s, message for topic: %s, index: %03d, uniqueId: %s\", metadata[messageKey], topicName, i, uuid.New().String())\n\t\t\t}\n\n\t\t\t// add the messages as expectations to the watchers\n\t\t\tfor _, messageWatcher := range messageWatchers {\n\t\t\t\tmessageWatcher.ExpectStrings(messages...)\n\t\t\t}\n\n\t\t\t// get the sidecar (dapr) client\n\t\t\tclient := sidecar.GetClient(ctx, sidecarName)\n\n\t\t\t// publish messages\n\t\t\tctx.Logf(\"Publishing messages. 
sidecarName: %s, topicName: %s\", sidecarName, topicName)\n\n\t\t\tvar publishOptions dapr.PublishEventOption\n\n\t\t\tif metadata != nil {\n\t\t\t\tpublishOptions = dapr.PublishEventWithMetadata(metadata)\n\t\t\t}\n\n\t\t\tfor _, message := range messages {\n\t\t\t\tctx.Logf(\"Publishing: %q\", message)\n\t\t\t\tvar err error\n\n\t\t\t\tif publishOptions != nil {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message, publishOptions)\n\t\t\t\t} else {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message)\n\t\t\t\t}\n\t\t\t\trequire.NoError(ctx, err, \"SNSSQSMultiplePubSubsDifferentConsumerIDs - error publishing message\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tassertMessages := func(timeout time.Duration, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// assert for messages\n\t\t\tfor _, m := range messageWatchers {\n\t\t\t\tif !m.Assert(ctx, 25*timeout) {\n\t\t\t\t\tctx.Errorf(\"SNSSQSMultiplePubSubsDifferentConsumerIDs - message assertion failed for watcher: %#v\\n\", m)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tflow.New(t, \"SNSSQS certification - multiple publishers and multiple subscribers with different consumer IDs\").\n\n\t\t// Run subscriberApplication app1\n\t\tStep(app.Run(appID1, fmt.Sprintf(\":%d\", appPort),\n\t\t\tsubscriberApplication(appID1, topicActiveName, consumerGroup1))).\n\n\t\t// Run the Dapr sidecar with ConsumerID \"PUBSUB_AWS_SNSSQS_QUEUE_1\"\n\t\tStep(sidecar.Run(sidecarName1,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/consumer_one\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort)),\n\t\t\t)...,\n\t\t)).\n\n\t\t// Run subscriberApplication app2\n\t\tStep(app.Run(appID2, 
fmt.Sprintf(\":%d\", appPort+portOffset),\n\t\t\tsubscriberApplication(appID2, topicActiveName, consumerGroup2))).\n\n\t\t// Run the Dapr sidecar with ConsumerID \"PUBSUB_AWS_SNSSQS_QUEUE_2\"\n\t\tStep(sidecar.Run(sidecarName2,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/consumer_two\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort+portOffset)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort+portOffset)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort+portOffset)),\n\t\t\t\tembedded.WithProfilePort(strconv.Itoa(runtime.DefaultProfilePort+portOffset)),\n\t\t\t)...,\n\t\t)).\n\t\tStep(\"publish messages to ==> \"+topicActiveName, publishMessages(metadata, sidecarName1, topicActiveName, consumerGroup1)).\n\t\tStep(\"publish messages to ==> \"+topicActiveName, publishMessages(metadata1, sidecarName2, topicActiveName, consumerGroup2)).\n\t\tStep(\"verify if app1, app2 together have recevied messages published to topic1\", assertMessages(10*time.Second, consumerGroup1)).\n\t\tStep(\"verify if app1, app2 together have recevied messages published to topic1\", assertMessages(10*time.Second, consumerGroup2)).\n\t\tStep(\"reset\", flow.Reset(consumerGroup1, consumerGroup2)).\n\t\tRun()\n}", "func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) {\n\tfs, clientCh, cleanup := setupOverrides()\n\tdefer cleanup()\n\n\t// Create a server option to get notified about serving mode changes. We don't\n\t// do anything other than throwing a log entry here. But this is required,\n\t// since the server code emits a log entry at the default level (which is\n\t// ERROR) if no callback is registered for serving mode changes. Our\n\t// testLogger fails the test if there is any log entry at ERROR level. 
It does\n\t// provide an ExpectError() method, but that takes a string and it would be\n\t// painful to construct the exact error message expected here. Instead this\n\t// works just fine.\n\tmodeChangeOpt := ServingModeCallback(func(addr net.Addr, args ServingModeChangeArgs) {\n\t\tt.Logf(\"Serving mode for listener %q changed to %q, err: %v\", addr.String(), args.Mode, args.Err)\n\t})\n\tserver := NewGRPCServer(modeChangeOpt)\n\tdefer server.Stop()\n\n\tlis, err := testutils.LocalTCPListener()\n\tif err != nil {\n\t\tt.Fatalf(\"testutils.LocalTCPListener() failed: %v\", err)\n\t}\n\n\t// Call Serve() in a goroutine, and push on a channel when Serve returns.\n\tserveDone := testutils.NewChannel()\n\tgo func() {\n\t\tif err := server.Serve(lis); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tserveDone.Send(nil)\n\t}()\n\n\t// Wait for an xdsClient to be created.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tc, err := clientCh.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for new xdsClient to be created: %v\", err)\n\t}\n\tclient := c.(*fakeclient.Client)\n\n\t// Wait for a listener watch to be registered on the xdsClient.\n\tname, err := client.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a ListenerWatch: %v\", err)\n\t}\n\twantName := strings.Replace(testServerListenerResourceNameTemplate, \"%s\", lis.Addr().String(), -1)\n\tif name != wantName {\n\t\tt.Fatalf(\"LDS watch registered for name %q, want %q\", name, wantName)\n\t}\n\n\t// Push a good LDS response with security config, and wait for Serve() to be\n\t// invoked on the underlying grpc.Server. 
Also make sure that certificate\n\t// providers are not created.\n\tfcm, err := xdsresource.NewFilterChainManager(&v3listenerpb.Listener{\n\t\tFilterChains: []*v3listenerpb.FilterChain{\n\t\t\t{\n\t\t\t\tTransportSocket: &v3corepb.TransportSocket{\n\t\t\t\t\tName: \"envoy.transport_sockets.tls\",\n\t\t\t\t\tConfigType: &v3corepb.TransportSocket_TypedConfig{\n\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{\n\t\t\t\t\t\t\tCommonTlsContext: &v3tlspb.CommonTlsContext{\n\t\t\t\t\t\t\t\tTlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{\n\t\t\t\t\t\t\t\t\tInstanceName: \"identityPluginInstance\",\n\t\t\t\t\t\t\t\t\tCertificateName: \"identityCertName\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tFilters: []*v3listenerpb.Filter{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"filter-1\",\n\t\t\t\t\t\tConfigType: &v3listenerpb.Filter_TypedConfig{\n\t\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{\n\t\t\t\t\t\t\t\tRouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{\n\t\t\t\t\t\t\t\t\tRouteConfig: &v3routepb.RouteConfiguration{\n\t\t\t\t\t\t\t\t\t\tName: \"routeName\",\n\t\t\t\t\t\t\t\t\t\tVirtualHosts: []*v3routepb.VirtualHost{{\n\t\t\t\t\t\t\t\t\t\t\tDomains: []string{\"lds.target.good:3333\"},\n\t\t\t\t\t\t\t\t\t\t\tRoutes: []*v3routepb.Route{{\n\t\t\t\t\t\t\t\t\t\t\t\tMatch: &v3routepb.RouteMatch{\n\t\t\t\t\t\t\t\t\t\t\t\t\tPathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: \"/\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\tAction: &v3routepb.Route_NonForwardingAction{},\n\t\t\t\t\t\t\t\t\t\t\t}}}}},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tHttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsclient.NewFilterChainManager() failed with error: %v\", err)\n\t}\n\taddr, port := 
splitHostPort(lis.Addr().String())\n\tclient.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{\n\t\tRouteConfigName: \"routeconfig\",\n\t\tInboundListenerCfg: &xdsresource.InboundListenerConfig{\n\t\t\tAddress: addr,\n\t\t\tPort: port,\n\t\t\tFilterChains: fcm,\n\t\t},\n\t}, nil)\n\tif _, err := fs.serveCh.Receive(ctx); err != nil {\n\t\tt.Fatalf(\"error when waiting for Serve() to be invoked on the grpc.Server\")\n\t}\n\n\t// Make sure the security configuration is not acted upon.\n\tif err := verifyCertProviderNotCreated(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func multiError(errs []error) error {\n\tfor _, err := range errs {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestCheckEvents(t *testing.T) {\n\ttestNamespace := \"test_namespace\"\n\tcha := make(chan *events.Envelope)\n\terrorsCh := make(chan error)\n\tme := &mockEvt{\n\t\tmockSubscribe: func(ctx context.Context, filter ...string) (ch <-chan *events.Envelope, errs <-chan error) {\n\t\t\treturn cha, errorsCh\n\t\t},\n\t}\n\titf := &fake.MockedContainerdClient{\n\t\tMockEvents: func() containerd.EventService {\n\t\t\treturn containerd.EventService(me)\n\t\t},\n\t\tMockNamespaces: func(ctx context.Context) ([]string, error) {\n\t\t\treturn []string{testNamespace}, nil\n\t\t},\n\t\tMockContainers: func(namespace string) ([]containerd.Container, error) {\n\t\t\treturn nil, nil\n\t\t},\n\t}\n\t// Test the basic listener\n\tsub := createEventSubscriber(\"subscriberTest1\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttp := &containerdevents.TaskPaused{\n\t\tContainerID: \"42\",\n\t}\n\n\tvp, err := typeurl.MarshalAny(tp)\n\tassert.NoError(t, err)\n\n\ten := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/paused\",\n\t\tEvent: vp,\n\t}\n\tcha <- &en\n\n\ttimeout := time.NewTimer(2 * time.Second)\n\tticker := time.NewTicker(5 * time.Millisecond)\n\tcondition := false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif 
!sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tev := sub.Flush(time.Now().Unix())\n\tassert.Len(t, ev, 1)\n\tassert.Equal(t, ev[0].Topic, \"/tasks/paused\")\n\terrorsCh <- fmt.Errorf(\"chan breaker\")\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting for error\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Test the multiple events one unsupported\n\tsub = createEventSubscriber(\"subscriberTest2\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttk := &containerdevents.TaskOOM{\n\t\tContainerID: \"42\",\n\t}\n\tvk, err := typeurl.MarshalAny(tk)\n\tassert.NoError(t, err)\n\n\tek := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/oom\",\n\t\tEvent: vk,\n\t}\n\n\tnd := &containerdevents.NamespaceDelete{\n\t\tName: \"k10s.io\",\n\t}\n\tvnd, err := typeurl.MarshalAny(nd)\n\tassert.NoError(t, err)\n\n\tevnd := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/namespaces/delete\",\n\t\tEvent: vnd,\n\t}\n\n\tcha <- &ek\n\tcha <- &evnd\n\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\tev2 := sub.Flush(time.Now().Unix())\n\tfmt.Printf(\"\\n\\n 2/ Flush %v\\n\\n\", ev2)\n\tassert.Len(t, ev2, 1)\n\tassert.Equal(t, ev2[0].Topic, \"/tasks/oom\")\n}", "func (m *MockMultiClusterRoleEventHandler) UpdateMultiClusterRole(old, new *v1alpha1.MultiClusterRole) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateMultiClusterRole\", old, 
new)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func checkReconcileEventsOccur() {\n\t// These events are fired when the reconcile loop makes a change\n\tgomega.Eventually(func() string {\n\t\tout, err := RunKubeCtlCommand(\"describe\", \"hostpathprovisioner\", \"hostpath-provisioner\")\n\t\tgomega.Expect(err).ToNot(gomega.HaveOccurred())\n\t\treturn out\n\t}, 90*time.Second, 1*time.Second).Should(gomega.ContainSubstring(\"UpdateResourceStart\"))\n\n\tgomega.Eventually(func() string {\n\t\tout, err := RunKubeCtlCommand(\"describe\", \"hostpathprovisioner\", \"hostpath-provisioner\")\n\t\tgomega.Expect(err).ToNot(gomega.HaveOccurred())\n\t\treturn out\n\t}, 90*time.Second, 1*time.Second).Should(gomega.ContainSubstring(\"UpdateResourceSuccess\"))\n}", "func (h MultiStakingHooks) AfterValidatorBonded(ctx sdk.Context, consAddr sdk.ConsAddress, valAddr sdk.ValAddress) {\n\tfor i := range h {\n\t\th[i].AfterValidatorBonded(ctx, consAddr, valAddr)\n\t}\n}", "func TestSuitableSubscribe(t *testing.T) {\n\tvar testSubscribe = testType{\"testing\"}\n\ttables := []struct {\n\t\tmethodName string\n\t\tisSubscribe bool\n\t\targNum int\n\t\thasCtx bool\n\t}{\n\t\t{\"method1\", true, 0, true},\n\t\t{\"method3\", false, 0, true},\n\t\t{\"method4\", false, 0, true},\n\t\t{\"method5\", true, 1, true},\n\t\t{\"method6\", false, 1, true},\n\t}\n\n\tfor _, table := range tables {\n\t\t_, subscriptions := suitableCallbacks(reflect.ValueOf(testSubscribe), reflect.TypeOf(testSubscribe))\n\t\tif table.isSubscribe {\n\t\t\tsubscription, ok := subscriptions[table.methodName]\n\t\t\tif !ok {\n\t\t\t\tt.Fatalf(\"Input Method: %s, should be subscripable\", table.methodName)\n\t\t\t}\n\t\t\tif len(subscription.argTypes) != table.argNum {\n\t\t\t\tt.Errorf(\"Input Method: %s, got arg nums: %d, expected: %d\",\n\t\t\t\t\ttable.methodName, len(subscription.argTypes), table.argNum)\n\t\t\t}\n\t\t\tif subscription.hasCtx != table.hasCtx {\n\t\t\t\tt.Errorf(\"Input Method: %s, got hasCtx %t, 
expected: %t\",\n\t\t\t\t\ttable.methodName, subscription.hasCtx, table.hasCtx)\n\t\t\t}\n\t\t\tif subscription.errPos != -1 {\n\t\t\t\tt.Errorf(\"Expected input method error position to be -1\")\n\t\t\t}\n\n\t\t} else {\n\t\t\t_, ok := subscriptions[table.methodName]\n\t\t\tif ok {\n\t\t\t\tt.Errorf(\"Input Method: %s, should not be subscripable\", table.methodName)\n\t\t\t}\n\t\t}\n\t}\n}", "func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeController) error {\n\tvar err error\n\n\t// Read recorded events - wait up to 1 minute to get all the expected ones\n\t// (just in case some goroutines are slower with writing)\n\ttimer := time.NewTimer(time.Minute)\n\tdefer timer.Stop()\n\n\tfakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder)\n\tgotEvents := []string{}\n\tfinished := false\n\tfor len(gotEvents) < len(expectedEvents) && !finished {\n\t\tselect {\n\t\tcase event, ok := <-fakeRecorder.Events:\n\t\t\tif ok {\n\t\t\t\tglog.V(5).Infof(\"event recorder got event %s\", event)\n\t\t\t\tgotEvents = append(gotEvents, event)\n\t\t\t} else {\n\t\t\t\tglog.V(5).Infof(\"event recorder finished\")\n\t\t\t\tfinished = true\n\t\t\t}\n\t\tcase _, _ = <-timer.C:\n\t\t\tglog.V(5).Infof(\"event recorder timeout\")\n\t\t\tfinished = true\n\t\t}\n\t}\n\n\t// Evaluate the events\n\tfor i, expected := range expectedEvents {\n\t\tif len(gotEvents) <= i {\n\t\t\tt.Errorf(\"Event %q not emitted\", expected)\n\t\t\terr = fmt.Errorf(\"Events do not match\")\n\t\t\tcontinue\n\t\t}\n\t\treceived := gotEvents[i]\n\t\tif !strings.HasPrefix(received, expected) {\n\t\t\tt.Errorf(\"Unexpected event received, expected %q, got %q\", expected, received)\n\t\t\terr = fmt.Errorf(\"Events do not match\")\n\t\t}\n\t}\n\tfor i := len(expectedEvents); i < len(gotEvents); i++ {\n\t\tt.Errorf(\"Unexpected event received: %q\", gotEvents[i])\n\t\terr = fmt.Errorf(\"Events do not match\")\n\t}\n\treturn err\n}", "func (mr *MockServersServiceMockRecorder) CreateMultiple(arg0 
interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateMultiple\", reflect.TypeOf((*MockServersService)(nil).CreateMultiple), arg0)\n}", "func (f *FixUpMulti) AllowMultipleInProgression() bool {\n\treturn true\n}", "func cleanup(listeners []net.Listener, wg *sync.WaitGroup) error {\n\tlog.Println(\"Cleaning up.....\")\n\tfor _, listener := range listeners {\n\t\tlog.Printf(\"closing server at address %s ....\", listener.Addr().String())\n\t\tif err := listener.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"closed server at address %s\", listener.Addr().String())\n\t\twg.Done()\n\t}\n\treturn nil\n}", "func (rst *watcherSyncerTester) expectAllEventsHandled() {\n\tlog.Infof(\"Expecting all events to have been handled\")\n\tfor _, l := range rst.lws {\n\t\tExpect(l.listCallResults).To(HaveLen(0), \"pending list results to be processed\")\n\t\tExpect(l.stopEvents).To(HaveLen(0), \"pending stop events to be processed\")\n\t\tExpect(l.results).To(HaveLen(0), \"pending watch results to be processed\")\n\t}\n}", "func EnsureExtraUpgradeWorkers(c client.Client, cfg *osdUpgradeConfig, s scaler.Scaler, dsb drain.NodeDrainStrategyBuilder, metricsClient metrics.Metrics, m maintenance.Maintenance, cvClient cv.ClusterVersion, nc eventmanager.EventManager, upgradeConfig *upgradev1alpha1.UpgradeConfig, machinery machinery.Machinery, availabilityCheckers ac.AvailabilityCheckers, logger logr.Logger) (bool, error) {\n\tupgradeCommenced, err := cvClient.HasUpgradeCommenced(upgradeConfig)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdesired := upgradeConfig.Spec.Desired\n\tif upgradeCommenced {\n\t\tlogger.Info(fmt.Sprintf(\"ClusterVersion is already set to Channel %s Version %s, skipping %s\", desired.Channel, desired.Version, upgradev1alpha1.UpgradeScaleUpExtraNodes))\n\t\treturn true, nil\n\t}\n\n\tisScaled, err := s.EnsureScaleUpNodes(c, cfg.GetScaleDuration(), logger)\n\tif err != nil {\n\t\tif 
scaler.IsScaleTimeOutError(err) {\n\t\t\tmetricsClient.UpdateMetricScalingFailed(upgradeConfig.Name)\n\t\t}\n\t\treturn false, err\n\t}\n\n\tif isScaled {\n\t\tmetricsClient.UpdateMetricScalingSucceeded(upgradeConfig.Name)\n\t}\n\n\treturn isScaled, nil\n}", "func TestPostgresReplicationEventQueue_DequeueMultiple(t *testing.T) {\n\tt.Parallel()\n\tdb := testdb.New(t)\n\tctx := testhelper.Context(t)\n\n\tqueue := PostgresReplicationEventQueue{db.DB}\n\n\teventType1 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: UpdateRepo,\n\t\t\tRelativePath: \"/project/path-1\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"gitaly-0\",\n\t\t\tVirtualStorage: \"praefect\",\n\t\t\tParams: nil,\n\t\t},\n\t}\n\n\teventType2 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: DeleteRepo,\n\t\t\tRelativePath: \"/project/path-1\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"\",\n\t\t\tVirtualStorage: \"praefect\",\n\t\t\tParams: nil,\n\t\t},\n\t}\n\n\teventType3 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: RenameRepo,\n\t\t\tRelativePath: \"/project/path-2\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"gitaly-0\",\n\t\t\tVirtualStorage: \"praefect\",\n\t\t\tParams: Params{\"RelativePath\": \"/project/path-2-renamed\"},\n\t\t},\n\t}\n\n\teventType4 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: UpdateRepo,\n\t\t\tRelativePath: \"/project/path-1\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"gitaly-0\",\n\t\t\tVirtualStorage: \"backup\",\n\t\t},\n\t}\n\n\t// events to fill in the queue\n\tevents := []ReplicationEvent{eventType1, eventType1, eventType2, eventType1, eventType3, eventType4}\n\tfor i := range events {\n\t\tvar err error\n\t\tevents[i], err = queue.Enqueue(ctx, events[i])\n\t\trequire.NoError(t, err, \"failed to fill in event queue\")\n\t}\n\n\t// first request to deque\n\texpectedEvents1 := []ReplicationEvent{events[0], 
events[2], events[4]}\n\texpectedJobLocks1 := []JobLockRow{\n\t\t{JobID: events[0].ID, LockID: \"praefect|gitaly-1|/project/path-1\"},\n\t\t{JobID: events[2].ID, LockID: \"praefect|gitaly-1|/project/path-1\"},\n\t\t{JobID: events[4].ID, LockID: \"praefect|gitaly-1|/project/path-2\"},\n\t}\n\n\t// we expect only first two types of events by limiting count to 3\n\tdequeuedEvents1, err := queue.Dequeue(ctx, \"praefect\", \"gitaly-1\", 3)\n\trequire.NoError(t, err)\n\trequire.Len(t, dequeuedEvents1, len(expectedEvents1))\n\tfor i := range dequeuedEvents1 {\n\t\tdequeuedEvents1[i].UpdatedAt = nil // it is not possible to determine update_at value as it is generated on UPDATE in database\n\t\texpectedEvents1[i].State = JobStateInProgress\n\t\texpectedEvents1[i].Attempt--\n\t}\n\trequire.Equal(t, expectedEvents1, dequeuedEvents1)\n\n\trequireLocks(t, ctx, db, []LockRow{\n\t\t// there is only one single lock for all fetched events because of their 'repo' and 'target' combination\n\t\t{ID: \"praefect|gitaly-1|/project/path-1\", Acquired: true},\n\t\t{ID: \"praefect|gitaly-1|/project/path-2\", Acquired: true},\n\t\t{ID: \"backup|gitaly-1|/project/path-1\", Acquired: false},\n\t})\n\trequireJobLocks(t, ctx, db, expectedJobLocks1)\n\n\t// second request to deque\n\t// there must be only last event fetched from the queue\n\texpectedEvents2 := []ReplicationEvent{events[5]}\n\texpectedEvents2[0].State = JobStateInProgress\n\texpectedEvents2[0].Attempt = 2\n\n\texpectedJobLocks2 := []JobLockRow{{JobID: 6, LockID: \"backup|gitaly-1|/project/path-1\"}}\n\n\tdequeuedEvents2, err := queue.Dequeue(ctx, \"backup\", \"gitaly-1\", 100500)\n\trequire.NoError(t, err)\n\trequire.Len(t, dequeuedEvents2, 1, \"only one event must be fetched from the queue\")\n\n\tdequeuedEvents2[0].UpdatedAt = nil // it is not possible to determine update_at value as it is generated on UPDATE in database\n\trequire.Equal(t, expectedEvents2, dequeuedEvents2)\n\n\trequireLocks(t, ctx, db, []LockRow{\n\t\t{ID: 
\"praefect|gitaly-1|/project/path-1\", Acquired: true},\n\t\t{ID: \"praefect|gitaly-1|/project/path-2\", Acquired: true},\n\t\t{ID: \"backup|gitaly-1|/project/path-1\", Acquired: true},\n\t})\n\trequireJobLocks(t, ctx, db, append(expectedJobLocks1, expectedJobLocks2...))\n}", "func (s *EventStore) SaveMultiple(items []flux.MultipleItem) error {\n\n\t// make sure each item has only record for a single aggregate id.\n\tfor _, item := range items {\n\t\tif _, err := getAggregateID(item.Records); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// init db transaction\n\ttx, err := s.db.Begin()\n\tif err != nil {\n\t\treturn errors.New(flux.ErrUnexpectedDbError)\n\t}\n\n\t// store items\n\tstoredItems := make([]flux.MultipleItem, 0)\n\tfor _, item := range items {\n\n\t\t// check if aggregate exists, and it's current version\n\t\taggID, err := getAggregateID(item.Records)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcurrentVersion, err := s.maxVersion(aggID)\n\t\tif err != nil {\n\t\t\t_ = tx.Rollback()\n\t\t\tfmt.Printf(\"[es] db error: %s\\n\", err)\n\t\t\treturn errors.New(flux.ErrUnexpectedDbError)\n\t\t}\n\n\t\texpectedVersion := item.ExpectedVersion\n\t\tif expectedVersion == -2 {\n\t\t\t// cheat ( -2 means 'never fail' )\n\t\t\texpectedVersion = currentVersion\n\t\t\t// TODO: How to check for idempotency in such case?\n\t\t}\n\n\t\t// validate expected version with actual version\n\t\tif expectedVersion != currentVersion {\n\t\t\t_ = tx.Rollback()\n\t\t\tif expectedVersion < currentVersion {\n\t\t\t\t// TODO: Idempotency check!\n\t\t\t}\n\t\t\tfmt.Printf(\"OptimisticConcurrencyError %d != %d\\n\", expectedVersion, currentVersion)\n\t\t\treturn flux.NewError(errors.New(flux.ErrOptimisticConcurrencyError), flux.ErrOptimisticConcurrencyError,\n\t\t\t\t\"want: %v, got: %v\", expectedVersion, currentVersion)\n\t\t}\n\n\t\tvi := expectedVersion\n\t\tstoredRecords := make([]flux.Record, 0)\n\t\tfor _, r := range item.Records {\n\t\t\tvi++\n\t\t\tr.Version = 
vi\n\t\t\tinst := \"insert into \" + s.tableName + \" (aggregate_id, type, data, version) values ($1, $2, $3, $4)\"\n\t\t\t_, err := tx.Exec(inst, r.AggregateID, r.Type, r.Data, r.Version)\n\t\t\tif err != nil {\n\t\t\t\t_ = tx.Rollback()\n\t\t\t\tfmt.Printf(\"[es] db insert error: %s\\n\", err)\n\t\t\t\treturn errors.New(flux.ErrUnexpectedDbError)\n\t\t\t}\n\t\t\tstoredRecords = append(storedRecords, r)\n\t\t}\n\t\tstoredItems = append(storedItems, flux.MultipleItem{Records: storedRecords, ExpectedVersion: vi, PublishTopic: item.PublishTopic})\n\t\tlog.Printf(\"%v events were appended to stream %v (v%v)\", len(item.Records), aggID, vi)\n\t}\n\n\t// all done, can now commit tx.\n\terr = tx.Commit()\n\tif err != nil {\n\t\t// db commit failed :(\n\t\t_ = tx.Rollback()\n\t\tfmt.Printf(\"[es] db commit error: %s\\n\", err)\n\t\treturn errors.New(flux.ErrUnexpectedDbError)\n\t}\n\n\t// publish events\n\tfor _, item := range storedItems {\n\t\tfor _, record := range item.Records {\n\t\t\terr := s.Publish(record, item.PublishTopic)\n\t\t\tif err != nil {\n\t\t\t\t// error dispatching to event bus,\n\t\t\t\t// dispatcher must retry.\n\t\t\t} else {\n\t\t\t\t_, err := s.db.Exec(\"update \"+s.tableName+\" set dispatched=true where aggregate_id=$1 and version=$2\",\n\t\t\t\t\trecord.AggregateID, record.Version)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// if failed dispatcher will retry\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"[es] event %v:%v has been marked as dispatched.\\n\", record.AggregateID, record.Version)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Printf(\"[SaveMultiple] %v aggregates have been saved to store.\\n\", len(items))\n\treturn nil\n}", "func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {\n\t// TODO(djd): multiArg is very confusing. 
Fold this logic into the\n\t// relevant Put/Get methods to make the logic less opaque.\n\tif v.Kind() != reflect.Slice {\n\t\treturn multiArgTypeInvalid, nil\n\t}\n\tif v.Type() == typeOfPropertyList {\n\t\treturn multiArgTypeInvalid, nil\n\t}\n\telemType = v.Type().Elem()\n\tif reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {\n\t\treturn multiArgTypePropertyLoadSaver, elemType\n\t}\n\tswitch elemType.Kind() {\n\tcase reflect.Struct:\n\t\treturn multiArgTypeStruct, elemType\n\tcase reflect.Interface:\n\t\treturn multiArgTypeInterface, elemType\n\tcase reflect.Ptr:\n\t\telemType = elemType.Elem()\n\t\tif elemType.Kind() == reflect.Struct {\n\t\t\treturn multiArgTypeStructPtr, elemType\n\t\t}\n\t}\n\treturn multiArgTypeInvalid, nil\n}" ]
[ "0.7745197", "0.7306555", "0.6980485", "0.6618361", "0.62530243", "0.6223199", "0.605705", "0.5956157", "0.5439953", "0.5438847", "0.5389264", "0.53887194", "0.5385906", "0.53188586", "0.5271133", "0.52647465", "0.5137011", "0.51261395", "0.5063871", "0.5054911", "0.49436536", "0.4938048", "0.49314246", "0.48533753", "0.4828004", "0.47867697", "0.47758824", "0.4730213", "0.47298467", "0.47095215", "0.46809432", "0.46794814", "0.4661926", "0.4649892", "0.4643284", "0.46280038", "0.46247095", "0.46233487", "0.4607035", "0.45965305", "0.45921472", "0.4583069", "0.45718974", "0.45687678", "0.45685723", "0.4565775", "0.45577118", "0.45548502", "0.4551845", "0.45497245", "0.45393685", "0.45160764", "0.45155036", "0.4515481", "0.45067143", "0.45010808", "0.45006222", "0.4483838", "0.447911", "0.44784483", "0.446823", "0.44657972", "0.44655415", "0.44633794", "0.44574735", "0.44434154", "0.44404516", "0.44372222", "0.44314626", "0.44103515", "0.44023994", "0.4397079", "0.43969795", "0.43905574", "0.43857628", "0.43744317", "0.4373631", "0.43717968", "0.43704802", "0.43695801", "0.43655226", "0.43465373", "0.43453988", "0.4343896", "0.4342897", "0.4338411", "0.43343744", "0.43322736", "0.433177", "0.4330207", "0.4328085", "0.43260878", "0.43253598", "0.43252414", "0.43196896", "0.4319586", "0.43120024", "0.4310868", "0.43084598", "0.4300064" ]
0.8205306
0
DeleteMultiListeners mocks base method
func (m *MockLoadBalance) DeleteMultiListeners(region, lbID string, listeners []*v1.Listener) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteMultiListeners", region, lbID, listeners) ret0, _ := ret[0].(error) return ret0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mr *MockLoadBalanceMockRecorder) DeleteMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteMultiListeners), region, lbID, listeners)\n}", "func (m *MockLoadBalance) EnsureMultiListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestActiveMultiEvent_Deactivate(t *testing.T) {\r\n\tnumber := 10\r\n\tvar events []*ActiveEvent\r\n\tvar mock []*mockUnixHelper\r\n\r\n\tfor i := 0; i < number; i++ {\r\n\t\tunixMock := &mockUnixHelper{}\r\n\t\tnewActive := &ActiveEvent{FileDescriptor: i, unix: unixMock}\r\n\t\tunixMock.On(\"close\", i).Return(nil).Once()\r\n\t\tevents = append(events, newActive)\r\n\t\tmock = append(mock, unixMock)\r\n\t}\r\n\r\n\tnewActiveMulti := ActiveMultiEvent{events: events}\r\n\tnewActiveMulti.Deactivate()\r\n\r\n\trequire.Nil(t, newActiveMulti.events)\r\n\tfor _, event := range events {\r\n\t\trequire.Nil(t, event)\r\n\t}\r\n\tfor _, m := range mock {\r\n\t\tm.AssertExpectations(t)\r\n\t}\r\n}", "func (m *MockCallback) OnRemoveAll() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemoveAll\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockLoadBalance) EnsureMultiSegmentListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiSegmentListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestMsgListenerMulti(t *testing.T) {\n\tml := newMsgListeners()\n\n\tcount := 0\n\tcids := testCids()\t// TODO: The wrong Directory type was being used for 
MapEntries.\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\n\t})\t// TODO: Исправления для OSX\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\t// TODO: Test emails 1\n\t})\n\tml.onMsgComplete(cids[1], func(err error) {\n\t\tcount++\n\t})\n\n\tml.fireMsgComplete(cids[0], nil)\n\trequire.Equal(t, 2, count)\n\n\tml.fireMsgComplete(cids[1], nil)\n\trequire.Equal(t, 3, count)\n}", "func (m *MockDeletableStorage) Del(ctx context.Context, keys ...interface{}) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range keys {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Del\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockListener) Delete(listenerKey api.ListenerKey, checksum api.Checksum) error {\n\tret := m.ctrl.Call(m, \"Delete\", listenerKey, checksum)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockEventLogger) AppendMulti(events ...eventlog.EventData) (uint64, uint64, uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range events {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AppendMulti\", varargs...)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(uint64)\n\tret2, _ := ret[2].(uint64)\n\tret3, _ := ret[3].(time.Time)\n\tret4, _ := ret[4].(error)\n\treturn ret0, ret1, ret2, ret3, ret4\n}", "func (m *MockProvider) OnEndpointsDelete(arg0 *v1.Endpoints) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsDelete\", arg0)\n}", "func (m *MockMultiClusterRoleEventHandler) DeleteMultiClusterRole(obj *v1alpha1.MultiClusterRole) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteMultiClusterRole\", obj)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestToManyRemove(t *testing.T) {}", "func TestToManyRemove(t *testing.T) {}", "func TestToManyRemove(t *testing.T) {}", "func TestToManyRemove(t *testing.T) {}", "func TestToManyRemove(t *testing.T) {}", "func TestToManyRemove(t 
*testing.T) {}", "func TestToManyRemove(t *testing.T) {}", "func TestAddManyRemoveFew(t *testing.T) {\n\ttarget := teaser.New()\n\tmids := []string{}\n\tfor i := 0; i <= 10; i++ {\n\t\tmid := target.Add(fmt.Sprintf(\"msg%d\", i))\n\t\tmids = append(mids, mid)\n\t}\n\tif len(mids) < 10 {\n\t\tt.Fatal(\"broken test data, expected mids to have at least 9 elementes \")\n\t}\n\n\t{\n\t\tm5id := mids[5]\n\t\tdeleted := target.Delete(m5id)\n\t\tassertDeleted(m5id, deleted, t)\n\t\tassertIsNotInTheQueue(target, m5id, t)\n\n\t\tnewmids := mids[:5]\n\t\tnewmids = append(newmids, mids[6:]...)\n\t\tmids = newmids\n\t\twaitForMessages()\n\t\tassertTheQueueContainsIds(target, mids, t)\n\t\twaitForMessages()\n\t}\n\n\t{\n\t\tm5id := mids[5]\n\t\tdeleted := target.Delete(m5id)\n\t\tassertDeleted(m5id, deleted, t)\n\t\tassertIsNotInTheQueue(target, m5id, t)\n\t\tnmids := mids[:5]\n\t\tnmids = append(nmids, mids[6:]...)\n\t\tmids = nmids\n\t\twaitForMessages()\n\t\tassertTheQueueContainsIds(target, mids, t)\n\t}\n}", "func (m *MockEventDao) UnfinishedEvents(target, targetID string, optTypes ...string) ([]*model.ServiceEvent, error) {\n\tvarargs := []interface{}{target, targetID}\n\tfor _, a := range optTypes {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"UnfinishedEvents\", varargs...)\n\tret0, _ := ret[0].([]*model.ServiceEvent)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockHooks) OnDelete(arg0 proto.Message) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnDelete\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockEventLogger) AppendCheckMulti(assumedVersion uint64, events ...eventlog.EventData) (uint64, uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{assumedVersion}\n\tfor _, a := range events {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AppendCheckMulti\", varargs...)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(uint64)\n\tret2, _ := 
ret[2].(time.Time)\n\tret3, _ := ret[3].(error)\n\treturn ret0, ret1, ret2, ret3\n}", "func (m *MockLoadBalance) DeleteListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func cleanup(listeners []net.Listener, wg *sync.WaitGroup) error {\n\tlog.Println(\"Cleaning up.....\")\n\tfor _, listener := range listeners {\n\t\tlog.Printf(\"closing server at address %s ....\", listener.Addr().String())\n\t\tif err := listener.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"closed server at address %s\", listener.Addr().String())\n\t\twg.Done()\n\t}\n\treturn nil\n}", "func (m *MockSubjectRoleManager) BulkDelete(roleType, system string, subjectPKs []int64) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"BulkDelete\", roleType, system, subjectPKs)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockCallback) OnRemove(arg0 int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemove\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockGeneralRepository) BulkDelete(arg0 []models.Model) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"BulkDelete\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockCache) Del(ctx context.Context, keys ...string) (int64, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range keys {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Del\", varargs...)\n\tret0, _ := ret[0].(int64)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func testAddDeleteMulti(tree T, t *testing.T) {\n\telem := \"test\"\n\telemII := \"testII\"\n\t//testDeleteSimple(NewQuadTree(0, d.width, 0, d.height, 10000), []interface{}{elem, elemII}, []interface{}{elem, elemII}, false, \"Simple Global Delete Take Two Of Two\", t)\n\ttestDeleteSimple(tree, []interface{}{elem, elemII}, []interface{}{elem, elemII}, 
true, \"Simple Exact Delete Take Two Of Two\", t)\n}", "func (it *integTestSuite) TestNpmWorkloadCreateDeleteWithMultiIntf(c *C) {\n\tc.Skip(\"Skipping till we fix workload update issues\")\n\n\tconst numWorkloads = 10\n\tconst numIntf = 10\n\tconst numIter = 10\n\t// if not present create the default tenant\n\tit.CreateTenant(\"default\")\n\t// create a wait channel\n\twaitCh := make(chan error, it.numAgents*2)\n\n\tfor iter := 0; iter < numIter; iter++ {\n\t\t// create number of workload on first host\n\t\tfor i := 0; i < numWorkloads; i++ {\n\t\t\t// build workload object\n\t\t\twr := workload.Workload{\n\t\t\t\tTypeMeta: api.TypeMeta{Kind: \"Workload\"},\n\t\t\t\tObjectMeta: api.ObjectMeta{\n\t\t\t\t\tName: fmt.Sprintf(\"testWorkload-%d\", i),\n\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t\tTenant: \"default\",\n\t\t\t\t},\n\t\t\t\tSpec: workload.WorkloadSpec{\n\t\t\t\t\tHostName: \"testHost-0\",\n\t\t\t\t\tInterfaces: []workload.WorkloadIntfSpec{},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// each workload has multiple interfaces\n\t\t\tfor j := 0; j < numIntf; j++ {\n\t\t\t\tmacAddr := fmt.Sprintf(\"0002.0000.%02x%02x\", i, j)\n\t\t\t\twintf := workload.WorkloadIntfSpec{\n\t\t\t\t\tMACAddress: macAddr,\n\t\t\t\t\tMicroSegVlan: uint32(i*numIntf + j + 1),\n\t\t\t\t\tExternalVlan: uint32(j + 1),\n\t\t\t\t}\n\t\t\t\twr.Spec.Interfaces = append(wr.Spec.Interfaces, wintf)\n\t\t\t}\n\t\t\t_, err := it.apisrvClient.WorkloadV1().Workload().Create(context.Background(), &wr)\n\t\t\tAssertOk(c, err, \"Error creating workload\")\n\t\t}\n\n\t\t// wait for all endpoints to be propagated to other agents\n\t\tfor _, ag := range it.agents {\n\t\t\tgo func(ag *Dpagent) {\n\t\t\t\tfound := CheckEventually(func() (bool, interface{}) {\n\t\t\t\t\tepMeta := netproto.Endpoint{\n\t\t\t\t\t\tTypeMeta: api.TypeMeta{Kind: \"Endpoint\"},\n\t\t\t\t\t}\n\t\t\t\t\tendpoints, _ := ag.dscAgent.PipelineAPI.HandleEndpoint(agentTypes.List, epMeta)\n\t\t\t\t\treturn len(endpoints) == numWorkloads*numIntf, 
nil\n\t\t\t\t}, \"10ms\", it.pollTimeout())\n\t\t\t\tif !found {\n\t\t\t\t\twaitCh <- fmt.Errorf(\"Endpoint count incorrect in datapath\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\twaitCh <- nil\n\t\t\t}(ag)\n\t\t}\n\n\t\t// wait for all goroutines to complete\n\t\tfor i := 0; i < it.numAgents; i++ {\n\t\t\tAssertOk(c, <-waitCh, \"Endpoint info incorrect in datapath\")\n\n\t\t}\n\n\t\t// now delete the workloads\n\t\tfor i := 0; i < numWorkloads; i++ {\n\t\t\t// build workload object\n\t\t\twmeta := api.ObjectMeta{\n\t\t\t\tName: fmt.Sprintf(\"testWorkload-%d\", i),\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tTenant: \"default\",\n\t\t\t}\n\t\t\t_, err := it.apisrvClient.WorkloadV1().Workload().Delete(context.Background(), &wmeta)\n\t\t\tAssertOk(c, err, \"Error deleting workload\")\n\t\t}\n\n\t\t// verify endpoints are gone from apiserver\n\t\tAssertEventually(c, func() (bool, interface{}) {\n\t\t\tvar listopt api.ListWatchOptions\n\t\t\teplist, lerr := it.apisrvClient.WorkloadV1().Endpoint().List(context.Background(), &listopt)\n\t\t\tif lerr == nil && len(eplist) == 0 {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\treturn false, eplist\n\t\t}, \"Endpoints still found in apiserver\")\n\n\t\t// verify endpoints are gone from agents\n\t\tfor _, ag := range it.agents {\n\t\t\tgo func(ag *Dpagent) {\n\t\t\t\tif !CheckEventually(func() (bool, interface{}) {\n\t\t\t\t\tepMeta := netproto.Endpoint{\n\t\t\t\t\t\tTypeMeta: api.TypeMeta{Kind: \"Endpoint\"},\n\t\t\t\t\t}\n\t\t\t\t\tendpoints, _ := ag.dscAgent.PipelineAPI.HandleEndpoint(agentTypes.List, epMeta)\n\t\t\t\t\treturn len(endpoints) == 0, nil\n\t\t\t\t}, \"10ms\", it.pollTimeout()) {\n\t\t\t\t\twaitCh <- fmt.Errorf(\"Endpoint was not deleted from datapath\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\twaitCh <- nil\n\t\t\t}(ag)\n\t\t}\n\n\t\t// wait for all goroutines to complete\n\t\tfor i := 0; i < it.numAgents; i++ {\n\t\t\tAssertOk(c, <-waitCh, \"Endpoint delete error\")\n\t\t}\n\t}\n\n\t// delete the networks\n\tfor j := 
0; j < numIntf; j++ {\n\t\terr := it.DeleteNetwork(\"default\", fmt.Sprintf(\"Network-Vlan-%d\", j+1))\n\t\tc.Assert(err, IsNil)\n\t\tAssertEventually(c, func() (bool, interface{}) {\n\t\t\t_, nerr := it.npmCtrler.StateMgr.FindNetwork(\"default\", fmt.Sprintf(\"Network-Vlan-%d\", j+1))\n\t\t\treturn (nerr != nil), nil\n\t\t}, \"Network still found in NPM\")\n\t}\n}", "func TestPostgresReplicationEventQueue_DequeueMultiple(t *testing.T) {\n\tt.Parallel()\n\tdb := testdb.New(t)\n\tctx := testhelper.Context(t)\n\n\tqueue := PostgresReplicationEventQueue{db.DB}\n\n\teventType1 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: UpdateRepo,\n\t\t\tRelativePath: \"/project/path-1\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"gitaly-0\",\n\t\t\tVirtualStorage: \"praefect\",\n\t\t\tParams: nil,\n\t\t},\n\t}\n\n\teventType2 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: DeleteRepo,\n\t\t\tRelativePath: \"/project/path-1\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"\",\n\t\t\tVirtualStorage: \"praefect\",\n\t\t\tParams: nil,\n\t\t},\n\t}\n\n\teventType3 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: RenameRepo,\n\t\t\tRelativePath: \"/project/path-2\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"gitaly-0\",\n\t\t\tVirtualStorage: \"praefect\",\n\t\t\tParams: Params{\"RelativePath\": \"/project/path-2-renamed\"},\n\t\t},\n\t}\n\n\teventType4 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: UpdateRepo,\n\t\t\tRelativePath: \"/project/path-1\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"gitaly-0\",\n\t\t\tVirtualStorage: \"backup\",\n\t\t},\n\t}\n\n\t// events to fill in the queue\n\tevents := []ReplicationEvent{eventType1, eventType1, eventType2, eventType1, eventType3, eventType4}\n\tfor i := range events {\n\t\tvar err error\n\t\tevents[i], err = queue.Enqueue(ctx, events[i])\n\t\trequire.NoError(t, err, \"failed to fill in 
event queue\")\n\t}\n\n\t// first request to deque\n\texpectedEvents1 := []ReplicationEvent{events[0], events[2], events[4]}\n\texpectedJobLocks1 := []JobLockRow{\n\t\t{JobID: events[0].ID, LockID: \"praefect|gitaly-1|/project/path-1\"},\n\t\t{JobID: events[2].ID, LockID: \"praefect|gitaly-1|/project/path-1\"},\n\t\t{JobID: events[4].ID, LockID: \"praefect|gitaly-1|/project/path-2\"},\n\t}\n\n\t// we expect only first two types of events by limiting count to 3\n\tdequeuedEvents1, err := queue.Dequeue(ctx, \"praefect\", \"gitaly-1\", 3)\n\trequire.NoError(t, err)\n\trequire.Len(t, dequeuedEvents1, len(expectedEvents1))\n\tfor i := range dequeuedEvents1 {\n\t\tdequeuedEvents1[i].UpdatedAt = nil // it is not possible to determine update_at value as it is generated on UPDATE in database\n\t\texpectedEvents1[i].State = JobStateInProgress\n\t\texpectedEvents1[i].Attempt--\n\t}\n\trequire.Equal(t, expectedEvents1, dequeuedEvents1)\n\n\trequireLocks(t, ctx, db, []LockRow{\n\t\t// there is only one single lock for all fetched events because of their 'repo' and 'target' combination\n\t\t{ID: \"praefect|gitaly-1|/project/path-1\", Acquired: true},\n\t\t{ID: \"praefect|gitaly-1|/project/path-2\", Acquired: true},\n\t\t{ID: \"backup|gitaly-1|/project/path-1\", Acquired: false},\n\t})\n\trequireJobLocks(t, ctx, db, expectedJobLocks1)\n\n\t// second request to deque\n\t// there must be only last event fetched from the queue\n\texpectedEvents2 := []ReplicationEvent{events[5]}\n\texpectedEvents2[0].State = JobStateInProgress\n\texpectedEvents2[0].Attempt = 2\n\n\texpectedJobLocks2 := []JobLockRow{{JobID: 6, LockID: \"backup|gitaly-1|/project/path-1\"}}\n\n\tdequeuedEvents2, err := queue.Dequeue(ctx, \"backup\", \"gitaly-1\", 100500)\n\trequire.NoError(t, err)\n\trequire.Len(t, dequeuedEvents2, 1, \"only one event must be fetched from the queue\")\n\n\tdequeuedEvents2[0].UpdatedAt = nil // it is not possible to determine update_at value as it is generated on UPDATE in 
database\n\trequire.Equal(t, expectedEvents2, dequeuedEvents2)\n\n\trequireLocks(t, ctx, db, []LockRow{\n\t\t{ID: \"praefect|gitaly-1|/project/path-1\", Acquired: true},\n\t\t{ID: \"praefect|gitaly-1|/project/path-2\", Acquired: true},\n\t\t{ID: \"backup|gitaly-1|/project/path-1\", Acquired: true},\n\t})\n\trequireJobLocks(t, ctx, db, append(expectedJobLocks1, expectedJobLocks2...))\n}", "func (m *MockProc) OnSvcHostRemove(arg0 []*host.Host) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnSvcHostRemove\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (suite *FIFOTestSuite) TestRemoveMultipleGRs() {\n\tvar (\n\t\twg sync.WaitGroup\n\t\ttotalElementsToEnqueue = 100\n\t\ttotalElementsToRemove = 90\n\t)\n\n\tfor i := 0; i < totalElementsToEnqueue; i++ {\n\t\tsuite.fifo.Enqueue(i)\n\t}\n\n\tfor i := 0; i < totalElementsToRemove; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := suite.fifo.Remove(1)\n\t\t\tsuite.NoError(err, \"Unexpected error during concurrent Remove(n)\")\n\t\t}()\n\t}\n\twg.Wait()\n\n\t// check len, should be == totalElementsToEnqueue - totalElementsToRemove\n\ttotalElementsAfterRemove := suite.fifo.GetLen()\n\tsuite.Equal(totalElementsToEnqueue-totalElementsToRemove, totalElementsAfterRemove, \"Total elements on list does not match with expected number\")\n\n\t// check current 2nd element (index 1) on the queue\n\tval, err := suite.fifo.Get(1)\n\tsuite.NoError(err, \"No error should be returned when getting an existent element\")\n\tsuite.Equalf(1+totalElementsToRemove, val, \"The expected value at position 1 (2nd element) should be: %v\", 1+totalElementsToRemove)\n}", "func TestSplitListenersToDiffProtocol(t *testing.T) {\n\ttestListeners := []*networkextensionv1.Listener{\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8000,\n\t\t\t\tProtocol: \"HTTP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8001,\n\t\t\t\tProtocol: 
\"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8002,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8003,\n\t\t\t\tProtocol: \"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8004,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t}\n\tliGroup := splitListenersToDiffProtocol(testListeners)\n\tfor _, list := range liGroup {\n\t\tt.Logf(\"%+v\", list)\n\t\ttmpProtocol := make(map[string]struct{})\n\t\tfor _, li := range list {\n\t\t\ttmpProtocol[li.Spec.Protocol] = struct{}{}\n\t\t}\n\t\tif len(tmpProtocol) != 1 {\n\t\t\tt.Errorf(\"list %v contains more than one protocol %v\", list, tmpProtocol)\n\t\t}\n\t}\n}", "func (m *MockUsecase) StopListenEvents(userID int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"StopListenEvents\", userID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockWatcher) Delete() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Delete\")\n}", "func (_m *OAuth20Service) DeleteMultipleClientCredentials(ctx context.Context, auths []model.SystemAuth) error {\n\tret := _m.Called(ctx, auths)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, []model.SystemAuth) error); ok {\n\t\tr0 = rf(ctx, auths)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockConn) Multi(ops ...interface{}) ([]zk.MultiResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range ops {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Multi\", varargs...)\n\tret0, _ := ret[0].([]zk.MultiResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestConsulStateDriverWatchAllStateDelete(t *testing.T) {\n\tdriver := setupConsulDriver(t)\n\tcommonTestStateDriverWatchAllStateDelete(t, driver)\n}", "func TestDeleteAllAfterDeactivate(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests 
in short mode\")\n\t}\n\tdeleteAll(t)\n\n\talice := tu.UniqueString(\"alice\")\n\taliceClient, adminClient := getPachClient(t, alice), getPachClient(t, admin)\n\n\t// alice creates a pipeline\n\trepo := tu.UniqueString(\"TestDeleteAllAfterDeactivate\")\n\tpipeline := tu.UniqueString(\"pipeline\")\n\trequire.NoError(t, aliceClient.CreateRepo(repo))\n\trequire.NoError(t, aliceClient.CreatePipeline(\n\t\tpipeline,\n\t\t\"\", // default image: ubuntu:14.04\n\t\t[]string{\"bash\"},\n\t\t[]string{fmt.Sprintf(\"cp /pfs/%s/* /pfs/out/\", repo)},\n\t\t&pps.ParallelismSpec{Constant: 1},\n\t\tclient.NewAtomInput(repo, \"/*\"),\n\t\t\"\", // default output branch: master\n\t\tfalse,\n\t))\n\n\t// alice makes an input commit\n\tcommit, err := aliceClient.StartCommit(repo, \"master\")\n\trequire.NoError(t, err)\n\t_, err = aliceClient.PutFile(repo, commit.ID, \"/file1\", strings.NewReader(\"test data\"))\n\trequire.NoError(t, err)\n\trequire.NoError(t, aliceClient.FinishCommit(repo, commit.ID))\n\n\t// make sure the pipeline runs\n\titer, err := aliceClient.FlushCommit(\n\t\t[]*pfs.Commit{commit},\n\t\t[]*pfs.Repo{{Name: pipeline}},\n\t)\n\trequire.NoError(t, err)\n\trequire.NoErrorWithinT(t, 60*time.Second, func() error {\n\t\t_, err := iter.Next()\n\t\treturn err\n\t})\n\n\t// Deactivate auth\n\t_, err = adminClient.Deactivate(adminClient.Ctx(), &auth.DeactivateRequest{})\n\trequire.NoError(t, err)\n\n\t// Wait for auth to be deactivated\n\trequire.NoError(t, backoff.Retry(func() error {\n\t\t_, err := aliceClient.WhoAmI(aliceClient.Ctx(), &auth.WhoAmIRequest{})\n\t\tif err != nil && auth.IsErrNotActivated(err) {\n\t\t\treturn nil // WhoAmI should fail when auth is deactivated\n\t\t}\n\t\treturn errors.New(\"auth is not yet deactivated\")\n\t}, backoff.NewTestingBackOff()))\n\n\t// Make sure DeleteAll() succeeds\n\trequire.NoError(t, aliceClient.DeleteAll())\n}", "func (c CryptoServiceTester) TestRemoveFromMultipleKeystores(t *testing.T) {\n\tcryptoService := 
c.cryptoServiceFactory()\n\tcryptoService.keyStores = append(cryptoService.keyStores,\n\t\ttrustmanager.NewKeyMemoryStore(passphraseRetriever))\n\n\tprivKey, err := utils.GenerateECDSAKey(rand.Reader)\n\trequire.NoError(t, err, c.errorMsg(\"error creating key\"))\n\n\tfor _, store := range cryptoService.keyStores {\n\t\terr := store.AddKey(trustmanager.KeyInfo{Role: data.CanonicalRootRole, Gun: \"\"}, privKey)\n\t\trequire.NoError(t, err)\n\t}\n\n\trequire.NotNil(t, cryptoService.GetKey(privKey.ID()))\n\n\t// Remove removes it from all key stores\n\terr = cryptoService.RemoveKey(privKey.ID())\n\trequire.NoError(t, err, c.errorMsg(\"could not remove key\"))\n\n\tfor _, store := range cryptoService.keyStores {\n\t\t_, _, err := store.GetKey(privKey.ID())\n\t\trequire.Error(t, err)\n\t}\n}", "func (m *MockStream) RemoveEventListener(streamEventListener types.StreamEventListener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RemoveEventListener\", streamEventListener)\n}", "func (m *MockChefIngesterClient) ProcessMultipleNodeDeletes(ctx context.Context, in *request.MultipleNodeDeleteRequest, opts ...grpc.CallOption) (*response.ProcessMultipleNodeDeleteResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"ProcessMultipleNodeDeletes\", varargs...)\n\tret0, _ := ret[0].(*response.ProcessMultipleNodeDeleteResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockIRandomPresenter) OnListReceived(arg0 []aggregates.Topic) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnListReceived\", arg0)\n}", "func (_m *MockInterface) BatchDelete(ctx context.Context, keys []string) error {\n\tret := _m.Called(ctx, keys)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, []string) error); ok {\n\t\tr0 = rf(ctx, keys)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockProvider) OnServiceDelete(arg0 *v1.Service) 
{\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceDelete\", arg0)\n}", "func (m *MockChefIngesterServer) ProcessMultipleNodeDeletes(arg0 context.Context, arg1 *request.MultipleNodeDeleteRequest) (*response.ProcessMultipleNodeDeleteResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ProcessMultipleNodeDeletes\", arg0, arg1)\n\tret0, _ := ret[0].(*response.ProcessMultipleNodeDeleteResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestDeleteMembers(t *testing.T) {\n\tcalls := []testutils.TestCmd{\n\t\tfakeRestoreSuccessCommand,\n\t}\n\tioshim := common.NewMockIOShim(calls)\n\tdefer ioshim.VerifyCalls(t, calls)\n\tiMgr := NewIPSetManager(applyAlwaysCfg, ioshim)\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"1.1.1.1\", \"a\"))\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"2.2.2.2\", \"b\"))\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"3.3.3.3\", \"c\"))\n\t// create to destroy later\n\tiMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata})\n\t// clear dirty cache, otherwise a set deletion will be a no-op\n\tiMgr.clearDirtyCache()\n\n\t// will remove this member\n\trequire.NoError(t, iMgr.RemoveFromSets([]*IPSetMetadata{TestNSSet.Metadata}, \"1.1.1.1\", \"a\"))\n\t// will add this member\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"5.5.5.5\", \"e\"))\n\t// won't add/remove this member since the next two calls cancel each other out\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"4.4.4.4\", \"d\"))\n\trequire.NoError(t, iMgr.RemoveFromSets([]*IPSetMetadata{TestNSSet.Metadata}, \"4.4.4.4\", \"d\"))\n\t// won't add/remove this member since the next two calls cancel each other out\n\trequire.NoError(t, iMgr.RemoveFromSets([]*IPSetMetadata{TestNSSet.Metadata}, \"2.2.2.2\", \"b\"))\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"2.2.2.2\", \"b\"))\n\t// 
destroy extra set\n\tiMgr.DeleteIPSet(TestCIDRSet.PrefixName, util.SoftDelete)\n\n\texpectedLines := []string{\n\t\tfmt.Sprintf(\"-N %s --exist nethash\", TestNSSet.HashedName),\n\t\tfmt.Sprintf(\"-D %s 1.1.1.1\", TestNSSet.HashedName),\n\t\tfmt.Sprintf(\"-A %s 5.5.5.5\", TestNSSet.HashedName),\n\t\tfmt.Sprintf(\"-F %s\", TestCIDRSet.HashedName),\n\t\tfmt.Sprintf(\"-X %s\", TestCIDRSet.HashedName),\n\t\t\"\",\n\t}\n\tsortedExpectedLines := testAndSortRestoreFileLines(t, expectedLines)\n\tcreator := iMgr.fileCreatorForApply(len(calls))\n\tactualLines := testAndSortRestoreFileString(t, creator.ToString())\n\tdptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines)\n\twasFileAltered, err := creator.RunCommandOnceWithFile(\"ipset\", \"restore\")\n\trequire.NoError(t, err, \"ipset restore should be successful\")\n\trequire.False(t, wasFileAltered, \"file should not be altered\")\n}", "func (_m *GetterSetterDeleterIteratorUpdater) Delete(keys ...[]byte) error {\n\t_va := make([]interface{}, len(keys))\n\tfor _i := range keys {\n\t\t_va[_i] = keys[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(...[]byte) error); ok {\n\t\tr0 = rf(keys...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (suite *AddCommandTestSuite) TestExecuteWhenMultipleTracksFound() {\n\n}", "func TestEventsMgrRestart(t *testing.T) {\n\tti := tInfo{}\n\tAssertOk(t, ti.setup(t), \"failed to setup test\")\n\tdefer ti.teardown()\n\n\tnumRecorders := 3\n\n\tstopEventRecorders := make(chan struct{})\n\twg := new(sync.WaitGroup)\n\twg.Add(numRecorders + 1) // +1 for events manager restart go routine\n\n\t// uuid to make each source unique\n\tcomponentID := uuid.NewV4().String()\n\ttotalEventsSentBySrc := make([]int, numRecorders)\n\n\t// create recorder events directory\n\trecorderEventsDir, err := ioutil.TempDir(\"\", \"\")\n\tAssertOk(t, err, \"failed to create recorder events 
directory\")\n\tdefer os.RemoveAll(recorderEventsDir)\n\n\tfor i := 0; i < numRecorders; i++ {\n\t\tgo func(i int) {\n\t\t\tevtsRecorder, err := recorder.NewRecorder(&recorder.Config{\n\t\t\t\tComponent: fmt.Sprintf(\"%v-%v\", componentID, i),\n\t\t\t\tEvtsProxyURL: ti.evtProxyServices.EvtsProxy.RPCServer.GetListenURL(),\n\t\t\t\tBackupDir: recorderEventsDir}, ti.logger)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to create recorder for source %v\", i)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tti.recorders.Lock()\n\t\t\tti.recorders.list = append(ti.recorders.list, evtsRecorder)\n\t\t\tti.recorders.Unlock()\n\n\t\t\tticker := time.NewTicker(100 * time.Millisecond)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-stopEventRecorders:\n\t\t\t\t\twg.Done()\n\t\t\t\t\treturn\n\t\t\t\tcase <-ticker.C:\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test event - 1\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test event - 2\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_STOPPED, \"test event - 3\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// restart events manager\n\tgo func() {\n\t\tevtsMgrURL := ti.evtsMgr.RPCServer.GetListenURL()\n\n\t\tfor i := 0; i < 3; i++ {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tti.evtsMgr.Stop()\n\n\t\t\t// manager won't be able to accept any events for 1s; all the elastic writes will be denied\n\t\t\t// and all the events will be buffered at the writer for this time\n\t\t\ttime.Sleep(1 * time.Second)\n\n\t\t\t// exporters should be able to release all the holding events from the buffer\n\t\t\tevtsMgr, _, err := testutils.StartEvtsMgr(evtsMgrURL, ti.mockResolver, ti.logger, ti.esClient, nil)\n\t\t\tAssertOk(t, err, \"failed to start events manager, err: %v\", err)\n\t\t\tti.evtsMgr = evtsMgr\n\t\t}\n\n\t\ttime.Sleep(1 * time.Second)\n\n\t\t// stop all the 
recorders\n\t\tclose(stopEventRecorders)\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\t//total events sent by all the recorders\n\ttotalEventsSent := 0\n\tfor _, val := range totalEventsSentBySrc {\n\t\ttotalEventsSent += val\n\t}\n\n\tlog.Infof(\"total events sent: %v\", totalEventsSent)\n\n\t// total number of events received at elastic should match the total events sent\n\t// query all the events received from this source.component\n\tquery := es.NewRegexpQuery(\"source.component.keyword\", fmt.Sprintf(\"%v-.*\", componentID))\n\tti.assertElasticUniqueEvents(t, query, true, 3*numRecorders, \"60s\")\n\tti.assertElasticTotalEvents(t, query, false, totalEventsSent, \"60s\")\n}", "func testRemoveMultipleObjects() {\n\t// initialize logging params\n\tstartTime := time.Now()\n\ttestName := getFuncName()\n\tfunction := \"RemoveObjects(bucketName, objectsCh)\"\n\targs := map[string]interface{}{\n\t\t\"bucketName\": \"\",\n\t}\n\n\t// Seed random based on current time.\n\trand.Seed(time.Now().Unix())\n\n\t// Instantiate new minio client object.\n\tc, err := minio.New(os.Getenv(serverEndpoint),\n\t\t&minio.Options{\n\t\t\tCreds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), \"\"),\n\t\t\tSecure: mustParseBool(os.Getenv(enableHTTPS)),\n\t\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MinIO client object creation failed\", err)\n\t\treturn\n\t}\n\n\t// Set user agent.\n\tc.SetAppInfo(\"MinIO-go-FunctionalTest\", \"0.1.0\")\n\n\t// Enable tracing, write to stdout.\n\t// c.TraceOn(os.Stderr)\n\n\t// Generate a new random bucket name.\n\tbucketName := randString(60, rand.NewSource(time.Now().UnixNano()), \"minio-go-test-\")\n\targs[\"bucketName\"] = bucketName\n\n\t// Make a new bucket.\n\terr = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: \"us-east-1\"})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket failed\", err)\n\t\treturn\n\t}\n\n\tdefer 
cleanupBucket(bucketName, c)\n\n\tr := bytes.NewReader(bytes.Repeat([]byte(\"a\"), 8))\n\n\t// Multi remove of 1100 objects\n\tnrObjects := 200\n\n\tobjectsCh := make(chan minio.ObjectInfo)\n\n\tgo func() {\n\t\tdefer close(objectsCh)\n\t\t// Upload objects and send them to objectsCh\n\t\tfor i := 0; i < nrObjects; i++ {\n\t\t\tobjectName := \"sample\" + strconv.Itoa(i) + \".txt\"\n\t\t\tinfo, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,\n\t\t\t\tminio.PutObjectOptions{ContentType: \"application/octet-stream\"})\n\t\t\tif err != nil {\n\t\t\t\tlogError(testName, function, args, startTime, \"\", \"PutObject failed\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tobjectsCh <- minio.ObjectInfo{\n\t\t\t\tKey: info.Key,\n\t\t\t\tVersionID: info.VersionID,\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Call RemoveObjects API\n\terrorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})\n\n\t// Check if errorCh doesn't receive any error\n\tselect {\n\tcase r, more := <-errorCh:\n\t\tif more {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"Unexpected error\", r.Err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tsuccessLogger(testName, function, args, startTime).Info()\n}", "func TestMetricDeleted(t *testing.T) {\n\thelper := newHelper(t)\n\tdefer helper.Close()\n\n\thelper.preregisterAgent(t)\n\thelper.initSynchronizer(t)\n\thelper.AddTime(time.Minute)\n\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// list of active metrics,\n\t// 2 query per metric to register (one to find potential inactive, one to register)\n\t// + 1 to register 
agent_status\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 8)\n\n\tmetrics := helper.MetricsFromAPI()\n\tif len(metrics) != 4 { // 3 + agent_status\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\thelper.AddTime(90 * time.Minute)\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t})\n\n\t// API deleted metric1\n\tfor _, m := range metrics {\n\t\tif m.Name == \"metric1\" {\n\t\t\thelper.api.resources[mockAPIResourceMetric].DelStore(m.ID)\n\t\t}\n\t}\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\thelper.AddTime(1 * time.Minute)\n\n\t// metric1 is still alive\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// We list active metrics, 2 query to re-register metric\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 3)\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 4 { // 3 + agent_status\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\thelper.s.nextFullSync = helper.Now().Add(2 * time.Hour)\n\thelper.AddTime(90 * time.Minute)\n\n\t// API deleted metric2\n\tfor _, m := range metrics {\n\t\tif m.Name == \"metric2\" {\n\t\t\thelper.api.resources[mockAPIResourceMetric].DelStore(m.ID)\n\t\t}\n\t}\n\n\t// all metrics are inactive\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 3 {\n\t\tt.Errorf(\"len(metrics) = %d, want 3\", 
len(metrics))\n\t}\n\n\tfor _, m := range metrics {\n\t\tif m.DeactivatedAt.IsZero() && m.Name != agentStatusName {\n\t\t\tt.Errorf(\"%v should be deactivated\", m)\n\n\t\t\tbreak\n\t\t} else if !m.DeactivatedAt.IsZero() && m.Name == agentStatusName {\n\t\t\tt.Errorf(\"%v should not be deactivated\", m)\n\t\t}\n\t}\n\n\thelper.AddTime(1 * time.Minute)\n\n\t// API deleted metric3\n\tfor _, m := range metrics {\n\t\tif m.Name == \"metric3\" {\n\t\t\thelper.api.resources[mockAPIResourceMetric].DelStore(m.ID)\n\t\t}\n\t}\n\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t})\n\n\thelper.s.forceSync[syncMethodMetric] = true\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 4 {\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\tfor _, m := range metrics {\n\t\tif !m.DeactivatedAt.IsZero() {\n\t\t\tt.Errorf(\"%v should not be deactivated\", m)\n\t\t}\n\t}\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiListeners), region, lbID, listeners)\n}", "func Test_DeviceService_Remove_Success(t *testing.T) {\n\th := TestHelper{}\n\trep := new(mocks.IDeviceRepository)\n\trepAuth := new(mocks.IDeviceAuthRepository)\n\ts := h.CreateTestDeviceService(rep, repAuth)\n\n\tip := \"127.0.0.1\"\n\trep.On(\"Remove\", ip).Return(nil)\n\n\terr := s.Remove(ip)\n\tassert.NoError(t, err)\n}", "func (mock *MailgunMock) DeleteWebhookCalls() []struct {\n\tKind string\n} 
{\n\tvar calls []struct {\n\t\tKind string\n\t}\n\tlockMailgunMockDeleteWebhook.RLock()\n\tcalls = mock.calls.DeleteWebhook\n\tlockMailgunMockDeleteWebhook.RUnlock()\n\treturn calls\n}", "func (m *MockStudyServiceApiClient) DeleteParticipantFiles(arg0 context.Context, arg1 *api.DeleteParticipantFilesReq, arg2 ...grpc.CallOption) (*api.ServiceStatus, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteParticipantFiles\", varargs...)\n\tret0, _ := ret[0].(*api.ServiceStatus)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestDeleteAllAfterDeactivate(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tc, _ := minikubetestenv.AcquireCluster(t, defaultTestOptions)\n\ttu.ActivateAuthClient(t, c)\n\talice := tu.Robot(tu.UniqueString(\"alice\"))\n\taliceClient, rootClient := tu.AuthenticateClient(t, c, alice), tu.AuthenticateClient(t, c, auth.RootUser)\n\n\t// alice creates a pipeline\n\trepo := tu.UniqueString(\"TestDeleteAllAfterDeactivate\")\n\tpipeline := tu.UniqueString(\"pipeline\")\n\trequire.NoError(t, aliceClient.CreateRepo(pfs.DefaultProjectName, repo))\n\trequire.NoError(t, aliceClient.CreatePipeline(pfs.DefaultProjectName,\n\t\tpipeline,\n\t\t\"\", // default image: DefaultUserImage\n\t\t[]string{\"bash\"},\n\t\t[]string{fmt.Sprintf(\"cp /pfs/%s/* /pfs/out/\", repo)},\n\t\t&pps.ParallelismSpec{Constant: 1},\n\t\tclient.NewPFSInput(pfs.DefaultProjectName, repo, \"/*\"),\n\t\t\"\", // default output branch: master\n\t\tfalse,\n\t))\n\n\t// alice makes an input commit\n\tcommit, err := aliceClient.StartCommit(pfs.DefaultProjectName, repo, \"master\")\n\trequire.NoError(t, err)\n\terr = aliceClient.PutFile(commit, \"/file1\", strings.NewReader(\"test data\"))\n\trequire.NoError(t, err)\n\trequire.NoError(t, aliceClient.FinishCommit(pfs.DefaultProjectName, repo, commit.Branch.Name, 
commit.Id))\n\n\t// make sure the pipeline runs\n\trequire.NoErrorWithinT(t, 60*time.Second, func() error {\n\t\t_, err := aliceClient.WaitCommit(pfs.DefaultProjectName, pipeline, \"master\", commit.Id)\n\t\treturn err\n\t})\n\n\t// Deactivate auth\n\t_, err = rootClient.Deactivate(rootClient.Ctx(), &auth.DeactivateRequest{})\n\trequire.NoError(t, err)\n\n\t// Wait for auth to be deactivated\n\trequire.NoError(t, backoff.Retry(func() error {\n\t\t_, err := aliceClient.WhoAmI(aliceClient.Ctx(), &auth.WhoAmIRequest{})\n\t\tif err != nil && auth.IsErrNotActivated(err) {\n\t\t\treturn nil // WhoAmI should fail when auth is deactivated\n\t\t}\n\t\treturn errors.New(\"auth is not yet deactivated\")\n\t}, backoff.NewTestingBackOff()))\n\n\t// Make sure DeleteAll() succeeds\n\trequire.NoError(t, aliceClient.DeleteAll())\n}", "func (m *MockIPAMDriver) Del(arg0 *invoke.Args, arg1 *types.K8sArgs, arg2 []byte) (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Del\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mock *GlobalRoleBindingInterfaceMock) DeleteCalls() []struct {\n\tName string\n\tOptions *metav1.DeleteOptions\n} {\n\tvar calls []struct {\n\t\tName string\n\t\tOptions *metav1.DeleteOptions\n\t}\n\tlockGlobalRoleBindingInterfaceMockDelete.RLock()\n\tcalls = mock.calls.Delete\n\tlockGlobalRoleBindingInterfaceMockDelete.RUnlock()\n\treturn calls\n}", "func (m *MockServiceProbeDao) DELServiceProbesByServiceID(serviceID string) error {\n\tret := m.ctrl.Call(m, \"DELServiceProbesByServiceID\", serviceID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar memLog base.InMemLogger\n\tmem := vfs.NewMem()\n\trequire.NoError(t, mem.MkdirAll(\"ext\", 0755))\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(t *testing.T, td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tmemLog.Reset()\n\t\t\tlel := 
MakeLoggingEventListener(&memLog)\n\t\t\tflushBegin, flushEnd := lel.FlushBegin, lel.FlushEnd\n\t\t\tlel.FlushBegin = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushBegin(info)\n\t\t\t}\n\t\t\tlel.FlushEnd = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushEnd(info)\n\t\t\t}\n\t\t\topts := &Options{\n\t\t\t\tFS: vfs.WithLogging(mem, memLog.Infof),\n\t\t\t\tFormatMajorVersion: internalFormatNewest,\n\t\t\t\tEventListener: &lel,\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tL0CompactionThreshold: 10,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t}\n\t\t\t// The table stats collector runs asynchronously and its\n\t\t\t// timing is less predictable. It increments nextJobID, which\n\t\t\t// can make these tests flaky. The TableStatsLoaded event is\n\t\t\t// tested separately in TestTableStats.\n\t\t\topts.private.disableTableStats = true\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", opts)\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tt := time.Now()\n\t\t\td.timeNow = func() time.Time {\n\t\t\t\tt = t.Add(time.Second)\n\t\t\t\treturn t\n\t\t\t}\n\t\t\td.testingAlwaysWaitForCleanup = true\n\t\t\treturn memLog.String()\n\n\t\tcase \"close\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"flush\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"compact\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\"), false); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"checkpoint\":\n\t\t\tmemLog.Reset()\n\t\t\tif 
err := d.Checkpoint(\"checkpoint\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"disable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\td.mu.Lock()\n\t\t\td.disableFileDeletions()\n\t\t\td.mu.Unlock()\n\t\t\treturn memLog.String()\n\n\t\tcase \"enable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\tfunc() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\tmemLog.Infof(\"%v\", r)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\td.mu.Lock()\n\t\t\t\tdefer d.mu.Unlock()\n\t\t\t\td.enableFileDeletions()\n\t\t\t}()\n\t\t\td.TestOnlyWaitForCleaning()\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest\":\n\t\t\tmemLog.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest-flushable\":\n\t\t\tmemLog.Reset()\n\n\t\t\t// Prevent flushes during this test to ensure determinism.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = true\n\t\t\td.mu.Unlock()\n\n\t\t\tb := d.NewBatch()\n\t\t\tif err := b.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Apply(b, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\twriteTable := func(name string, key byte) error {\n\t\t\t\tf, err := mem.Create(name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tw := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\t\tTableFormat: 
d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t\t})\n\t\t\t\tif err := w.Add(base.MakeInternalKey([]byte{key}, 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.Close(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\ttableA, tableB := \"ext/a\", \"ext/b\"\n\t\t\tif err := writeTable(tableA, 'a'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := writeTable(tableB, 'b'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{tableA, tableB}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\n\t\t\t// Re-enable flushes, to allow the subsequent flush to proceed.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = false\n\t\t\td.mu.Unlock()\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"metrics\":\n\t\t\t// The asynchronous loading of table stats can change metrics, so\n\t\t\t// wait for all the tables' stats to be loaded.\n\t\t\td.mu.Lock()\n\t\t\td.waitTableStats()\n\t\t\td.mu.Unlock()\n\n\t\t\treturn d.Metrics().String()\n\n\t\tcase \"sstables\":\n\t\t\tvar buf bytes.Buffer\n\t\t\ttableInfos, _ := d.SSTables()\n\t\t\tfor i, level := range tableInfos {\n\t\t\t\tif len(level) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(&buf, \"%d:\\n\", i)\n\t\t\t\tfor _, m := range level {\n\t\t\t\t\tfmt.Fprintf(&buf, \" %d:[%s-%s]\\n\",\n\t\t\t\t\t\tm.FileNum, m.Smallest.UserKey, m.Largest.UserKey)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func (m *MockMetrics) MultiCreateSuccessResponseCounter() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"MultiCreateSuccessResponseCounter\")\n}", "func (_m *MockStateStore) BulkDelete(req []state.DeleteRequest) error {\n\tret := _m.Called(req)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func([]state.DeleteRequest) error); ok {\n\t\tr0 
= rf(req)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (t *tInfo) teardown() {\n\tt.recorders.close()\n\n\tif t.apiClient != nil {\n\t\tt.apiClient.ClusterV1().Version().Delete(context.Background(), &api.ObjectMeta{Name: t.testName})\n\t\tt.apiClient.Close()\n\t\tt.apiClient = nil\n\t}\n\n\tif t.esClient != nil {\n\t\tt.esClient.Close()\n\t}\n\n\ttestutils.StopElasticsearch(t.elasticsearchName, t.elasticsearchDir)\n\n\tif t.mockCitadelQueryServer != nil {\n\t\tt.mockCitadelQueryServer.Stop()\n\t\tt.mockCitadelQueryServer = nil\n\t}\n\n\tif t.evtsMgr != nil {\n\t\tt.evtsMgr.Stop()\n\t\tt.evtsMgr = nil\n\t}\n\n\tt.evtProxyServices.Stop()\n\n\tif t.apiServer != nil {\n\t\tt.apiServer.Stop()\n\t\tt.apiServer = nil\n\t}\n\n\t// stop certificate server\n\ttestutils.CleanupIntegTLSProvider()\n\n\tif t.mockResolver != nil {\n\t\tt.mockResolver.Stop()\n\t\tt.mockResolver = nil\n\t}\n\n\t// remove the local persistent events store\n\tt.logger.Infof(\"removing events store %s\", t.storeConfig.Dir)\n\tos.RemoveAll(t.storeConfig.Dir)\n\n\tt.logger.Infof(\"completed test\")\n}", "func (_m *FileRepository) DeleteAll(ids []string) error {\n\tret := _m.Called(ids)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func([]string) error); ok {\n\t\tr0 = rf(ids)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func TestEventsMgrWithElasticRestart(t *testing.T) {\n\tti := tInfo{dedupInterval: 300 * time.Second, batchInterval: 100 * time.Millisecond}\n\tAssertOk(t, ti.setup(t), \"failed to setup test\")\n\tdefer ti.teardown()\n\n\tnumRecorders := 3\n\n\tstopEventRecorders := make(chan struct{})\n\twg := new(sync.WaitGroup)\n\twg.Add(numRecorders + 1) // +1 for elastic restart go routine\n\n\t// uuid to make each source unique\n\tcomponentID := uuid.NewV4().String()\n\ttotalEventsSentBySrc := make([]int, numRecorders)\n\n\t// create recorder events directory\n\trecorderEventsDir, err := ioutil.TempDir(\"\", \"\")\n\tAssertOk(t, err, \"failed to create recorder 
events directory\")\n\tdefer os.RemoveAll(recorderEventsDir)\n\tfor i := 0; i < numRecorders; i++ {\n\t\tgo func(i int) {\n\t\t\tevtsRecorder, err := recorder.NewRecorder(&recorder.Config{\n\t\t\t\tComponent: fmt.Sprintf(\"%v-%v\", componentID, i),\n\t\t\t\tEvtsProxyURL: ti.evtProxyServices.EvtsProxy.RPCServer.GetListenURL(),\n\t\t\t\tBackupDir: recorderEventsDir}, ti.logger)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to create recorder for source %v\", i)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tti.recorders.Lock()\n\t\t\tti.recorders.list = append(ti.recorders.list, evtsRecorder)\n\t\t\tti.recorders.Unlock()\n\n\t\t\tticker := time.NewTicker(100 * time.Millisecond)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-stopEventRecorders:\n\t\t\t\t\twg.Done()\n\t\t\t\t\treturn\n\t\t\t\tcase <-ticker.C:\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test event - 1\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_UNRESPONSIVE, \"test event - 2\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_STOPPED, \"test event - 3\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// restart elasticsearch multiple times\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tfor i := 0; i < 3; i++ {\n\t\t\ttime.Sleep(3 * time.Second)\n\t\t\ttestutils.StopElasticsearch(ti.elasticsearchName, ti.elasticsearchDir)\n\t\t\tti.removeResolverEntry(globals.ElasticSearch, ti.elasticsearchAddr)\n\n\t\t\t// let elasticsearch come up on the same port as before.\n\t\t\t// so, wait for the port to become available\n\t\t\tAssertEventually(t,\n\t\t\t\tfunc() (bool, interface{}) {\n\t\t\t\t\ttemp := strings.Split(ti.elasticsearchAddr, \":\")\n\t\t\t\t\tif len(temp) != 2 {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"invalid elastic addr: %v\", ti.elasticsearchAddr)\n\t\t\t\t\t}\n\n\t\t\t\t\tport, err := strconv.Atoi(temp[1])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, 
fmt.Sprintf(\"invalid elastic port: %v\", temp[1])\n\n\t\t\t\t\t}\n\t\t\t\t\tif getAvailablePort(port, port) == port {\n\t\t\t\t\t\treturn true, nil\n\t\t\t\t\t}\n\n\t\t\t\t\treturn false, fmt.Sprintf(\"elastic port not yet available\")\n\t\t\t\t}, \"port not available to start elasticsearch\", \"50ms\", \"5s\")\n\t\t\tti.elasticsearchAddr, ti.elasticsearchDir, err = testutils.StartElasticsearch(ti.elasticsearchName, ti.elasticsearchDir, ti.signer, ti.trustRoots)\n\t\t\tAssertOk(t, err, \"failed to start elasticsearch, err: %v\", err)\n\t\t\tti.updateResolver(globals.ElasticSearch, ti.elasticsearchAddr)\n\t\t}\n\n\t\ttime.Sleep(5 * time.Second)\n\t\tclose(stopEventRecorders) // stop all the recorders\n\t}()\n\n\twg.Wait()\n\n\t//total events sent by all the recorders\n\ttotalEventsSent := 0\n\tfor _, val := range totalEventsSentBySrc {\n\t\ttotalEventsSent += val\n\t}\n\n\t// total number of events received at elastic should match the total events sent\n\t// query all the events received from this source.component\n\tquery := es.NewRegexpQuery(\"source.component.keyword\", fmt.Sprintf(\"%v-.*\", componentID))\n\tti.assertElasticUniqueEvents(t, query, true, 3*numRecorders, \"120s\")\n\tti.assertElasticTotalEvents(t, query, false, totalEventsSent, \"120s\")\n\tAssert(t, ti.esClient.GetResetCount() > 0, \"client should have restarted\")\n}", "func (m *MockOperation) RemoveHandler(arg0 *client.EventTarget) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoveHandler\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestDeleteHandler(t *testing.T) {\n\n // ...\n\n}", "func (m *MockCompany) Delete(arg0 ...repository.Filter) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range arg0 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Delete\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockrepoProvider) DeleteAll(ctx context.Context) error {\n\tm.ctrl.T.Helper()\n\tret := 
m.ctrl.Call(m, \"DeleteAll\", ctx)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockDBStorage) DeleteCallback(arg0, arg1 string) (sql.Result, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteCallback\", arg0, arg1)\n\tret0, _ := ret[0].(sql.Result)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockResponseHandler) TargetList() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"TargetList\")\n}", "func TestDedupNotifications(t *testing.T) {\n\tt.Parallel()\n\n\tsql := `\n\tinsert into users (id, name, email) \n\tvalues \n\t\t({{uuid \"user\"}}, 'bob', 'joe');\n\tinsert into user_contact_methods (id, user_id, name, type, value) \n\tvalues\n\t\t({{uuid \"cm1\"}}, {{uuid \"user\"}}, 'personal', 'SMS', {{phone \"1\"}});\n\n\tinsert into user_notification_rules (user_id, contact_method_id, delay_minutes) \n\tvalues\n\t\t({{uuid \"user\"}}, {{uuid \"cm1\"}}, 1),\n\t\t({{uuid \"user\"}}, {{uuid \"cm1\"}}, 2);\n\n\tinsert into escalation_policies (id, name) \n\tvalues\n\t\t({{uuid \"eid\"}}, 'esc policy');\n\tinsert into escalation_policy_steps (id, escalation_policy_id) \n\tvalues\n\t\t({{uuid \"esid\"}}, {{uuid \"eid\"}});\n\tinsert into escalation_policy_actions (escalation_policy_step_id, user_id) \n\tvalues \n\t\t({{uuid \"esid\"}}, {{uuid \"user\"}});\n\n\tinsert into services (id, escalation_policy_id, name) \n\tvalues\n\t\t({{uuid \"sid\"}}, {{uuid \"eid\"}}, 'service');\n\n\tinsert into alerts (service_id, description) \n\tvalues\n\t\t({{uuid \"sid\"}}, 'testing');\n\n`\n\n\th := harness.NewHarness(t, sql, \"escalation-policy-step-reorder\")\n\tdefer h.Close()\n\n\t// Test that after 3 minutes, only 1 notification is generated\n\th.FastForward(time.Minute * 3)\n\n\th.Twilio(t).Device(h.Phone(\"1\")).ExpectSMS(\"testing\")\n}", "func TestPeerManagerDeletePeer(t *testing.T) {\n\tseeds := []string {\"127.0.0.1:6000\",}\n\tmanager1 := CreatePeerManager(6000, 6001, nil, FullMode)\n\tmanager2 := CreatePeerManager(7000, 7001, seeds, 
FullMode)\n\tmanager3 := CreatePeerManager(8000, 8001, seeds, FullMode)\n\n\tclosePeer := func(t *testing.T) {\n\t\tmanager1.Stop() // Just close http service\n\t}\n\n\tallPeers := []string {\"127.0.0.1:7001\", \"127.0.0.1:8001\"}\n\tnoPeers := []string {}\n\tPeerManagerPropagationHelper(t, manager1, manager2, manager3, \n\t\tnoPeers, allPeers, allPeers, closePeer, 2*time.Second, 4*time.Second)\t\n\n}", "func testDeleteExamList(t *testing.T) {\n\tif err := MainService.DeleteExamList(1); err != nil {\n\t\tt.Error(err)\n\t}\n}", "func TestShutdown(t *testing.T) {\n\n\t// Create Mock ConsumerGroups To Register Close() Requests\n\tconsumerGroup1 := consumertesting.NewMockConsumerGroup()\n\tconsumerGroup2 := consumertesting.NewMockConsumerGroup()\n\tconsumerGroup3 := consumertesting.NewMockConsumerGroup()\n\n\t// Create Test Subscribers To Close The ConsumerGroups Of\n\tsubscriber1 := eventingduck.SubscriberSpec{UID: id123}\n\tsubscriber2 := eventingduck.SubscriberSpec{UID: id456}\n\tsubscriber3 := eventingduck.SubscriberSpec{UID: id789}\n\tgroupId1 := fmt.Sprintf(\"kafka.%s\", subscriber1.UID)\n\tgroupId2 := fmt.Sprintf(\"kafka.%s\", subscriber2.UID)\n\tgroupId3 := fmt.Sprintf(\"kafka.%s\", subscriber3.UID)\n\n\t// Create The Dispatcher To Test With Existing Subscribers\n\tdispatcher := &DispatcherImpl{\n\t\tDispatcherConfig: DispatcherConfig{\n\t\t\tLogger: logtesting.TestLogger(t).Desugar(),\n\t\t},\n\t\tsubscribers: map[types.UID]*SubscriberWrapper{\n\t\t\tsubscriber1.UID: NewSubscriberWrapper(subscriber1, groupId1, consumerGroup1),\n\t\t\tsubscriber2.UID: NewSubscriberWrapper(subscriber2, groupId2, consumerGroup2),\n\t\t\tsubscriber3.UID: NewSubscriberWrapper(subscriber3, groupId3, consumerGroup3),\n\t\t},\n\t}\n\n\t// Perform The Test\n\tdispatcher.Shutdown()\n\n\t// Verify The Results\n\tassert.True(t, consumerGroup1.Closed)\n\tassert.True(t, consumerGroup2.Closed)\n\tassert.True(t, consumerGroup3.Closed)\n\tassert.Len(t, dispatcher.subscribers, 0)\n\n\t// Verify 
that calling Shutdown a second time does not cause a panic\n\tdispatcher.Shutdown()\n}", "func deleteTest(t *testing.T, creds client.Credentials, deleteData []testDeleteData) {\n\tserver := setup(t, testMultiUserFilenameJSON)\n\tdefer teardown(t, server)\n\ttokenCookie := login(t, server, creds)\n\tdefer logout(t, server, tokenCookie)\n\n\tfor _, d := range deleteData {\n\t\tt.Run(fmt.Sprintf(\"ID-%s\", d.id),\n\t\t\tfunc(t *testing.T) {\n\t\t\t\trequest := httptest.NewRequest(http.MethodDelete, \"http://user/Delete/\"+d.id, nil)\n\t\t\t\trequest.AddCookie(tokenCookie)\n\t\t\t\tresponse := httptest.NewRecorder()\n\t\t\t\tps := httprouter.Params{\n\t\t\t\t\thttprouter.Param{\n\t\t\t\t\t\tKey: \"id\",\n\t\t\t\t\t\tValue: d.id},\n\t\t\t\t}\n\t\t\t\tserver.DeleteUser(response, request, ps)\n\t\t\t\tassert.Equalf(t, d.expectedResponse, response.Code,\n\t\t\t\t\t\"%s attempted to delete user ID %s, expected '%s' got '%s'\", creds.Username, d.id,\n\t\t\t\t\thttp.StatusText(d.expectedResponse), http.StatusText(response.Code))\n\t\t\t})\n\t}\n}", "func (mock *GitModuleControllerMock) OnRemoveCalls() []struct {\n\tCtx context.Context\n\tName string\n\tSync v1.GitModuleHandler\n} {\n\tvar calls []struct {\n\t\tCtx context.Context\n\t\tName string\n\t\tSync v1.GitModuleHandler\n\t}\n\tlockGitModuleControllerMockOnRemove.RLock()\n\tcalls = mock.calls.OnRemove\n\tlockGitModuleControllerMockOnRemove.RUnlock()\n\treturn calls\n}", "func TestTrunkENI_DeleteCooledDownENIs_DeleteFailed(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\ttrunkENI, ec2APIHelper, _ := getMockHelperInstanceAndTrunkObject(ctrl)\n\tEniDetails1.deletionTimeStamp = time.Now().Add(-time.Second * 31)\n\tEniDetails2.deletionTimeStamp = time.Now().Add(-time.Second * 32)\n\ttrunkENI.usedVlanIds[VlanId1] = true\n\ttrunkENI.usedVlanIds[VlanId2] = true\n\n\ttrunkENI.deleteQueue = append(trunkENI.deleteQueue, EniDetails1, 
EniDetails2)\n\n\tgomock.InOrder(\n\t\tec2APIHelper.EXPECT().DeleteNetworkInterface(&EniDetails1.ID).Return(MockError).Times(MaxDeleteRetries),\n\t\tec2APIHelper.EXPECT().DeleteNetworkInterface(&EniDetails2.ID).Return(nil),\n\t)\n\n\ttrunkENI.DeleteCooledDownENIs()\n\tassert.Zero(t, len(trunkENI.deleteQueue))\n}", "func (m *MockVirtualMeshCertificateSigningRequestClient) DeleteAllOfVirtualMeshCertificateSigningRequest(ctx context.Context, opts ...client.DeleteAllOfOption) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteAllOfVirtualMeshCertificateSigningRequest\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockStreamEventListener) OnDestroyStream() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnDestroyStream\")\n}", "func (m *MockEnvoyFilterSet) Delete(envoyFilter ezkube.ResourceId) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Delete\", envoyFilter)\n}", "func (s) TestIgnoreResourceDeletionOnClient(t *testing.T) {\n\tserver1 := stubserver.StartTestService(t, nil)\n\tt.Cleanup(server1.Stop)\n\n\tserver2 := stubserver.StartTestService(t, nil)\n\tt.Cleanup(server2.Stop)\n\n\tinitialResourceOnServer := func(nodeID string) e2e.UpdateOptions {\n\t\treturn e2e.UpdateOptions{\n\t\t\tNodeID: nodeID,\n\t\t\tListeners: []*listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)},\n\t\t\tRoutes: []*routepb.RouteConfiguration{defaultRouteConfigWithTwoRoutes},\n\t\t\tClusters: []*clusterpb.Cluster{\n\t\t\t\te2e.DefaultCluster(cdsName1, edsName1, e2e.SecurityLevelNone),\n\t\t\t\te2e.DefaultCluster(cdsName2, edsName2, e2e.SecurityLevelNone),\n\t\t\t},\n\t\t\tEndpoints: []*endpointpb.ClusterLoadAssignment{\n\t\t\t\te2e.DefaultEndpoint(edsName1, \"localhost\", []uint32{testutils.ParsePort(t, server1.Address)}),\n\t\t\t\te2e.DefaultEndpoint(edsName2, \"localhost\", []uint32{testutils.ParsePort(t, 
server2.Address)}),\n\t\t\t},\n\t\t\tSkipValidation: true,\n\t\t}\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tupdateResource func(r *e2e.UpdateOptions)\n\t}{\n\t\t{\n\t\t\tname: \"listener\",\n\t\t\tupdateResource: func(r *e2e.UpdateOptions) {\n\t\t\t\tr.Listeners = nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"cluster\",\n\t\t\tupdateResource: func(r *e2e.UpdateOptions) {\n\t\t\t\tr.Clusters = nil\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s resource deletion ignored\", test.name), func(t *testing.T) {\n\t\t\ttestResourceDeletionIgnored(t, initialResourceOnServer, test.updateResource)\n\t\t})\n\t\tt.Run(fmt.Sprintf(\"%s resource deletion not ignored\", test.name), func(t *testing.T) {\n\t\t\ttestResourceDeletionNotIgnored(t, initialResourceOnServer, test.updateResource)\n\t\t})\n\t}\n}", "func (m *MockUserTokenService) RevokeAll(arg0 context.Context, arg1 *model.User, arg2 *sql.Tx) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RevokeAll\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestMultipleRegisterCalls(t *testing.T) {\n\tRegister(\"multiple-register-driver-1\")\n\trequire.PanicsWithError(t, \"Register called twice for driver multiple-register-driver-1\", func() {\n\t\tRegister(\"multiple-register-driver-1\")\n\t})\n\n\t// Should be no error.\n\tRegister(\"multiple-register-driver-2\")\n}", "func TestUpdateSubscriptionDeleteFilter(t *testing.T) {\n\trepository, mock := initTest(t)\n\n\t// subscription to update\n\tsubscription := models.Subscription{\n\t\tID: \"42\",\n\t\tCallbackURL: \"url\",\n\t\tCallbackType: models.HTTP,\n\t\tFilters: map[models.EventType]models.Filter{\n\t\t\tmodels.DirectoryBlockCommit: {Filtering: fmt.Sprintf(\"no change filtering\")},\n\t\t},\n\t}\n\tsubscriptionContext := &models.SubscriptionContext{\n\t\tSubscription: subscription,\n\t\tFailures: 0,\n\t}\n\n\tcolumns := []string{\"failures\", \"callback\", \"callback_type\", \"status\", \"info\", 
\"access_token\", \"username\", \"password\", \"event_type\", \"filtering\"}\n\tmock.ExpectQuery(`SELECT failures, callback, callback_type, status, info, access_token, username, password, event_type, filtering FROM subscriptions LEFT JOIN filters ON filters.subscription = subscriptions.id WHERE subscriptions.id = \\?`).\n\t\tWithArgs(subscription.ID).\n\t\tWillReturnRows(sqlmock.NewRows(columns).\n\t\t\tAddRow(subscriptionContext.Failures, \"url-change\", subscription.CallbackType, subscription.SubscriptionStatus, subscription.SubscriptionInfo, subscription.Credentials.AccessToken, subscription.Credentials.BasicAuthUsername, subscription.Credentials.BasicAuthPassword, models.DirectoryBlockCommit, \"no change filtering\").\n\t\t\tAddRow(subscriptionContext.Failures, \"url-change\", subscription.CallbackType, subscription.SubscriptionStatus, subscription.SubscriptionInfo, subscription.Credentials.AccessToken, subscription.Credentials.BasicAuthUsername, subscription.Credentials.BasicAuthPassword, models.ChainCommit, \"this will be deleted\"))\n\n\tmock.ExpectBegin()\n\tmock.ExpectExec(`UPDATE subscriptions`).WithArgs(subscriptionContext.Failures, subscription.CallbackURL, subscription.CallbackType, subscription.SubscriptionStatus, subscription.SubscriptionInfo, subscription.Credentials.AccessToken, subscription.Credentials.BasicAuthUsername, subscription.Credentials.BasicAuthPassword, subscription.ID).WillReturnResult(sqlmock.NewResult(42, 1))\n\tmock.ExpectExec(`DELETE FROM filters`).WithArgs(subscription.ID, models.ChainCommit).WillReturnResult(sqlmock.NewResult(42, 1))\n\tmock.ExpectCommit()\n\n\t// now we execute our method\n\tupdatedSubscriptionContext, err := repository.UpdateSubscription(subscriptionContext)\n\tif err != nil {\n\t\tt.Errorf(\"error was not expected creating subscription: %s\", err)\n\t}\n\n\tassertSubscription(t, subscriptionContext, updatedSubscriptionContext)\n\n\t// we make sure that all expectations were met\n\tif err := 
mock.ExpectationsWereMet(); err != nil {\n\t\tt.Errorf(\"there were unfulfilled expectations: %s\", err)\n\t}\n}", "func (mock *MultiClusterAppInterfaceMock) DeleteCalls() []struct {\n\tName string\n\tOptions *metav1.DeleteOptions\n} {\n\tvar calls []struct {\n\t\tName string\n\t\tOptions *metav1.DeleteOptions\n\t}\n\tlockMultiClusterAppInterfaceMockDelete.RLock()\n\tcalls = mock.calls.Delete\n\tlockMultiClusterAppInterfaceMockDelete.RUnlock()\n\treturn calls\n}", "func (m *MockVirtualMeshCertificateSigningRequestWriter) DeleteAllOfVirtualMeshCertificateSigningRequest(ctx context.Context, opts ...client.DeleteAllOfOption) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteAllOfVirtualMeshCertificateSigningRequest\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestAddRemoveEventsOnCachedCollection(t *testing.T) {\n\ttbl := []struct {\n\t\tEventName string // Name of the event. 
Either add or remove.\n\t\tEventPayload string // Event payload (raw JSON)\n\t\tExpectedCollection string // Expected collection after event (raw JSON)\n\t}{\n\t\t{\"add\", `{\"idx\":0,\"value\":\"bar\"}`, `[\"bar\",\"foo\",42,true,null]`},\n\t\t{\"add\", `{\"idx\":1,\"value\":\"bar\"}`, `[\"foo\",\"bar\",42,true,null]`},\n\t\t{\"add\", `{\"idx\":4,\"value\":\"bar\"}`, `[\"foo\",42,true,null,\"bar\"]`},\n\t\t{\"remove\", `{\"idx\":0}`, `[42,true,null]`},\n\t\t{\"remove\", `{\"idx\":1}`, `[\"foo\",true,null]`},\n\t\t{\"remove\", `{\"idx\":3}`, `[\"foo\",42,true]`},\n\t}\n\n\tfor i, l := range tbl {\n\t\tfor sameClient := true; sameClient; sameClient = false {\n\t\t\trunNamedTest(t, fmt.Sprintf(\"#%d with the same client being %+v\", i+1, sameClient), func(s *Session) {\n\t\t\t\tvar creq *ClientRequest\n\n\t\t\t\tc := s.Connect()\n\t\t\t\tsubscribeToTestCollection(t, s, c)\n\n\t\t\t\t// Send event on collection and validate client event\n\t\t\t\ts.ResourceEvent(\"test.collection\", l.EventName, json.RawMessage(l.EventPayload))\n\t\t\t\tc.GetEvent(t).Equals(t, \"test.collection.\"+l.EventName, json.RawMessage(l.EventPayload))\n\n\t\t\t\tif sameClient {\n\t\t\t\t\tc.Request(\"unsubscribe.test.collection\", nil).GetResponse(t)\n\t\t\t\t\t// Subscribe a second time\n\t\t\t\t\tcreq = c.Request(\"subscribe.test.collection\", nil)\n\t\t\t\t} else {\n\t\t\t\t\tc2 := s.Connect()\n\t\t\t\t\t// Subscribe a second time\n\t\t\t\t\tcreq = c2.Request(\"subscribe.test.collection\", nil)\n\t\t\t\t}\n\n\t\t\t\t// Handle collection access request\n\t\t\t\ts.GetRequest(t).AssertSubject(t, \"access.test.collection\").RespondSuccess(json.RawMessage(`{\"get\":true}`))\n\n\t\t\t\t// Validate client response\n\t\t\t\tcreq.GetResponse(t).AssertResult(t, json.RawMessage(`{\"collections\":{\"test.collection\":`+l.ExpectedCollection+`}}`))\n\t\t\t})\n\t\t}\n\t}\n}", "func (mock *s3BatchDeleteClientMock) DeleteCalls() []struct {\n\tIn1 context.Context\n\tIn2 s3manager.BatchDeleteIterator\n} 
{\n\tvar calls []struct {\n\t\tIn1 context.Context\n\t\tIn2 s3manager.BatchDeleteIterator\n\t}\n\tlocks3BatchDeleteClientMockDelete.RLock()\n\tcalls = mock.calls.Delete\n\tlocks3BatchDeleteClientMockDelete.RUnlock()\n\treturn calls\n}", "func (m *MockRouterTx) DELETE(path string, handler interface{}, options ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{path, handler}\n\tfor _, a := range options {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"DELETE\", varargs...)\n}", "func (m *MockDBStorage) DeleteBatch(arg0, arg1 string) (sql.Result, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteBatch\", arg0, arg1)\n\tret0, _ := ret[0].(sql.Result)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *K8sClient) DeleteAllOf(_a0 context.Context, _a1 client.Object, _a2 ...client.DeleteAllOfOption) error {\n\t_va := make([]interface{}, len(_a2))\n\tfor _i := range _a2 {\n\t\t_va[_i] = _a2[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _a0, _a1)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, client.Object, ...client.DeleteAllOfOption) error); ok {\n\t\tr0 = rf(_a0, _a1, _a2...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func TestTargetSlice(t *testing.T) {\n\n\tt1 := &target{version: 1}\n\tt2 := &target{version: 2}\n\tt3 := &target{version: 3}\n\n\tts := &targetSlice{}\n\n\tassertContains := func(want []*target) {\n\t\tt.Helper()\n\t\tgot := *ts\n\t\tif len(want) != len(got) {\n\t\t\tt.Fatalf(\"different slice lengths; want=%v got=%v\", want, got)\n\t\t}\n\n\t\tfor i := range want {\n\t\t\tfound := false\n\t\t\tfor j := range got {\n\t\t\t\tif want[i] == got[j] {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tt.Fatalf(\"could not find wanted element %d. 
\"+\n\t\t\t\t\t\"want=%v got=%v\", i, want, got)\n\t\t\t}\n\n\t\t}\n\t}\n\n\tts.add(t1)\n\tts.add(t2)\n\tts.add(t3)\n\tassertContains([]*target{t1, t2, t3})\n\n\tts.del(t2)\n\tassertContains([]*target{t1, t3})\n\n\tts.del(t1)\n\tassertContains([]*target{t3})\n\n\tts.del(t3)\n\tassertContains([]*target{})\n}", "func (m *ManagerMock) Delete(ctx context.Context, s *hub.Subscription) error {\n\targs := m.Called(ctx, s)\n\treturn args.Error(0)\n}", "func (m *MockService) DeleteServerAndStorages(ctx context.Context, r *request.DeleteServerAndStoragesRequest) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteServerAndStorages\", ctx, r)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockTenantServicePluginRelationDao) DeleteALLRelationByServiceID(serviceID string) error {\n\tret := m.ctrl.Call(m, \"DeleteALLRelationByServiceID\", serviceID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestTrunkENI_DeleteAllBranchENIs(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\ttrunkENI, mockEC2APIHelper, _ := getMockHelperInstanceAndTrunkObject(ctrl)\n\ttrunkENI.uidToBranchENIMap[PodUID] = branchENIs1\n\ttrunkENI.uidToBranchENIMap[PodUID2] = branchENIs2\n\ttrunkENI.deleteQueue = append(trunkENI.deleteQueue, branchENIs1[0])\n\n\t// Since we added the same branch ENIs in the cool down queue and in the pod to eni map\n\tmockEC2APIHelper.EXPECT().DeleteNetworkInterface(&Branch1Id).Return(nil).Times(2)\n\tmockEC2APIHelper.EXPECT().DeleteNetworkInterface(&Branch2Id).Return(nil)\n\n\ttrunkENI.DeleteAllBranchENIs()\n}", "func (m *MockProc) OnSvcAllHostReplace(arg0 []*host.Host) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnSvcAllHostReplace\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}" ]
[ "0.6633282", "0.6609152", "0.6516178", "0.6433535", "0.6138981", "0.6003157", "0.5966249", "0.5939354", "0.5854609", "0.5794363", "0.5759522", "0.57487077", "0.57487077", "0.57487077", "0.57487077", "0.57487077", "0.57487077", "0.57487077", "0.5685879", "0.5616986", "0.56137586", "0.5586267", "0.55852944", "0.5581818", "0.5576148", "0.5563346", "0.5506811", "0.54973674", "0.543916", "0.5422404", "0.5416975", "0.54122233", "0.53818107", "0.53704923", "0.5367836", "0.53666", "0.5339708", "0.5338379", "0.5326883", "0.5310291", "0.52904963", "0.52802706", "0.52801865", "0.526927", "0.52580154", "0.52543247", "0.52528167", "0.5245838", "0.5245572", "0.52454525", "0.52357185", "0.5225621", "0.52215445", "0.52196276", "0.52154773", "0.52029115", "0.5180994", "0.5169958", "0.51564133", "0.5150726", "0.51501393", "0.5149277", "0.51325154", "0.5131372", "0.51264304", "0.51251566", "0.51218957", "0.5118915", "0.5114856", "0.5111264", "0.51055765", "0.50985336", "0.50977653", "0.50945014", "0.50829417", "0.5068327", "0.50673175", "0.5066921", "0.50663424", "0.5042519", "0.50415516", "0.5034705", "0.50325185", "0.5031762", "0.5031067", "0.5024825", "0.5024524", "0.5021516", "0.5019935", "0.50198245", "0.5009132", "0.5006431", "0.5005995", "0.49888068", "0.4988457", "0.49875635", "0.49861303", "0.498174", "0.4976851", "0.49658036" ]
0.8236407
0
DeleteMultiListeners indicates an expected call of DeleteMultiListeners
func (mr *MockLoadBalanceMockRecorder) DeleteMultiListeners(region, lbID, listeners interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMultiListeners", reflect.TypeOf((*MockLoadBalance)(nil).DeleteMultiListeners), region, lbID, listeners) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) DeleteMultiListeners(region, lbID string, listeners []*v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiListeners), region, lbID, listeners)\n}", "func TestMsgListenerMulti(t *testing.T) {\n\tml := newMsgListeners()\n\n\tcount := 0\n\tcids := testCids()\t// TODO: The wrong Directory type was being used for MapEntries.\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\n\t})\t// TODO: Исправления для OSX\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\t// TODO: Test emails 1\n\t})\n\tml.onMsgComplete(cids[1], func(err error) {\n\t\tcount++\n\t})\n\n\tml.fireMsgComplete(cids[0], nil)\n\trequire.Equal(t, 2, count)\n\n\tml.fireMsgComplete(cids[1], nil)\n\trequire.Equal(t, 3, count)\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiSegmentListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiSegmentListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiSegmentListeners), region, lbID, listeners)\n}", "func (m *MockLoadBalance) EnsureMultiListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (r *dsState) DeleteMulti(keys []*ds.Key, cb ds.DeleteMultiCB) error {\n\tif len(keys) == 0 {\n\t\treturn nil\n\t}\n\treturn r.run(r.c, func() error {\n\t\treturn r.rds.DeleteMulti(keys, cb)\n\t})\n}", "func 
TestActiveMultiEvent_Deactivate(t *testing.T) {\r\n\tnumber := 10\r\n\tvar events []*ActiveEvent\r\n\tvar mock []*mockUnixHelper\r\n\r\n\tfor i := 0; i < number; i++ {\r\n\t\tunixMock := &mockUnixHelper{}\r\n\t\tnewActive := &ActiveEvent{FileDescriptor: i, unix: unixMock}\r\n\t\tunixMock.On(\"close\", i).Return(nil).Once()\r\n\t\tevents = append(events, newActive)\r\n\t\tmock = append(mock, unixMock)\r\n\t}\r\n\r\n\tnewActiveMulti := ActiveMultiEvent{events: events}\r\n\tnewActiveMulti.Deactivate()\r\n\r\n\trequire.Nil(t, newActiveMulti.events)\r\n\tfor _, event := range events {\r\n\t\trequire.Nil(t, event)\r\n\t}\r\n\tfor _, m := range mock {\r\n\t\tm.AssertExpectations(t)\r\n\t}\r\n}", "func TestAddManyRemoveFew(t *testing.T) {\n\ttarget := teaser.New()\n\tmids := []string{}\n\tfor i := 0; i <= 10; i++ {\n\t\tmid := target.Add(fmt.Sprintf(\"msg%d\", i))\n\t\tmids = append(mids, mid)\n\t}\n\tif len(mids) < 10 {\n\t\tt.Fatal(\"broken test data, expected mids to have at least 9 elementes \")\n\t}\n\n\t{\n\t\tm5id := mids[5]\n\t\tdeleted := target.Delete(m5id)\n\t\tassertDeleted(m5id, deleted, t)\n\t\tassertIsNotInTheQueue(target, m5id, t)\n\n\t\tnewmids := mids[:5]\n\t\tnewmids = append(newmids, mids[6:]...)\n\t\tmids = newmids\n\t\twaitForMessages()\n\t\tassertTheQueueContainsIds(target, mids, t)\n\t\twaitForMessages()\n\t}\n\n\t{\n\t\tm5id := mids[5]\n\t\tdeleted := target.Delete(m5id)\n\t\tassertDeleted(m5id, deleted, t)\n\t\tassertIsNotInTheQueue(target, m5id, t)\n\t\tnmids := mids[:5]\n\t\tnmids = append(nmids, mids[6:]...)\n\t\tmids = nmids\n\t\twaitForMessages()\n\t\tassertTheQueueContainsIds(target, mids, t)\n\t}\n}", "func testAddDeleteMulti(tree T, t *testing.T) {\n\telem := \"test\"\n\telemII := \"testII\"\n\t//testDeleteSimple(NewQuadTree(0, d.width, 0, d.height, 10000), []interface{}{elem, elemII}, []interface{}{elem, elemII}, false, \"Simple Global Delete Take Two Of Two\", t)\n\ttestDeleteSimple(tree, []interface{}{elem, elemII}, []interface{}{elem, 
elemII}, true, \"Simple Exact Delete Take Two Of Two\", t)\n}", "func cleanup(listeners []net.Listener, wg *sync.WaitGroup) error {\n\tlog.Println(\"Cleaning up.....\")\n\tfor _, listener := range listeners {\n\t\tlog.Printf(\"closing server at address %s ....\", listener.Addr().String())\n\t\tif err := listener.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"closed server at address %s\", listener.Addr().String())\n\t\twg.Done()\n\t}\n\treturn nil\n}", "func (m *MonkeyWrench) DeleteMulti(table string, keys []spanner.Key) error {\n\t// Create a mutation for each value set we have.\n\tmutations := make([]*spanner.Mutation, 0, len(keys))\n\tfor _, key := range keys {\n\t\tmutations = append(mutations, spanner.Delete(table, key))\n\t}\n\n\t// Apply the mutations.\n\terr := m.applyMutations(mutations)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func ExampleELB_DeleteLoadBalancerListeners_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.DeleteLoadBalancerListenersInput{\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t\tLoadBalancerPorts: []*int64{\n\t\t\taws.Int64(80),\n\t\t},\n\t}\n\n\tresult, err := svc.DeleteLoadBalancerListeners(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (c *Client) DeleteMulti(ctx context.Context, ks []*Key) error {\n\tpbks, err := keys(ks).proto()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := pb.NewStoreClient(c.ClientConn).Delete(c.newContext(ctx), &pb.Keys{\n\t\tKeys: pbks,\n\t})\n\tif err != nil {\n\t\treturn 
err\n\t}\n\treturn multiErrorFromRecordStatusProto(resp.Status)\n}", "func (suite *FIFOTestSuite) TestRemoveMultipleGRs() {\n\tvar (\n\t\twg sync.WaitGroup\n\t\ttotalElementsToEnqueue = 100\n\t\ttotalElementsToRemove = 90\n\t)\n\n\tfor i := 0; i < totalElementsToEnqueue; i++ {\n\t\tsuite.fifo.Enqueue(i)\n\t}\n\n\tfor i := 0; i < totalElementsToRemove; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := suite.fifo.Remove(1)\n\t\t\tsuite.NoError(err, \"Unexpected error during concurrent Remove(n)\")\n\t\t}()\n\t}\n\twg.Wait()\n\n\t// check len, should be == totalElementsToEnqueue - totalElementsToRemove\n\ttotalElementsAfterRemove := suite.fifo.GetLen()\n\tsuite.Equal(totalElementsToEnqueue-totalElementsToRemove, totalElementsAfterRemove, \"Total elements on list does not match with expected number\")\n\n\t// check current 2nd element (index 1) on the queue\n\tval, err := suite.fifo.Get(1)\n\tsuite.NoError(err, \"No error should be returned when getting an existent element\")\n\tsuite.Equalf(1+totalElementsToRemove, val, \"The expected value at position 1 (2nd element) should be: %v\", 1+totalElementsToRemove)\n}", "func (mr *MockMultiClusterRoleEventHandlerMockRecorder) DeleteMultiClusterRole(obj interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteMultiClusterRole\", reflect.TypeOf((*MockMultiClusterRoleEventHandler)(nil).DeleteMultiClusterRole), obj)\n}", "func (m *MockLoadBalance) EnsureMultiSegmentListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiSegmentListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (ps *Store) MultiDelete(ctx context.Context, entityMetadata datastore.EntityMetadata, entities []datastore.Entity) error {\n\t// TODO\n\treturn nil\n}", "func TestSplitListenersToDiffProtocol(t 
*testing.T) {\n\ttestListeners := []*networkextensionv1.Listener{\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8000,\n\t\t\t\tProtocol: \"HTTP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8001,\n\t\t\t\tProtocol: \"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8002,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8003,\n\t\t\t\tProtocol: \"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8004,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t}\n\tliGroup := splitListenersToDiffProtocol(testListeners)\n\tfor _, list := range liGroup {\n\t\tt.Logf(\"%+v\", list)\n\t\ttmpProtocol := make(map[string]struct{})\n\t\tfor _, li := range list {\n\t\t\ttmpProtocol[li.Spec.Protocol] = struct{}{}\n\t\t}\n\t\tif len(tmpProtocol) != 1 {\n\t\t\tt.Errorf(\"list %v contains more than one protocol %v\", list, tmpProtocol)\n\t\t}\n\t}\n}", "func (instance *DSInstance) DeleteMulti(ctx context.Context, keys []*datastore.Key) error {\n\terr := instance.client.DeleteMulti(ctx, keys)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"DSInstance.DeleteMulti\")\n\t}\n\treturn nil\n}", "func DeleteMulti(c appengine.Context, key []*Key) os.Error {\n\tif len(key) == 0 {\n\t\treturn nil\n\t}\n\tif err := multiValid(key); err != nil {\n\t\treturn err\n\t}\n\treq := &pb.DeleteRequest{\n\t\tKey: multiKeyToProto(c.FullyQualifiedAppID(), key),\n\t}\n\tres := &pb.DeleteResponse{}\n\treturn c.Call(\"datastore_v3\", \"Delete\", req, res)\n}", "func (ml *multiListener) Close() error {\n\tdefer ml.wg.Wait()\n\tif !ml.closed {\n\t\tclose(ml.closeCh)\n\t\tml.closed = true\n\t}\n\thasErr := false\n\tfor _, l := range ml.listeners {\n\t\terr := l.Close()\n\t\tif err != nil {\n\t\t\thasErr = true\n\t\t\tml.l.Error(context.Background(), errors.Wrap(err, \"close 
listener\"))\n\t\t}\n\t}\n\n\tif hasErr {\n\t\treturn errors.New(\"close listeners: one or more errors\")\n\t}\n\treturn nil\n}", "func TestPostgresReplicationEventQueue_DequeueMultiple(t *testing.T) {\n\tt.Parallel()\n\tdb := testdb.New(t)\n\tctx := testhelper.Context(t)\n\n\tqueue := PostgresReplicationEventQueue{db.DB}\n\n\teventType1 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: UpdateRepo,\n\t\t\tRelativePath: \"/project/path-1\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"gitaly-0\",\n\t\t\tVirtualStorage: \"praefect\",\n\t\t\tParams: nil,\n\t\t},\n\t}\n\n\teventType2 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: DeleteRepo,\n\t\t\tRelativePath: \"/project/path-1\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"\",\n\t\t\tVirtualStorage: \"praefect\",\n\t\t\tParams: nil,\n\t\t},\n\t}\n\n\teventType3 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: RenameRepo,\n\t\t\tRelativePath: \"/project/path-2\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"gitaly-0\",\n\t\t\tVirtualStorage: \"praefect\",\n\t\t\tParams: Params{\"RelativePath\": \"/project/path-2-renamed\"},\n\t\t},\n\t}\n\n\teventType4 := ReplicationEvent{\n\t\tJob: ReplicationJob{\n\t\t\tChange: UpdateRepo,\n\t\t\tRelativePath: \"/project/path-1\",\n\t\t\tTargetNodeStorage: \"gitaly-1\",\n\t\t\tSourceNodeStorage: \"gitaly-0\",\n\t\t\tVirtualStorage: \"backup\",\n\t\t},\n\t}\n\n\t// events to fill in the queue\n\tevents := []ReplicationEvent{eventType1, eventType1, eventType2, eventType1, eventType3, eventType4}\n\tfor i := range events {\n\t\tvar err error\n\t\tevents[i], err = queue.Enqueue(ctx, events[i])\n\t\trequire.NoError(t, err, \"failed to fill in event queue\")\n\t}\n\n\t// first request to deque\n\texpectedEvents1 := []ReplicationEvent{events[0], events[2], events[4]}\n\texpectedJobLocks1 := []JobLockRow{\n\t\t{JobID: events[0].ID, LockID: 
\"praefect|gitaly-1|/project/path-1\"},\n\t\t{JobID: events[2].ID, LockID: \"praefect|gitaly-1|/project/path-1\"},\n\t\t{JobID: events[4].ID, LockID: \"praefect|gitaly-1|/project/path-2\"},\n\t}\n\n\t// we expect only first two types of events by limiting count to 3\n\tdequeuedEvents1, err := queue.Dequeue(ctx, \"praefect\", \"gitaly-1\", 3)\n\trequire.NoError(t, err)\n\trequire.Len(t, dequeuedEvents1, len(expectedEvents1))\n\tfor i := range dequeuedEvents1 {\n\t\tdequeuedEvents1[i].UpdatedAt = nil // it is not possible to determine update_at value as it is generated on UPDATE in database\n\t\texpectedEvents1[i].State = JobStateInProgress\n\t\texpectedEvents1[i].Attempt--\n\t}\n\trequire.Equal(t, expectedEvents1, dequeuedEvents1)\n\n\trequireLocks(t, ctx, db, []LockRow{\n\t\t// there is only one single lock for all fetched events because of their 'repo' and 'target' combination\n\t\t{ID: \"praefect|gitaly-1|/project/path-1\", Acquired: true},\n\t\t{ID: \"praefect|gitaly-1|/project/path-2\", Acquired: true},\n\t\t{ID: \"backup|gitaly-1|/project/path-1\", Acquired: false},\n\t})\n\trequireJobLocks(t, ctx, db, expectedJobLocks1)\n\n\t// second request to deque\n\t// there must be only last event fetched from the queue\n\texpectedEvents2 := []ReplicationEvent{events[5]}\n\texpectedEvents2[0].State = JobStateInProgress\n\texpectedEvents2[0].Attempt = 2\n\n\texpectedJobLocks2 := []JobLockRow{{JobID: 6, LockID: \"backup|gitaly-1|/project/path-1\"}}\n\n\tdequeuedEvents2, err := queue.Dequeue(ctx, \"backup\", \"gitaly-1\", 100500)\n\trequire.NoError(t, err)\n\trequire.Len(t, dequeuedEvents2, 1, \"only one event must be fetched from the queue\")\n\n\tdequeuedEvents2[0].UpdatedAt = nil // it is not possible to determine update_at value as it is generated on UPDATE in database\n\trequire.Equal(t, expectedEvents2, dequeuedEvents2)\n\n\trequireLocks(t, ctx, db, []LockRow{\n\t\t{ID: \"praefect|gitaly-1|/project/path-1\", Acquired: true},\n\t\t{ID: 
\"praefect|gitaly-1|/project/path-2\", Acquired: true},\n\t\t{ID: \"backup|gitaly-1|/project/path-1\", Acquired: true},\n\t})\n\trequireJobLocks(t, ctx, db, append(expectedJobLocks1, expectedJobLocks2...))\n}", "func (m *MockMultiClusterRoleEventHandler) DeleteMultiClusterRole(obj *v1alpha1.MultiClusterRole) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteMultiClusterRole\", obj)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (t *Transaction) DeleteMulti(keys []*datastore.Key) (err error) {\n\tt.deleteKeys(keys...)\n\treturn t.txn.DeleteMulti(keys)\n}", "func (setting *MongodbDatabaseCollectionThroughputSetting) deleteValidations() []func() (admission.Warnings, error) {\n\treturn nil\n}", "func TestAddTwoDeleteScond(t *testing.T) {\n\ttarget := teaser.New()\n\tm1id := target.Add(\"msg1\")\n\tm2id := target.Add(\"msg2\")\n\n\t{\n\t\tdeleted := target.Delete(m2id)\n\t\tassertDeleted(m2id, deleted, t)\n\t}\n\n\t{\n\t\tnextHash, m1vid := target.View()\n\t\tassertMessageId(m1id, m1vid, t)\n\t\tassertMessageHash(\"\", nextHash, t)\n\t}\n}", "func (l *Listeners) Delete(id xid.ID) {\n\n\tl.Lock()\n\tdelete(l.listeners, id)\n\tl.Unlock()\n}", "func DeleteMulti(c context.Context, key []*datastore.Key) error {\n\tl := len(key)\n\n\t// only split into batches if needed\n\tif l <= SizeDelete {\n\t\treturn datastore.DeleteMulti(c, key)\n\t}\n\n\tvar errs []error\n\tvar batch []*datastore.Key\n\n\tfor s, e := 0, 0; s < l; s += SizeDelete {\n\t\te = s + SizeDelete\n\t\tif e >= l {\n\t\t\te = l\n\t\t}\n\n\t\tbatch = key[s:e]\n\n\t\tif err := datastore.DeleteMulti(c, batch); err != nil {\n\t\t\tif me, ok := err.(appengine.MultiError); ok {\n\t\t\t\tif len(errs) == 0 { // lazy init\n\t\t\t\t\terrs = make([]error, s, l) // add nils for previous batches\n\t\t\t\t}\n\n\t\t\t\tfor i := range me {\n\t\t\t\t\terrs = append(errs, me[i])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if len(errs) > 0 { // no errors, but another batch had errors, so 
add nils\n\t\t\tfor _ = range batch {\n\t\t\t\terrs = append(errs, nil)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn appengine.MultiError(errs) // combined multi-error for the whole set\n\t}\n\treturn nil\n}", "func (mr *MockEventLoggerMockRecorder) AppendMulti(events ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AppendMulti\", reflect.TypeOf((*MockEventLogger)(nil).AppendMulti), events...)\n}", "func (subscription *NamespacesTopicsSubscription) deleteValidations() []func() (admission.Warnings, error) {\n\treturn nil\n}", "func TestCallbackAddRemove(t *testing.T) {\n\tr := New()\n\ti := r.CreateInput(1)\n\tc := r.CreateCompute1(i, func(v int) int { return v + 1 })\n\tvar observed1 []int\n\tcb1 := c.AddCallback(func(v int) {\n\t\tobserved1 = append(observed1, v)\n\t})\n\tvar observed2 []int\n\tc.AddCallback(func(v int) {\n\t\tobserved2 = append(observed2, v)\n\t})\n\ti.SetValue(2)\n\tif len(observed1) != 1 || observed1[0] != 3 {\n\t\tt.Fatalf(\"observed1 not properly called\")\n\t}\n\tif len(observed2) != 1 || observed2[0] != 3 {\n\t\tt.Fatalf(\"observed2 not properly called\")\n\t}\n\tc.RemoveCallback(cb1)\n\ti.SetValue(3)\n\tif len(observed1) != 1 {\n\t\tt.Fatalf(\"observed1 called after removal\")\n\t}\n\tif len(observed2) != 2 || observed2[1] != 4 {\n\t\tt.Fatalf(\"observed2 not properly called after first callback removal\")\n\t}\n}", "func (o *Subscriber) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tfor _, hook := range subscriberAfterDeleteHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func DeleteMulti(ctx context.Context, ekx []string) error {\n\tvar kx []*datastore.Key\n\tvar kpcx []*datastore.Key\n\tfor _, v := range ekx {\n\t\tk, err := datastore.DecodeKey(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkx = append(kx, k)\n\t\tkpcx2, err := pageContext.GetKeys(ctx, 
k)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, v2 := range kpcx2 {\n\t\t\tkpcx = append(kpcx, v2)\n\t\t}\n\t}\n\topts := new(datastore.TransactionOptions)\n\topts.XG = true\n\treturn datastore.RunInTransaction(ctx, func(ctx context.Context) (err1 error) {\n\t\terr1 = datastore.DeleteMulti(ctx, kpcx)\n\t\tif err1 != nil {\n\t\t\treturn\n\t\t}\n\t\terr1 = datastore.DeleteMulti(ctx, kx)\n\t\treturn\n\t}, opts)\n}", "func TestToManyRemove(t *testing.T) {}", "func TestToManyRemove(t *testing.T) {}", "func TestToManyRemove(t *testing.T) {}", "func TestToManyRemove(t *testing.T) {}", "func TestToManyRemove(t *testing.T) {}", "func TestToManyRemove(t *testing.T) {}", "func TestToManyRemove(t *testing.T) {}", "func (mr *MockChefIngesterServerMockRecorder) ProcessMultipleNodeDeletes(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ProcessMultipleNodeDeletes\", reflect.TypeOf((*MockChefIngesterServer)(nil).ProcessMultipleNodeDeletes), arg0, arg1)\n}", "func (policy *ServersConnectionPolicy) deleteValidations() []func() (admission.Warnings, error) {\n\treturn nil\n}", "func TestMultiRangeEmptyAfterTruncate(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\ts, db := setupMultipleRanges(t, \"c\", \"d\")\n\tdefer s.Stop()\n\n\t// Delete the keys within a transaction. 
Implicitly, the intents are\n\t// resolved via ResolveIntentRange upon completion.\n\tif err := db.Txn(func(txn *client.Txn) error {\n\t\tb := &client.Batch{}\n\t\tb.DelRange(\"a\", \"b\")\n\t\tb.DelRange(\"e\", \"f\")\n\t\tb.DelRange(keys.LocalMax, roachpb.KeyMax)\n\t\treturn txn.CommitInBatch(b)\n\t}); err != nil {\n\t\tt.Fatalf(\"unexpected error on transactional DeleteRange: %s\", err)\n\t}\n}", "func TestConsulStateDriverWatchAllStateDelete(t *testing.T) {\n\tdriver := setupConsulDriver(t)\n\tcommonTestStateDriverWatchAllStateDelete(t, driver)\n}", "func (rule *NamespacesTopicsSubscriptionsRule) deleteValidations() []func() (admission.Warnings, error) {\n\treturn nil\n}", "func TestMultiRangeEmptyAfterTruncate(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\ts, _, _ := serverutils.StartServer(t, base.TestServerArgs{})\n\tdefer s.Stopper().Stop()\n\tdb := setupMultipleRanges(t, s, \"c\", \"d\")\n\n\t// Delete the keys within a transaction. The range [c,d) doesn't have\n\t// any active requests.\n\tif err := db.Txn(context.TODO(), func(txn *client.Txn) error {\n\t\tb := txn.NewBatch()\n\t\tb.DelRange(\"a\", \"b\", false)\n\t\tb.DelRange(\"e\", \"f\", false)\n\t\treturn txn.CommitInBatch(b)\n\t}); err != nil {\n\t\tt.Fatalf(\"unexpected error on transactional DeleteRange: %s\", err)\n\t}\n}", "func (api *API) TriggerPrototypesDeleteByIds(ids []string) (err error) {\n\ttriggerids1, err := api.TriggerPrototypesDeleteIDs((ids))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(triggerids1) != len(ids) {\n\t\terr = &ExpectedMore{len(ids), len(triggerids1)}\n\t}\n\treturn\n}", "func (h MultiStakingHooks) AfterValidatorRemoved(ctx sdk.Context, consAddr sdk.ConsAddress, valAddr sdk.ValAddress) {\n\tfor i := range h {\n\t\th[i].AfterValidatorRemoved(ctx, consAddr, valAddr)\n\t}\n}", "func (mr *MockEventLoggerMockRecorder) AppendCheckMulti(assumedVersion interface{}, events ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := 
append([]interface{}{assumedVersion}, events...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AppendCheckMulti\", reflect.TypeOf((*MockEventLogger)(nil).AppendCheckMulti), varargs...)\n}", "func (rule *NamespacesEventhubsAuthorizationRule) deleteValidations() []func() (admission.Warnings, error) {\n\treturn nil\n}", "func (mock *MailgunMock) DeleteWebhookCalls() []struct {\n\tKind string\n} {\n\tvar calls []struct {\n\t\tKind string\n\t}\n\tlockMailgunMockDeleteWebhook.RLock()\n\tcalls = mock.calls.DeleteWebhook\n\tlockMailgunMockDeleteWebhook.RUnlock()\n\treturn calls\n}", "func TestPodDeletionEvent(t *testing.T) {\n\tf := func(path cmp.Path) bool {\n\t\tswitch path.String() {\n\t\t// These fields change at runtime, so ignore it\n\t\tcase \"LastTimestamp\", \"FirstTimestamp\", \"ObjectMeta.Name\":\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tt.Run(\"emitPodDeletionEvent\", func(t *testing.T) {\n\t\tcontroller := &NoExecuteTaintManager{}\n\t\trecorder := testutil.NewFakeRecorder()\n\t\tcontroller.recorder = recorder\n\t\tcontroller.emitPodDeletionEvent(types.NamespacedName{\n\t\t\tName: \"test\",\n\t\t\tNamespace: \"test\",\n\t\t})\n\t\twant := []*v1.Event{\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t},\n\t\t\t\tInvolvedObject: v1.ObjectReference{\n\t\t\t\t\tKind: \"Pod\",\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t\tReason: \"TaintManagerEviction\",\n\t\t\t\tType: \"Normal\",\n\t\t\t\tCount: 1,\n\t\t\t\tMessage: \"Marking for deletion Pod test/test\",\n\t\t\t\tSource: v1.EventSource{Component: \"nodeControllerTest\"},\n\t\t\t},\n\t\t}\n\t\tif diff := cmp.Diff(want, recorder.Events, cmp.FilterPath(f, cmp.Ignore())); len(diff) > 0 {\n\t\t\tt.Errorf(\"emitPodDeletionEvent() returned data (-want,+got):\\n%s\", diff)\n\t\t}\n\t})\n\n\tt.Run(\"emitCancelPodDeletionEvent\", func(t *testing.T) {\n\t\tcontroller := 
&NoExecuteTaintManager{}\n\t\trecorder := testutil.NewFakeRecorder()\n\t\tcontroller.recorder = recorder\n\t\tcontroller.emitCancelPodDeletionEvent(types.NamespacedName{\n\t\t\tName: \"test\",\n\t\t\tNamespace: \"test\",\n\t\t})\n\t\twant := []*v1.Event{\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t},\n\t\t\t\tInvolvedObject: v1.ObjectReference{\n\t\t\t\t\tKind: \"Pod\",\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t\tReason: \"TaintManagerEviction\",\n\t\t\t\tType: \"Normal\",\n\t\t\t\tCount: 1,\n\t\t\t\tMessage: \"Cancelling deletion of Pod test/test\",\n\t\t\t\tSource: v1.EventSource{Component: \"nodeControllerTest\"},\n\t\t\t},\n\t\t}\n\t\tif diff := cmp.Diff(want, recorder.Events, cmp.FilterPath(f, cmp.Ignore())); len(diff) > 0 {\n\t\t\tt.Errorf(\"emitPodDeletionEvent() returned data (-want,+got):\\n%s\", diff)\n\t\t}\n\t})\n}", "func (mr *MockSubjectRoleManagerMockRecorder) BulkDelete(roleType, system, subjectPKs interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"BulkDelete\", reflect.TypeOf((*MockSubjectRoleManager)(nil).BulkDelete), roleType, system, subjectPKs)\n}", "func (o SubscriberSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tif o == nil {\n\t\treturn 0, errors.New(\"models: no Subscriber slice provided for delete all\")\n\t}\n\n\tif len(o) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tif len(subscriberBeforeDeleteHooks) != 0 {\n\t\tfor _, obj := range o {\n\t\t\tif err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t}\n\n\tvar args []interface{}\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), subscriberPrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := \"DELETE FROM `subscribers` WHERE \" 
+\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, subscriberPrimaryKeyColumns, len(o))\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, args)\n\t}\n\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to delete all from subscriber slice\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by deleteall for subscribers\")\n\t}\n\n\tif len(subscriberAfterDeleteHooks) != 0 {\n\t\tfor _, obj := range o {\n\t\t\tif err := obj.doAfterDeleteHooks(ctx, exec); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn rowsAff, nil\n}", "func (database *SqlDatabase) deleteValidations() []func() (admission.Warnings, error) {\n\treturn nil\n}", "func (c *Cache) DeleteMulti(ctx context.Context, keys []string) error {\n\tif bypassFromContext(ctx) == BypassReadWriting {\n\t\treturn nil\n\t}\n\n\treturn c.storage.Delete(ctx, keys...)\n}", "func TestDeleteExperimentsV1_MultiUser(t *testing.T) {\n\tviper.Set(common.MultiUserMode, \"true\")\n\tdefer viper.Set(common.MultiUserMode, \"false\")\n\tmd := metadata.New(map[string]string{common.GoogleIAPUserIdentityHeader: common.GoogleIAPUserIdentityPrefix + \"[email protected]\"})\n\tctx := metadata.NewIncomingContext(context.Background(), md)\n\n\tclientManager := resource.NewFakeClientManagerOrFatal(util.NewFakeTimeForEpoch())\n\tresourceManager := resource.NewResourceManager(clientManager)\n\tserver := ExperimentServer{resourceManager: resourceManager, options: &ExperimentServerOptions{CollectMetrics: false}}\n\tresourceReferences := []*apiv1beta1.ResourceReference{\n\t\t{\n\t\t\tKey: &apiv1beta1.ResourceKey{Type: apiv1beta1.ResourceType_NAMESPACE, Id: \"ns1\"},\n\t\t\tRelationship: apiv1beta1.Relationship_OWNER,\n\t\t},\n\t}\n\texperiment := &apiv1beta1.Experiment{\n\t\tName: 
\"ex1\",\n\t\tDescription: \"first experiment\",\n\t\tResourceReferences: resourceReferences,\n\t}\n\tresultExperiment, err := server.CreateExperimentV1(ctx, &apiv1beta1.CreateExperimentRequest{Experiment: experiment})\n\tassert.Nil(t, err)\n\n\t_, err = server.DeleteExperimentV1(ctx, &apiv1beta1.DeleteExperimentRequest{Id: \"ex2\"})\n\tassert.NotNil(t, err)\n\tassert.Contains(t, err.Error(), \"not found\")\n\n\t_, err = server.DeleteExperimentV1(ctx, &apiv1beta1.DeleteExperimentRequest{Id: resultExperiment.Id})\n\tassert.Nil(t, err)\n}", "func (c *FakeListeners) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {\n\taction := testing.NewDeleteCollectionAction(listenersResource, c.ns, listOpts)\n\n\t_, err := c.Fake.Invokes(action, &networkextensionv1.ListenerList{})\n\treturn err\n}", "func TestOnlyCallOnceOnMultipleDepChanges(t *testing.T) {\n\tr := New()\n\ti := r.CreateInput(1)\n\tc1 := r.CreateCompute1(i, func(v int) int { return v + 1 })\n\tc2 := r.CreateCompute1(i, func(v int) int { return v - 1 })\n\tc3 := r.CreateCompute1(c2, func(v int) int { return v - 1 })\n\tc4 := r.CreateCompute2(c1, c3, func(v1, v3 int) int { return v1 * v3 })\n\tchanged4 := 0\n\tc4.AddCallback(func(int) { changed4++ })\n\ti.SetValue(3)\n\tif changed4 < 1 {\n\t\tt.Fatalf(\"callback function was not called\")\n\t} else if changed4 > 1 {\n\t\tt.Fatalf(\"callback function was called too often\")\n\t}\n}", "func TestDeleteExperiments_MultiUser(t *testing.T) {\n\tviper.Set(common.MultiUserMode, \"true\")\n\tdefer viper.Set(common.MultiUserMode, \"false\")\n\tmd := metadata.New(map[string]string{common.GoogleIAPUserIdentityHeader: common.GoogleIAPUserIdentityPrefix + \"[email protected]\"})\n\tctx := metadata.NewIncomingContext(context.Background(), md)\n\n\tclientManager := resource.NewFakeClientManagerOrFatal(util.NewFakeTimeForEpoch())\n\tresourceManager := resource.NewResourceManager(clientManager)\n\tserver := ExperimentServer{resourceManager: 
resourceManager, options: &ExperimentServerOptions{CollectMetrics: false}}\n\texperiment := &apiV2beta1.Experiment{DisplayName: \"ex1\", Description: \"first experiment\", Namespace: \"ns1\"}\n\tresultExperiment, err := server.CreateExperiment(ctx, &apiV2beta1.CreateExperimentRequest{Experiment: experiment})\n\tassert.Nil(t, err)\n\n\t_, err = server.DeleteExperiment(ctx, &apiV2beta1.DeleteExperimentRequest{ExperimentId: \"ex2\"})\n\tassert.NotNil(t, err)\n\tassert.Contains(t, err.Error(), \"not found\")\n\n\t_, err = server.DeleteExperiment(ctx, &apiV2beta1.DeleteExperimentRequest{ExperimentId: resultExperiment.ExperimentId})\n\tassert.Nil(t, err)\n}", "func (o *InstrumentClass) doAfterDeleteHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range instrumentClassAfterDeleteHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (g *GCache) DelMulti(keys []string) error {\n\tfor _, key := range keys {\n\t\tg.db.Remove(key)\n\t}\n\treturn nil\n}", "func (m *MockEventDao) UnfinishedEvents(target, targetID string, optTypes ...string) ([]*model.ServiceEvent, error) {\n\tvarargs := []interface{}{target, targetID}\n\tfor _, a := range optTypes {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"UnfinishedEvents\", varargs...)\n\tret0, _ := ret[0].([]*model.ServiceEvent)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (o *OauthClient) doAfterDeleteHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range oauthClientAfterDeleteHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *MockCallback) OnRemoveAll() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemoveAll\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func DeleteExamples(t *testing.T, db *mongo.Database) {\n\tcoll := db.Collection(\"inventory_delete\")\n\n\terr := coll.Drop(context.Background())\n\trequire.NoError(t, err)\n\n\t{\n\t\t// Start 
Example 55\n\t\tdocs := []interface{}{\n\t\t\tbson.D{\n\t\t\t\t{\"item\", \"journal\"},\n\t\t\t\t{\"qty\", 25},\n\t\t\t\t{\"size\", bson.D{\n\t\t\t\t\t{\"h\", 14},\n\t\t\t\t\t{\"w\", 21},\n\t\t\t\t\t{\"uom\", \"cm\"},\n\t\t\t\t}},\n\t\t\t\t{\"status\", \"A\"},\n\t\t\t},\n\t\t\tbson.D{\n\t\t\t\t{\"item\", \"notebook\"},\n\t\t\t\t{\"qty\", 50},\n\t\t\t\t{\"size\", bson.D{\n\t\t\t\t\t{\"h\", 8.5},\n\t\t\t\t\t{\"w\", 11},\n\t\t\t\t\t{\"uom\", \"in\"},\n\t\t\t\t}},\n\t\t\t\t{\"status\", \"P\"},\n\t\t\t},\n\t\t\tbson.D{\n\t\t\t\t{\"item\", \"paper\"},\n\t\t\t\t{\"qty\", 100},\n\t\t\t\t{\"size\", bson.D{\n\t\t\t\t\t{\"h\", 8.5},\n\t\t\t\t\t{\"w\", 11},\n\t\t\t\t\t{\"uom\", \"in\"},\n\t\t\t\t}},\n\t\t\t\t{\"status\", \"D\"},\n\t\t\t},\n\t\t\tbson.D{\n\t\t\t\t{\"item\", \"planner\"},\n\t\t\t\t{\"qty\", 75},\n\t\t\t\t{\"size\", bson.D{\n\t\t\t\t\t{\"h\", 22.85},\n\t\t\t\t\t{\"w\", 30},\n\t\t\t\t\t{\"uom\", \"cm\"},\n\t\t\t\t}},\n\t\t\t\t{\"status\", \"D\"},\n\t\t\t},\n\t\t\tbson.D{\n\t\t\t\t{\"item\", \"postcard\"},\n\t\t\t\t{\"qty\", 45},\n\t\t\t\t{\"size\", bson.D{\n\t\t\t\t\t{\"h\", 10},\n\t\t\t\t\t{\"w\", 15.25},\n\t\t\t\t\t{\"uom\", \"cm\"},\n\t\t\t\t}},\n\t\t\t\t{\"status\", \"A\"},\n\t\t\t},\n\t\t}\n\n\t\tresult, err := coll.InsertMany(context.Background(), docs)\n\n\t\t// End Example 55\n\n\t\trequire.NoError(t, err)\n\t\trequire.Len(t, result.InsertedIDs, 5)\n\t}\n\n\t{\n\t\t// Start Example 57\n\n\t\tresult, err := coll.DeleteMany(\n\t\t\tcontext.Background(),\n\t\t\tbson.D{\n\t\t\t\t{\"status\", \"A\"},\n\t\t\t},\n\t\t)\n\n\t\t// End Example 57\n\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int64(2), result.DeletedCount)\n\t}\n\n\t{\n\t\t// Start Example 58\n\n\t\tresult, err := coll.DeleteOne(\n\t\t\tcontext.Background(),\n\t\t\tbson.D{\n\t\t\t\t{\"status\", \"D\"},\n\t\t\t},\n\t\t)\n\n\t\t// End Example 58\n\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int64(1), result.DeletedCount)\n\n\t}\n\n\t{\n\t\t// Start Example 56\n\n\t\tresult, err := 
coll.DeleteMany(context.Background(), bson.D{})\n\n\t\t// End Example 56\n\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int64(2), result.DeletedCount)\n\t}\n}", "func TestMultipleRegisterCalls(t *testing.T) {\n\tRegister(\"multiple-register-driver-1\")\n\trequire.PanicsWithError(t, \"Register called twice for driver multiple-register-driver-1\", func() {\n\t\tRegister(\"multiple-register-driver-1\")\n\t})\n\n\t// Should be no error.\n\tRegister(\"multiple-register-driver-2\")\n}", "func (m *MockSubjectRoleManager) BulkDelete(roleType, system string, subjectPKs []int64) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"BulkDelete\", roleType, system, subjectPKs)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func DeleteMultiHop(exec executor.Executor, execnet, tonet, routingTable string, ref map[string]NetworkSettings) error {\n\tlocalNetGW, ok := ref[execnet]\n\tif !ok {\n\t\treturn fmt.Errorf(\"network %s not found in %v\", execnet, ref)\n\t}\n\n\texternalNet, ok := ref[tonet]\n\tif !ok {\n\t\treturn fmt.Errorf(\"network %s not found in %v\", tonet, ref)\n\t}\n\n\terr := routes.Delete(exec, fmt.Sprintf(\"%s/%d\", externalNet.IPAddress, externalNet.IPPrefixLen), localNetGW.IPAddress, routingTable)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = routes.Delete(exec, fmt.Sprintf(\"%s/%d\", externalNet.GlobalIPv6Address, externalNet.GlobalIPv6PrefixLen), localNetGW.GlobalIPv6Address, routingTable)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (mr *MockChefIngesterClientMockRecorder) ProcessMultipleNodeDeletes(ctx, in interface{}, opts ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{ctx, in}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ProcessMultipleNodeDeletes\", reflect.TypeOf((*MockChefIngesterClient)(nil).ProcessMultipleNodeDeletes), varargs...)\n}", "func (o *Failure) doAfterDeleteHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range 
failureAfterDeleteHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (mock *MultiClusterAppInterfaceMock) DeleteCalls() []struct {\n\tName string\n\tOptions *metav1.DeleteOptions\n} {\n\tvar calls []struct {\n\t\tName string\n\t\tOptions *metav1.DeleteOptions\n\t}\n\tlockMultiClusterAppInterfaceMockDelete.RLock()\n\tcalls = mock.calls.Delete\n\tlockMultiClusterAppInterfaceMockDelete.RUnlock()\n\treturn calls\n}", "func (machine *VirtualMachine) deleteValidations() []func() (admission.Warnings, error) {\n\treturn nil\n}", "func (_m *OAuth20Service) DeleteMultipleClientCredentials(ctx context.Context, auths []model.SystemAuth) error {\n\tret := _m.Called(ctx, auths)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, []model.SystemAuth) error); ok {\n\t\tr0 = rf(ctx, auths)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (mock *GlobalRoleBindingInterfaceMock) DeleteCalls() []struct {\n\tName string\n\tOptions *metav1.DeleteOptions\n} {\n\tvar calls []struct {\n\t\tName string\n\t\tOptions *metav1.DeleteOptions\n\t}\n\tlockGlobalRoleBindingInterfaceMockDelete.RLock()\n\tcalls = mock.calls.Delete\n\tlockGlobalRoleBindingInterfaceMockDelete.RUnlock()\n\treturn calls\n}", "func (o *Notification) doAfterDeleteHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range notificationAfterDeleteHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (mock *GlobalRoleBindingInterfaceMock) DeleteCollectionCalls() []struct {\n\tDeleteOpts *metav1.DeleteOptions\n\tListOpts metav1.ListOptions\n} {\n\tvar calls []struct {\n\t\tDeleteOpts *metav1.DeleteOptions\n\t\tListOpts metav1.ListOptions\n\t}\n\tlockGlobalRoleBindingInterfaceMockDeleteCollection.RLock()\n\tcalls = mock.calls.DeleteCollection\n\tlockGlobalRoleBindingInterfaceMockDeleteCollection.RUnlock()\n\treturn calls\n}", "func (mock *MultiClusterAppInterfaceMock) 
DeleteCollectionCalls() []struct {\n\tDeleteOpts *metav1.DeleteOptions\n\tListOpts metav1.ListOptions\n} {\n\tvar calls []struct {\n\t\tDeleteOpts *metav1.DeleteOptions\n\t\tListOpts metav1.ListOptions\n\t}\n\tlockMultiClusterAppInterfaceMockDeleteCollection.RLock()\n\tcalls = mock.calls.DeleteCollection\n\tlockMultiClusterAppInterfaceMockDeleteCollection.RUnlock()\n\treturn calls\n}", "func (o *Latency) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range latencyAfterDeleteHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (credential *FederatedIdentityCredential) deleteValidations() []func() (admission.Warnings, error) {\n\treturn nil\n}", "func (l *Listener) delete(rOpts *ReconcileOptions) error {\n\tin := elbv2.DeleteListenerInput{\n\t\tListenerArn: l.CurrentListener.ListenerArn,\n\t}\n\n\tif err := awsutil.ALBsvc.RemoveListener(in); err != nil {\n\t\trOpts.Eventf(api.EventTypeWarning, \"ERROR\", \"Error deleting %v listener: %s\", *l.CurrentListener.Port, err.Error())\n\t\tl.logger.Errorf(\"Failed Listener deletion. 
ARN: %s: %s\",\n\t\t\t*l.CurrentListener.ListenerArn, err.Error())\n\t\treturn err\n\t}\n\n\tl.deleted = true\n\treturn nil\n}", "func TestMetricDeleted(t *testing.T) {\n\thelper := newHelper(t)\n\tdefer helper.Close()\n\n\thelper.preregisterAgent(t)\n\thelper.initSynchronizer(t)\n\thelper.AddTime(time.Minute)\n\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// list of active metrics,\n\t// 2 query per metric to register (one to find potential inactive, one to register)\n\t// + 1 to register agent_status\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 8)\n\n\tmetrics := helper.MetricsFromAPI()\n\tif len(metrics) != 4 { // 3 + agent_status\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\thelper.AddTime(90 * time.Minute)\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t})\n\n\t// API deleted metric1\n\tfor _, m := range metrics {\n\t\tif m.Name == \"metric1\" {\n\t\t\thelper.api.resources[mockAPIResourceMetric].DelStore(m.ID)\n\t\t}\n\t}\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\thelper.AddTime(1 * time.Minute)\n\n\t// metric1 is still alive\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t})\n\n\tif err := 
helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// We list active metrics, 2 query to re-register metric\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 3)\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 4 { // 3 + agent_status\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\thelper.s.nextFullSync = helper.Now().Add(2 * time.Hour)\n\thelper.AddTime(90 * time.Minute)\n\n\t// API deleted metric2\n\tfor _, m := range metrics {\n\t\tif m.Name == \"metric2\" {\n\t\t\thelper.api.resources[mockAPIResourceMetric].DelStore(m.ID)\n\t\t}\n\t}\n\n\t// all metrics are inactive\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 3 {\n\t\tt.Errorf(\"len(metrics) = %d, want 3\", len(metrics))\n\t}\n\n\tfor _, m := range metrics {\n\t\tif m.DeactivatedAt.IsZero() && m.Name != agentStatusName {\n\t\t\tt.Errorf(\"%v should be deactivated\", m)\n\n\t\t\tbreak\n\t\t} else if !m.DeactivatedAt.IsZero() && m.Name == agentStatusName {\n\t\t\tt.Errorf(\"%v should not be deactivated\", m)\n\t\t}\n\t}\n\n\thelper.AddTime(1 * time.Minute)\n\n\t// API deleted metric3\n\tfor _, m := range metrics {\n\t\tif m.Name == \"metric3\" {\n\t\t\thelper.api.resources[mockAPIResourceMetric].DelStore(m.ID)\n\t\t}\n\t}\n\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t})\n\n\thelper.s.forceSync[syncMethodMetric] = true\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 4 
{\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\tfor _, m := range metrics {\n\t\tif !m.DeactivatedAt.IsZero() {\n\t\t\tt.Errorf(\"%v should not be deactivated\", m)\n\t\t}\n\t}\n}", "func Test_DeleteInvalidConsumers(t *testing.T) {\n\t// pre-requisites\n\tjsBackend := &JetStream{\n\t\tcleaner: &cleaner.JetStreamCleaner{},\n\t}\n\n\tsubs := NewSubscriptionsWithMultipleTypes()\n\tgivenConsumers := NewConsumers(subs, jsBackend)\n\n\tdanglingConsumer := &nats.ConsumerInfo{\n\t\tName: \"dangling-invalid-consumer\",\n\t\tConfig: nats.ConsumerConfig{MaxAckPending: DefaultMaxInFlights},\n\t\tPushBound: false,\n\t}\n\t// add a dangling consumer which should be deleted\n\tgivenConsumersWithDangling := givenConsumers\n\tgivenConsumersWithDangling = append(givenConsumersWithDangling, danglingConsumer)\n\n\ttestCases := []struct {\n\t\tname string\n\t\tgivenSubscriptions []v1alpha2.Subscription\n\t\tjetStreamContext *jetStreamContextStub\n\t\twantConsumers []*nats.ConsumerInfo\n\t\twantError error\n\t}{\n\t\t{\n\t\t\tname: \"no consumer should be deleted\",\n\t\t\tgivenSubscriptions: subs,\n\t\t\tjetStreamContext: &jetStreamContextStub{\n\t\t\t\tconsumers: givenConsumers,\n\t\t\t},\n\t\t\twantConsumers: givenConsumers,\n\t\t\twantError: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"a dangling invalid consumer should be deleted\",\n\t\t\tgivenSubscriptions: subs,\n\t\t\tjetStreamContext: &jetStreamContextStub{\n\t\t\t\tconsumers: givenConsumersWithDangling,\n\t\t\t},\n\t\t\twantConsumers: givenConsumers,\n\t\t\twantError: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"no consumer should be deleted\",\n\t\t\tgivenSubscriptions: subs,\n\t\t\tjetStreamContext: &jetStreamContextStub{\n\t\t\t\tconsumers: givenConsumersWithDangling,\n\t\t\t\tdeleteConsumerErr: nats.ErrConnectionNotTLS,\n\t\t\t},\n\t\t\twantError: ErrDeleteConsumer,\n\t\t},\n\t\t{\n\t\t\tname: \"all consumers must be deleted if there is no subscription resource\",\n\t\t\tgivenSubscriptions: 
[]v1alpha2.Subscription{},\n\t\t\tjetStreamContext: &jetStreamContextStub{\n\t\t\t\tconsumers: givenConsumers,\n\t\t\t},\n\t\t\twantConsumers: []*nats.ConsumerInfo{},\n\t\t\twantError: nil,\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t// given\n\t\t\tjsBackend.jsCtx = tc.jetStreamContext\n\n\t\t\t// when\n\t\t\terr := jsBackend.DeleteInvalidConsumers(tc.givenSubscriptions)\n\n\t\t\t// then\n\t\t\tif tc.wantError != nil {\n\t\t\t\tassert.ErrorIs(t, err, tc.wantError)\n\t\t\t} else {\n\t\t\t\tcons := jsBackend.jsCtx.Consumers(\"\")\n\t\t\t\tactualConsumers := []*nats.ConsumerInfo{}\n\t\t\t\tfor con := range cons {\n\t\t\t\t\tactualConsumers = append(actualConsumers, con)\n\t\t\t\t}\n\t\t\t\tassert.Equal(t, len(tc.wantConsumers), len(actualConsumers))\n\t\t\t\tassert.Equal(t, tc.wantConsumers, actualConsumers)\n\t\t\t}\n\t\t})\n\t}\n}", "func (o *RecordMeasure) doAfterDeleteHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range recordMeasureAfterDeleteHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *MockProvider) OnEndpointsDelete(arg0 *v1.Endpoints) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsDelete\", arg0)\n}", "func (o FailureSlice) DeleteAll(exec boil.Executor) error {\n\tif o == nil {\n\t\treturn errors.New(\"models: no Failure slice provided for delete all\")\n\t}\n\n\tif len(o) == 0 {\n\t\treturn nil\n\t}\n\n\tif len(failureBeforeDeleteHooks) != 0 {\n\t\tfor _, obj := range o {\n\t\t\tif err := obj.doBeforeDeleteHooks(exec); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tvar args []interface{}\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), failurePrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := \"DELETE FROM `failure` WHERE \" +\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, failurePrimaryKeyColumns, 
len(o))\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, args)\n\t}\n\n\t_, err := exec.Exec(sql, args...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"models: unable to delete all from failure slice\")\n\t}\n\n\tif len(failureAfterDeleteHooks) != 0 {\n\t\tfor _, obj := range o {\n\t\t\tif err := obj.doAfterDeleteHooks(exec); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s) TestListenerResourceDeletionOnServerNotIgnored(t *testing.T) {\n\tmgmtServer := startManagementServer(t)\n\tnodeID := uuid.New().String()\n\tbs := generateBootstrapContents(t, mgmtServer.Address, false, nodeID)\n\txdsR := xdsResolverBuilder(t, bs)\n\tresources, lis := resourceWithListenerForGRPCServer(t, nodeID)\n\tupdateCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis)\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for the listener to move to \"serving\" mode.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Test timed out waiting for a mode change update.\")\n\tcase mode := <-updateCh:\n\t\tif mode != connectivity.ServingModeServing {\n\t\t\tt.Fatalf(\"Listener received new mode %v, want %v\", mode, connectivity.ServingModeServing)\n\t\t}\n\t}\n\n\t// Create a ClientConn and make a successful RPCs.\n\tcc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial local test server: %v\", err)\n\t}\n\tdefer cc.Close()\n\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := mgmtServer.Update(ctx, e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*listenerpb.Listener{}, // empty listener resource\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase 
<-ctx.Done():\n\t\tt.Fatalf(\"timed out waiting for a mode change update: %v\", err)\n\tcase mode := <-updateCh:\n\t\tif mode != connectivity.ServingModeNotServing {\n\t\t\tt.Fatalf(\"listener received new mode %v, want %v\", mode, connectivity.ServingModeNotServing)\n\t\t}\n\t}\n}", "func (o *Subscriber) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tfor _, hook := range subscriberBeforeDeleteHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (store *ConfigurationStore) deleteValidations() []func() (admission.Warnings, error) {\n\treturn nil\n}", "func (o *Offer) doAfterDeleteHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range offerAfterDeleteHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestToManyRemove(t *testing.T) {\n\tt.Run(\"TeamToSubscribers\", testTeamToManyRemoveOpSubscribers)\n\tt.Run(\"UserToDeletedByPosts\", testUserToManyRemoveOpDeletedByPosts)\n\tt.Run(\"UserToUserCreateCodes\", testUserToManyRemoveOpUserCreateCodes)\n}", "func (s *BasejossListener) ExitDelCmdAllSel(ctx *DelCmdAllSelContext) {}", "func (e *ObservableEditableBuffer) deleted(q0, q1 OffsetTuple) {\n\te.treatasclean = false\n\tfor observer := range e.observers {\n\t\tobserver.Deleted(q0, q1)\n\t}\n}", "func TestAddRemoveEventsOnCachedCollection(t *testing.T) {\n\ttbl := []struct {\n\t\tEventName string // Name of the event. 
Either add or remove.\n\t\tEventPayload string // Event payload (raw JSON)\n\t\tExpectedCollection string // Expected collection after event (raw JSON)\n\t}{\n\t\t{\"add\", `{\"idx\":0,\"value\":\"bar\"}`, `[\"bar\",\"foo\",42,true,null]`},\n\t\t{\"add\", `{\"idx\":1,\"value\":\"bar\"}`, `[\"foo\",\"bar\",42,true,null]`},\n\t\t{\"add\", `{\"idx\":4,\"value\":\"bar\"}`, `[\"foo\",42,true,null,\"bar\"]`},\n\t\t{\"remove\", `{\"idx\":0}`, `[42,true,null]`},\n\t\t{\"remove\", `{\"idx\":1}`, `[\"foo\",true,null]`},\n\t\t{\"remove\", `{\"idx\":3}`, `[\"foo\",42,true]`},\n\t}\n\n\tfor i, l := range tbl {\n\t\tfor sameClient := true; sameClient; sameClient = false {\n\t\t\trunNamedTest(t, fmt.Sprintf(\"#%d with the same client being %+v\", i+1, sameClient), func(s *Session) {\n\t\t\t\tvar creq *ClientRequest\n\n\t\t\t\tc := s.Connect()\n\t\t\t\tsubscribeToTestCollection(t, s, c)\n\n\t\t\t\t// Send event on collection and validate client event\n\t\t\t\ts.ResourceEvent(\"test.collection\", l.EventName, json.RawMessage(l.EventPayload))\n\t\t\t\tc.GetEvent(t).Equals(t, \"test.collection.\"+l.EventName, json.RawMessage(l.EventPayload))\n\n\t\t\t\tif sameClient {\n\t\t\t\t\tc.Request(\"unsubscribe.test.collection\", nil).GetResponse(t)\n\t\t\t\t\t// Subscribe a second time\n\t\t\t\t\tcreq = c.Request(\"subscribe.test.collection\", nil)\n\t\t\t\t} else {\n\t\t\t\t\tc2 := s.Connect()\n\t\t\t\t\t// Subscribe a second time\n\t\t\t\t\tcreq = c2.Request(\"subscribe.test.collection\", nil)\n\t\t\t\t}\n\n\t\t\t\t// Handle collection access request\n\t\t\t\ts.GetRequest(t).AssertSubject(t, \"access.test.collection\").RespondSuccess(json.RawMessage(`{\"get\":true}`))\n\n\t\t\t\t// Validate client response\n\t\t\t\tcreq.GetResponse(t).AssertResult(t, json.RawMessage(`{\"collections\":{\"test.collection\":`+l.ExpectedCollection+`}}`))\n\t\t\t})\n\t\t}\n\t}\n}", "func (m *MockListener) Delete(listenerKey api.ListenerKey, checksum api.Checksum) error {\n\tret := m.ctrl.Call(m, \"Delete\", 
listenerKey, checksum)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockEventLogger) AppendCheckMulti(assumedVersion uint64, events ...eventlog.EventData) (uint64, uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{assumedVersion}\n\tfor _, a := range events {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AppendCheckMulti\", varargs...)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(uint64)\n\tret2, _ := ret[2].(time.Time)\n\tret3, _ := ret[3].(error)\n\treturn ret0, ret1, ret2, ret3\n}", "func TestMultiRangeBoundedBatchDelRangeBoundary(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\ts, _, _ := serverutils.StartServer(t, base.TestServerArgs{})\n\tdefer s.Stopper().Stop()\n\n\tdb := setupMultipleRanges(t, s, \"a\", \"b\")\n\t// Check that a\n\tfor _, key := range []string{\"a1\", \"a2\", \"a3\", \"b1\", \"b2\"} {\n\t\tif err := db.Put(key, \"value\"); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tb := &client.Batch{}\n\tb.Header.MaxSpanRequestKeys = 3\n\tb.DelRange(\"a\", \"c\", true)\n\tif err := db.Run(b); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(b.Results) != 1 {\n\t\tt.Fatalf(\"%d results returned\", len(b.Results))\n\t}\n\tif string(b.Results[0].ResumeSpan.Key) != \"b\" || string(b.Results[0].ResumeSpan.EndKey) != \"c\" {\n\t\tt.Fatalf(\"received ResumeSpan %+v\", b.Results[0].ResumeSpan)\n\t}\n\n\tb = &client.Batch{}\n\tb.Header.MaxSpanRequestKeys = 1\n\tb.DelRange(\"b\", \"c\", true)\n\tif err := db.Run(b); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(b.Results) != 1 {\n\t\tt.Fatalf(\"%d results returned\", len(b.Results))\n\t}\n\tif string(b.Results[0].ResumeSpan.Key) != \"b2\" || string(b.Results[0].ResumeSpan.EndKey) != \"c\" {\n\t\tt.Fatalf(\"received ResumeSpan %+v\", b.Results[0].ResumeSpan)\n\t}\n}", "func (mr *MockProviderMockRecorder) OnEndpointsDelete(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
\"OnEndpointsDelete\", reflect.TypeOf((*MockProvider)(nil).OnEndpointsDelete), arg0)\n}", "func (mock *s3BatchDeleteClientMock) DeleteCalls() []struct {\n\tIn1 context.Context\n\tIn2 s3manager.BatchDeleteIterator\n} {\n\tvar calls []struct {\n\t\tIn1 context.Context\n\t\tIn2 s3manager.BatchDeleteIterator\n\t}\n\tlocks3BatchDeleteClientMockDelete.RLock()\n\tcalls = mock.calls.Delete\n\tlocks3BatchDeleteClientMockDelete.RUnlock()\n\treturn calls\n}", "func (mr *MockListenerMockRecorder) Delete(listenerKey, checksum interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Delete\", reflect.TypeOf((*MockListener)(nil).Delete), listenerKey, checksum)\n}" ]
[ "0.77888614", "0.6547149", "0.637997", "0.59574085", "0.5873279", "0.57735676", "0.57164836", "0.56791955", "0.5641537", "0.5567498", "0.55582744", "0.5517662", "0.55116504", "0.5494145", "0.5480422", "0.5468848", "0.5430449", "0.53740805", "0.5356894", "0.5351725", "0.53378564", "0.53337365", "0.5315189", "0.5244852", "0.5172958", "0.5127348", "0.5123859", "0.5109314", "0.50931054", "0.50757384", "0.5066906", "0.5051319", "0.5046756", "0.5045161", "0.5045161", "0.5045161", "0.5045161", "0.5045161", "0.5045161", "0.5045161", "0.5037885", "0.50301826", "0.50256926", "0.50253016", "0.5015984", "0.5004055", "0.50002843", "0.49898267", "0.49855033", "0.49816093", "0.4975975", "0.49714503", "0.4958707", "0.495699", "0.49507165", "0.49361593", "0.49273098", "0.49221945", "0.49105144", "0.4909651", "0.49015743", "0.4889271", "0.488753", "0.4886054", "0.4883632", "0.48787156", "0.48779187", "0.48769104", "0.48748654", "0.4869464", "0.486893", "0.48687398", "0.48619777", "0.48559105", "0.48550364", "0.48389456", "0.4837344", "0.48288482", "0.48285824", "0.48237705", "0.48215646", "0.48174787", "0.48100203", "0.48024285", "0.4798191", "0.47974923", "0.47972763", "0.47958824", "0.47813106", "0.477553", "0.47666922", "0.476454", "0.47624674", "0.47545347", "0.47543982", "0.47504732", "0.47419974", "0.4738145", "0.47361317", "0.47292686" ]
0.8045929
0
EnsureSegmentListener mocks base method
func (m *MockLoadBalance) EnsureSegmentListener(region string, listener *v1.Listener) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EnsureSegmentListener", region, listener) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) DeleteSegmentListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteSegmentListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureSegmentListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureSegmentListener\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureSegmentListener), region, listener)\n}", "func TestSegmentDummy(t *testing.T) {\n\ttype tcase struct {\n\t\tline geom.Line\n\t}\n\n\tfn := func(t *testing.T, tc tcase) {\n\t\ts := NewSegment(tc.line)\n\t\tif s.GetStart().Equals(tc.line[0]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[0], s.GetStart())\n\t\t}\n\t\tif s.GetEnd().Equals(tc.line[1]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[1], s.GetEnd())\n\t\t}\n\t\tif s.GetLineSegment() != tc.line {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line, s.GetLineSegment())\n\t\t}\n\t}\n\ttestcases := []tcase{\n\t\t{\n\t\t\tline: geom.Line{{1, 2}, {3, 4}},\n\t\t},\n\t}\n\n\tfor i, tc := range testcases {\n\t\ttc := tc\n\t\tt.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) { fn(t, tc) })\n\t}\n}", "func (m *MockLoadBalance) EnsureMultiSegmentListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiSegmentListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func NewMockSegmentManager(t mockConstructorTestingTNewMockSegmentManager) *MockSegmentManager {\n\tmock := &MockSegmentManager{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (f Factory) TestGetSegmentOK(t *testing.T) {\n\tprocess := \"test\"\n\tparent, _ := f.Client.CreateMap(process, nil, 
\"test\")\n\n\tsegment, err := f.Client.GetSegment(process, parent.GetLinkHash())\n\tassert.NoError(t, err)\n\tassert.NotNil(t, segment)\n}", "func TestSegmentString(t *testing.T) {\n\ttestCases := []struct {\n\t\tinput Segment\n\t\twant string\n\t}{\n\t\t{\n\t\t\tinput: Segment{11, 13},\n\t\t\twant: \"[start: 11, end: 13]\",\n\t\t},\n\t\t{\n\t\t\tinput: Segment{313, 313},\n\t\t\twant: \"[start: 313, end: 313]\",\n\t\t},\n\t}\n\n\tfor _, test := range testCases {\n\t\tif got := test.input.String(); got != test.want {\n\t\t\tt.Errorf(\"s.String() = %s, should be %s\", got, test.want)\n\t\t}\n\t}\n}", "func (m *MockLoadBalance) EnsureListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *MockSegmentManager) Remove(segmentID int64, scope querypb.DataScope) {\n\t_m.Called(segmentID, scope)\n}", "func (h *halCtlSuite) TestL2SegmentGet(c *C) {\n\tvar err error\n\tvar resp string\n\treq := &halproto.L2SegmentGetRequest{\n\t\tKeyOrHandle: &halproto.L2SegmentKeyHandle{\n\t\t\tKeyOrHandle: &halproto.L2SegmentKeyHandle_SegmentId{\n\t\t\t\tSegmentId: uint64(1),\n\t\t\t},\n\t\t},\n\t}\n\tl2SegGetReqMsg := &halproto.L2SegmentGetRequestMsg{\n\t\tRequest: []*halproto.L2SegmentGetRequest{req},\n\t}\n\n\tAssertEventually(c, func() (bool, interface{}) {\n\t\tresp, err = h.getL2Segments(l2SegGetReqMsg)\n\t\treturn err == nil, nil\n\t}, \"Failed to get L2Segments\")\n\tAssertEquals(c, true, strings.Contains(resp, \"1 Mgmt\"), fmt.Sprintf(\"halctl returned: %v\", resp))\n\t//AssertEquals(c, true, strings.Contains(resp, \"segmentid: 1\"), fmt.Sprintf(\"halctl returned: %v\", resp))\n}", "func (_m *MockOptions) SegmentReaderPool() xio.SegmentReaderPool {\n\tret := _m.ctrl.Call(_m, \"SegmentReaderPool\")\n\tret0, _ := ret[0].(xio.SegmentReaderPool)\n\treturn ret0\n}", "func (c ClientFake) 
UpdateSegment(name, campaignID, segmentID string) (Segment, error) {\n\treturn Segment{}, nil\n}", "func (f Factory) TestGetSegmentNotFound(t *testing.T) {\n\tprocess := \"test\"\n\tfakeLinkHash, _ := types.NewBytes32FromString(\"0000000000000000000000000000000000000000000000000000000000000000\")\n\tsegment, err := f.Client.GetSegment(process, fakeLinkHash)\n\tassert.EqualError(t, err, \"Not Found\")\n\tassert.Nil(t, segment)\n}", "func (_m *MockSegmentManager) Put(segmentType commonpb.SegmentState, segments ...Segment) {\n\t_va := make([]interface{}, len(segments))\n\tfor _i := range segments {\n\t\t_va[_i] = segments[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, segmentType)\n\t_ca = append(_ca, _va...)\n\t_m.Called(_ca...)\n}", "func (_m *MockOptions) SetSegmentReaderPool(value xio.SegmentReaderPool) Options {\n\tret := _m.ctrl.Call(_m, \"SetSegmentReaderPool\", value)\n\tret0, _ := ret[0].(Options)\n\treturn ret0\n}", "func (m *MockStreamEventListener) OnDestroyStream() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnDestroyStream\")\n}", "func (c ClientFake) CreateSegment(name, campaignID string) (Segment, error) {\n\treturn Segment{}, nil\n}", "func (m *MockStream) AddEventListener(streamEventListener types.StreamEventListener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddEventListener\", streamEventListener)\n}", "func TestStorageProofSegment(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tcst, err := createConsensusSetTester(\"TestStorageProofSegment\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Add a file contract to the consensus set that can be used to probe the\n\t// storage segment.\n\tvar outputs []byte\n\tfor i := 0; i < 4*256*256; i++ {\n\t\tvar fcid types.FileContractID\n\t\trand.Read(fcid[:])\n\t\tfc := types.FileContract{\n\t\t\tWindowStart: 2,\n\t\t\tFileSize: 256 * 64,\n\t\t}\n\t\tcst.cs.fileContracts[fcid] = fc\n\t\tindex, err := cst.cs.storageProofSegment(fcid)\n\t\tif err != nil 
{\n\t\t\tt.Error(err)\n\t\t}\n\t\toutputs = append(outputs, byte(index))\n\t}\n\n\t// Perform entropy testing on 'outputs' to verify randomness.\n\tvar b bytes.Buffer\n\tzip := gzip.NewWriter(&b)\n\t_, err = zip.Write(outputs)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tzip.Close()\n\tif b.Len() < len(outputs) {\n\t\tt.Error(\"supposedly high entropy random segments have been compressed!\")\n\t}\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiSegmentListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiSegmentListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiSegmentListeners), region, lbID, listeners)\n}", "func (c ClientFake) GetSegment(campaignID, segmentID string) (Segment, error) {\n\treturn Segment{}, nil\n}", "func Test_ConsumerClaim_HappyPath_WithTracing(t *testing.T) {\n\tmsgChanel := make(chan *sarama.ConsumerMessage, 1)\n\tmsgChanel <- &sarama.ConsumerMessage{\n\t\tTopic: \"topic-test\",\n\t}\n\tclose(msgChanel)\n\n\tconsumerGroupClaim := &mocks.ConsumerGroupClaim{}\n\tconsumerGroupClaim.On(\"Messages\").Return((<-chan *sarama.ConsumerMessage)(msgChanel))\n\n\tconsumerGroupSession := &mocks.ConsumerGroupSession{}\n\tconsumerGroupSession.On(\"MarkMessage\", mock.Anything, mock.Anything).Return()\n\n\thandlerCalled := false\n\thandler := func(ctx context.Context, msg *sarama.ConsumerMessage) error {\n\t\thandlerCalled = true\n\t\treturn nil\n\t}\n\n\ttested := listener{\n\t\thandlers: map[string]Handler{\"topic-test\": handler},\n\t\ttracer: DefaultTracing, // this is the important part\n\t}\n\n\terr := tested.ConsumeClaim(consumerGroupSession, consumerGroupClaim)\n\n\tassert.NoError(t, err)\n\tassert.True(t, handlerCalled)\n\tconsumerGroupClaim.AssertExpectations(t)\n\tconsumerGroupSession.AssertExpectations(t)\n}", "func (client WorkloadNetworksClient) GetSegmentResponder(resp *http.Response) (result WorkloadNetworkSegment, err 
error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar memLog base.InMemLogger\n\tmem := vfs.NewMem()\n\trequire.NoError(t, mem.MkdirAll(\"ext\", 0755))\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(t *testing.T, td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tmemLog.Reset()\n\t\t\tlel := MakeLoggingEventListener(&memLog)\n\t\t\tflushBegin, flushEnd := lel.FlushBegin, lel.FlushEnd\n\t\t\tlel.FlushBegin = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushBegin(info)\n\t\t\t}\n\t\t\tlel.FlushEnd = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushEnd(info)\n\t\t\t}\n\t\t\topts := &Options{\n\t\t\t\tFS: vfs.WithLogging(mem, memLog.Infof),\n\t\t\t\tFormatMajorVersion: internalFormatNewest,\n\t\t\t\tEventListener: &lel,\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tL0CompactionThreshold: 10,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t}\n\t\t\t// The table stats collector runs asynchronously and its\n\t\t\t// timing is less predictable. It increments nextJobID, which\n\t\t\t// can make these tests flaky. 
The TableStatsLoaded event is\n\t\t\t// tested separately in TestTableStats.\n\t\t\topts.private.disableTableStats = true\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", opts)\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tt := time.Now()\n\t\t\td.timeNow = func() time.Time {\n\t\t\t\tt = t.Add(time.Second)\n\t\t\t\treturn t\n\t\t\t}\n\t\t\td.testingAlwaysWaitForCleanup = true\n\t\t\treturn memLog.String()\n\n\t\tcase \"close\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"flush\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"compact\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\"), false); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"checkpoint\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Checkpoint(\"checkpoint\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"disable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\td.mu.Lock()\n\t\t\td.disableFileDeletions()\n\t\t\td.mu.Unlock()\n\t\t\treturn memLog.String()\n\n\t\tcase \"enable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\tfunc() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\tmemLog.Infof(\"%v\", r)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\td.mu.Lock()\n\t\t\t\tdefer d.mu.Unlock()\n\t\t\t\td.enableFileDeletions()\n\t\t\t}()\n\t\t\td.TestOnlyWaitForCleaning()\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest\":\n\t\t\tmemLog.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := 
sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest-flushable\":\n\t\t\tmemLog.Reset()\n\n\t\t\t// Prevent flushes during this test to ensure determinism.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = true\n\t\t\td.mu.Unlock()\n\n\t\t\tb := d.NewBatch()\n\t\t\tif err := b.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Apply(b, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\twriteTable := func(name string, key byte) error {\n\t\t\t\tf, err := mem.Create(name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tw := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t\t})\n\t\t\t\tif err := w.Add(base.MakeInternalKey([]byte{key}, 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.Close(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\ttableA, tableB := \"ext/a\", \"ext/b\"\n\t\t\tif err := writeTable(tableA, 'a'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := writeTable(tableB, 'b'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{tableA, tableB}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\n\t\t\t// Re-enable flushes, to allow the subsequent flush to proceed.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = false\n\t\t\td.mu.Unlock()\n\t\t\tif err := d.Flush(); err != nil 
{\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"metrics\":\n\t\t\t// The asynchronous loading of table stats can change metrics, so\n\t\t\t// wait for all the tables' stats to be loaded.\n\t\t\td.mu.Lock()\n\t\t\td.waitTableStats()\n\t\t\td.mu.Unlock()\n\n\t\t\treturn d.Metrics().String()\n\n\t\tcase \"sstables\":\n\t\t\tvar buf bytes.Buffer\n\t\t\ttableInfos, _ := d.SSTables()\n\t\t\tfor i, level := range tableInfos {\n\t\t\t\tif len(level) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(&buf, \"%d:\\n\", i)\n\t\t\t\tfor _, m := range level {\n\t\t\t\t\tfmt.Fprintf(&buf, \" %d:[%s-%s]\\n\",\n\t\t\t\t\t\tm.FileNum, m.Smallest.UserKey, m.Largest.UserKey)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func (m *MockisAclSelector_SegSelector) isAclSelector_SegSelector() {\n\tm.ctrl.Call(m, \"isAclSelector_SegSelector\")\n}", "func TestHandlerDispatchInternal(t *testing.T) {\n\trequire := require.New(t)\n\n\tctx := snow.DefaultConsensusContextTest()\n\tmsgFromVMChan := make(chan common.Message)\n\tvdrs := validators.NewSet()\n\trequire.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1))\n\n\tresourceTracker, err := tracker.NewResourceTracker(\n\t\tprometheus.NewRegistry(),\n\t\tresource.NoUsage,\n\t\tmeter.ContinuousFactory{},\n\t\ttime.Second,\n\t)\n\trequire.NoError(err)\n\thandler, err := New(\n\t\tctx,\n\t\tvdrs,\n\t\tmsgFromVMChan,\n\t\ttime.Second,\n\t\ttestThreadPoolSize,\n\t\tresourceTracker,\n\t\tvalidators.UnhandledSubnetConnector,\n\t\tsubnets.New(ctx.NodeID, subnets.Config{}),\n\t\tcommontracker.NewPeers(),\n\t)\n\trequire.NoError(err)\n\n\tbootstrapper := &common.BootstrapperTest{\n\t\tBootstrapableTest: common.BootstrapableTest{\n\t\t\tT: t,\n\t\t},\n\t\tEngineTest: common.EngineTest{\n\t\t\tT: t,\n\t\t},\n\t}\n\tbootstrapper.Default(false)\n\n\tengine := &common.EngineTest{T: 
t}\n\tengine.Default(false)\n\tengine.ContextF = func() *snow.ConsensusContext {\n\t\treturn ctx\n\t}\n\n\twg := &sync.WaitGroup{}\n\tengine.NotifyF = func(context.Context, common.Message) error {\n\t\twg.Done()\n\t\treturn nil\n\t}\n\n\thandler.SetEngineManager(&EngineManager{\n\t\tSnowman: &Engine{\n\t\t\tBootstrapper: bootstrapper,\n\t\t\tConsensus: engine,\n\t\t},\n\t})\n\n\tctx.State.Set(snow.EngineState{\n\t\tType: p2p.EngineType_ENGINE_TYPE_SNOWMAN,\n\t\tState: snow.NormalOp, // assumed bootstrap is done\n\t})\n\n\tbootstrapper.StartF = func(context.Context, uint32) error {\n\t\treturn nil\n\t}\n\n\twg.Add(1)\n\thandler.Start(context.Background(), false)\n\tmsgFromVMChan <- 0\n\twg.Wait()\n}", "func BenchSegment(ctx context.Context, name string) (stop func())", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar buf syncedBuffer\n\tmem := vfs.NewMem()\n\terr := mem.MkdirAll(\"ext\", 0755)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tbuf.Reset()\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", &Options{\n\t\t\t\tFS: loggingFS{mem, &buf},\n\t\t\t\tEventListener: MakeLoggingEventListener(&buf),\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"flush\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"compact\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\")); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"ingest\":\n\t\t\tbuf.Reset()\n\t\t\tf, 
err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := sstable.NewWriter(f, nil, LevelOptions{})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := mem.Remove(\"ext/0\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"metrics\":\n\t\t\treturn d.Metrics().String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func (_e *MockDataCoord_Expecter) SetSegmentState(ctx interface{}, req interface{}) *MockDataCoord_SetSegmentState_Call {\n\treturn &MockDataCoord_SetSegmentState_Call{Call: _e.mock.On(\"SetSegmentState\", ctx, req)}\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteSegmentListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteSegmentListener\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteSegmentListener), region, listener)\n}", "func (m *MockProvider) OnEndpointsSynced() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsSynced\")\n}", "func (client WorkloadNetworksClient) CreateSegmentsResponder(resp *http.Response) (result WorkloadNetworkSegment, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (_m *azureBlobClient) ListBlobsFlatSegment(_a0 context.Context, _a1 azblob.Marker, _a2 azblob.ListBlobsSegmentOptions) (*azblob.ListBlobsFlatSegmentResponse, error) {\n\tret := _m.Called(_a0, _a1, _a2)\n\n\tvar r0 
*azblob.ListBlobsFlatSegmentResponse\n\tif rf, ok := ret.Get(0).(func(context.Context, azblob.Marker, azblob.ListBlobsSegmentOptions) *azblob.ListBlobsFlatSegmentResponse); ok {\n\t\tr0 = rf(_a0, _a1, _a2)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*azblob.ListBlobsFlatSegmentResponse)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, azblob.Marker, azblob.ListBlobsSegmentOptions) error); ok {\n\t\tr1 = rf(_a0, _a1, _a2)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockProvider) OnServiceSynced() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceSynced\")\n}", "func TestDeleteCleanerOneSegment(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Bytes = 100\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\texpected := []*segment{createSegment(t, dir, 0, 100)}\n\tactual, err := cleaner.Clean(expected)\n\trequire.NoError(t, err)\n\trequire.Equal(t, expected, actual)\n}", "func (m *MockisAclSelector_SegSelector) EXPECT() *MockisAclSelector_SegSelectorMockRecorder {\n\treturn m.recorder\n}", "func TestListener(t *testing.T) {\n\tlistener := &fakeListener{}\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, listener)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\tgo func() {\n\t\treq := <-rt.req\n\t\ttrace := httptrace.ContextClientTrace(req.Context())\n\t\ttrace.GotConn(httptrace.GotConnInfo{\n\t\t\tConn: &fakeConn{\n\t\t\t\tremoteAddr: &net.TCPAddr{\n\t\t\t\t\tIP: net.ParseIP(\"192.0.2.2\"),\n\t\t\t\t\tPort: 443,\n\t\t\t\t}}})\n\n\t\tr, w := io.Pipe()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 200,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t\tw.Write([]byte{0, 0, 8, 9, 10})\n\t\tw.Close()\n\t}()\n\n\tdoh.Query(simpleQueryBytes)\n\ts := listener.summary\n\tif s.Latency < 0 {\n\t\tt.Errorf(\"Negative latency: 
%f\", s.Latency)\n\t}\n\tif !bytes.Equal(s.Query, simpleQueryBytes) {\n\t\tt.Errorf(\"Wrong query: %v\", s.Query)\n\t}\n\tif !bytes.Equal(s.Response, []byte{0xbe, 0xef, 8, 9, 10}) {\n\t\tt.Errorf(\"Wrong response: %v\", s.Response)\n\t}\n\tif s.Server != \"192.0.2.2\" {\n\t\tt.Errorf(\"Wrong server IP string: %s\", s.Server)\n\t}\n\tif s.Status != Complete {\n\t\tt.Errorf(\"Wrong status: %d\", s.Status)\n\t}\n}", "func (_m *MockDataCoord) SaveImportSegment(ctx context.Context, req *datapb.SaveImportSegmentRequest) (*commonpb.Status, error) {\n\tret := _m.Called(ctx, req)\n\n\tvar r0 *commonpb.Status\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *datapb.SaveImportSegmentRequest) (*commonpb.Status, error)); ok {\n\t\treturn rf(ctx, req)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, *datapb.SaveImportSegmentRequest) *commonpb.Status); ok {\n\t\tr0 = rf(ctx, req)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*commonpb.Status)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *datapb.SaveImportSegmentRequest) error); ok {\n\t\tr1 = rf(ctx, req)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func MockListener(t *testing.T, address string) {\n\tladdr, err := net.ResolveUDPAddr(\"udp\", address)\n\tif err != nil {\n\t\tt.Fatal(\"Couldn't resolve address\", err)\n\t}\n\n\t_, err = net.ListenUDP(\"udp\", laddr)\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't listen to %s: %s\", address, err)\n\t}\n\n\ttime.Sleep(10 * time.Second)\n\treturn\n}", "func (m *MockStream) RemoveEventListener(streamEventListener types.StreamEventListener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RemoveEventListener\", streamEventListener)\n}", "func TestServiceStatusHandler(t *testing.T) {\n\ttestServicesCmdHandler(statusCmd, t)\n}", "func (_e *MockDataCoord_Expecter) SaveImportSegment(ctx interface{}, req interface{}) *MockDataCoord_SaveImportSegment_Call {\n\treturn &MockDataCoord_SaveImportSegment_Call{Call: 
_e.mock.On(\"SaveImportSegment\", ctx, req)}\n}", "func (m *MockLoadBalance) DeleteListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) {\n\toldRBAC := envconfig.XDSRBAC\n\tenvconfig.XDSRBAC = true\n\tdefer func() {\n\t\tenvconfig.XDSRBAC = oldRBAC\n\t}()\n\t_, readyCh, xdsC, _, cleanup := newListenerWrapper(t)\n\tdefer cleanup()\n\n\t// Verify that the listener wrapper registers a listener watch for the\n\t// expected Listener resource name.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tname, err := xdsC.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a watch on a Listener resource: %v\", err)\n\t}\n\tif name != testListenerResourceName {\n\t\tt.Fatalf(\"listenerWrapper registered a lds watch on %s, want %s\", name, testListenerResourceName)\n\t}\n\tfcm, err := xdsresource.NewFilterChainManager(listenerWithRouteConfiguration)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsclient.NewFilterChainManager() failed with error: %v\", err)\n\t}\n\n\t// Push a good update which contains a Filter Chain that specifies dynamic\n\t// RDS Resources that need to be received. 
This should ping rds handler\n\t// about which rds names to start, which will eventually start a watch on\n\t// xds client for rds name \"route-1\".\n\txdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{\n\t\tInboundListenerCfg: &xdsresource.InboundListenerConfig{\n\t\t\tAddress: fakeListenerHost,\n\t\t\tPort: strconv.Itoa(fakeListenerPort),\n\t\t\tFilterChains: fcm,\n\t\t}}, nil)\n\n\t// This should start a watch on xds client for rds name \"route-1\".\n\trouteName, err := xdsC.WaitForWatchRouteConfig(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a watch on a Route resource: %v\", err)\n\t}\n\tif routeName != \"route-1\" {\n\t\tt.Fatalf(\"listenerWrapper registered a lds watch on %s, want %s\", routeName, \"route-1\")\n\t}\n\n\t// This shouldn't invoke good update channel, as has not received rds updates yet.\n\ttimer := time.NewTimer(defaultTestShortTimeout)\n\tselect {\n\tcase <-timer.C:\n\t\ttimer.Stop()\n\tcase <-readyCh:\n\t\tt.Fatalf(\"ready channel written to without rds configuration specified\")\n\t}\n\n\t// Invoke rds callback for the started rds watch. 
This valid rds callback\n\t// should trigger the listener wrapper to fire GoodUpdate, as it has\n\t// received both it's LDS Configuration and also RDS Configuration,\n\t// specified in LDS Configuration.\n\txdsC.InvokeWatchRouteConfigCallback(\"route-1\", xdsresource.RouteConfigUpdate{}, nil)\n\n\t// All of the xDS updates have completed, so can expect to send a ping on\n\t// good update channel.\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatalf(\"timeout waiting for the ready channel to be written to after receipt of a good rds update\")\n\tcase <-readyCh:\n\t}\n}", "func (m *MockServerStreamConnectionEventListener) NewStreamDetect(context context.Context, sender types.StreamSender, span types.Span) types.StreamReceiveListener {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewStreamDetect\", context, sender, span)\n\tret0, _ := ret[0].(types.StreamReceiveListener)\n\treturn ret0\n}", "func TestLifecycleSharedSubnet(t *testing.T) {\n\trunLifecycleTestAWS(&LifecycleTestOptions{\n\t\tt: t,\n\t\tSrcDir: \"shared_subnet\",\n\t\tShared: []string{\"subnet-12345678\"},\n\t})\n}", "func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) {\n\tfs, clientCh, cleanup := setupOverrides()\n\tdefer cleanup()\n\n\t// Create a server option to get notified about serving mode changes. We don't\n\t// do anything other than throwing a log entry here. But this is required,\n\t// since the server code emits a log entry at the default level (which is\n\t// ERROR) if no callback is registered for serving mode changes. Our\n\t// testLogger fails the test if there is any log entry at ERROR level. It does\n\t// provide an ExpectError() method, but that takes a string and it would be\n\t// painful to construct the exact error message expected here. 
Instead this\n\t// works just fine.\n\tmodeChangeOpt := ServingModeCallback(func(addr net.Addr, args ServingModeChangeArgs) {\n\t\tt.Logf(\"Serving mode for listener %q changed to %q, err: %v\", addr.String(), args.Mode, args.Err)\n\t})\n\tserver := NewGRPCServer(modeChangeOpt)\n\tdefer server.Stop()\n\n\tlis, err := testutils.LocalTCPListener()\n\tif err != nil {\n\t\tt.Fatalf(\"testutils.LocalTCPListener() failed: %v\", err)\n\t}\n\n\t// Call Serve() in a goroutine, and push on a channel when Serve returns.\n\tserveDone := testutils.NewChannel()\n\tgo func() {\n\t\tif err := server.Serve(lis); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tserveDone.Send(nil)\n\t}()\n\n\t// Wait for an xdsClient to be created.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tc, err := clientCh.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for new xdsClient to be created: %v\", err)\n\t}\n\tclient := c.(*fakeclient.Client)\n\n\t// Wait for a listener watch to be registered on the xdsClient.\n\tname, err := client.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a ListenerWatch: %v\", err)\n\t}\n\twantName := strings.Replace(testServerListenerResourceNameTemplate, \"%s\", lis.Addr().String(), -1)\n\tif name != wantName {\n\t\tt.Fatalf(\"LDS watch registered for name %q, want %q\", name, wantName)\n\t}\n\n\t// Push a good LDS response with security config, and wait for Serve() to be\n\t// invoked on the underlying grpc.Server. 
Also make sure that certificate\n\t// providers are not created.\n\tfcm, err := xdsresource.NewFilterChainManager(&v3listenerpb.Listener{\n\t\tFilterChains: []*v3listenerpb.FilterChain{\n\t\t\t{\n\t\t\t\tTransportSocket: &v3corepb.TransportSocket{\n\t\t\t\t\tName: \"envoy.transport_sockets.tls\",\n\t\t\t\t\tConfigType: &v3corepb.TransportSocket_TypedConfig{\n\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{\n\t\t\t\t\t\t\tCommonTlsContext: &v3tlspb.CommonTlsContext{\n\t\t\t\t\t\t\t\tTlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{\n\t\t\t\t\t\t\t\t\tInstanceName: \"identityPluginInstance\",\n\t\t\t\t\t\t\t\t\tCertificateName: \"identityCertName\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tFilters: []*v3listenerpb.Filter{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"filter-1\",\n\t\t\t\t\t\tConfigType: &v3listenerpb.Filter_TypedConfig{\n\t\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{\n\t\t\t\t\t\t\t\tRouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{\n\t\t\t\t\t\t\t\t\tRouteConfig: &v3routepb.RouteConfiguration{\n\t\t\t\t\t\t\t\t\t\tName: \"routeName\",\n\t\t\t\t\t\t\t\t\t\tVirtualHosts: []*v3routepb.VirtualHost{{\n\t\t\t\t\t\t\t\t\t\t\tDomains: []string{\"lds.target.good:3333\"},\n\t\t\t\t\t\t\t\t\t\t\tRoutes: []*v3routepb.Route{{\n\t\t\t\t\t\t\t\t\t\t\t\tMatch: &v3routepb.RouteMatch{\n\t\t\t\t\t\t\t\t\t\t\t\t\tPathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: \"/\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\tAction: &v3routepb.Route_NonForwardingAction{},\n\t\t\t\t\t\t\t\t\t\t\t}}}}},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tHttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsclient.NewFilterChainManager() failed with error: %v\", err)\n\t}\n\taddr, port := 
splitHostPort(lis.Addr().String())\n\tclient.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{\n\t\tRouteConfigName: \"routeconfig\",\n\t\tInboundListenerCfg: &xdsresource.InboundListenerConfig{\n\t\t\tAddress: addr,\n\t\t\tPort: port,\n\t\t\tFilterChains: fcm,\n\t\t},\n\t}, nil)\n\tif _, err := fs.serveCh.Receive(ctx); err != nil {\n\t\tt.Fatalf(\"error when waiting for Serve() to be invoked on the grpc.Server\")\n\t}\n\n\t// Make sure the security configuration is not acted upon.\n\tif err := verifyCertProviderNotCreated(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func testListener(t *testing.T, handler func(io.ReadWriter)) string {\n\tassert := assert.New(t)\n\trequire := require.New(t)\n\taddress := fmt.Sprintf(\"127.0.0.1:%d\", rand.Int31n(16384)+20000)\n\tl, err := net.Listen(`tcp4`, address)\n\trequire.Nil(err)\n\n\tgo func() {\n\t\tc, err := l.Accept()\n\t\trequire.Nil(err)\n\t\tdefer func() {\n\t\t\tassert.Nil(c.Close())\n\t\t}()\n\n\t\tif handler != nil {\n\t\t\thandler(c)\n\t\t}\n\t}()\n\n\treturn address\n}", "func (s *GeomSuite) TestCrossSegmentsIn(c *C) {\n\tx, y := CrossSegments(-1., 1., 4., 1., 0., 0., 2., 2.)\n\tc.Check(x, Near, 1., math.SmallestNonzeroFloat64)\n\tc.Check(y, Near, 1., math.SmallestNonzeroFloat64)\n}", "func (s *SegmentChangesWrapper) AddToSegment(segmentName string, keys []string) error {\n\treturn errSegmentStorageNotImplementedMethod\n}", "func TestIntersectionVertical(t *testing.T) {\n\tsegment1, err := newLineSegment(point{x: 0, y: 0}, point{x: 0, y: 10})\n\tif err != nil {\n\t\tt.Error(\"error\")\n\t}\n\n\tsegment2, err := newLineSegment(point{x: -10, y: 5}, point{x: 10, y: 5})\n\tif err != nil {\n\t\tt.Error(\"error\")\n\t}\n\n\tintersection, points := doesIntersect(segment1, segment2)\n\n\tif !intersection {\n\t\tt.Error(\"segments should intersect\")\n\t}\n\n\tpoint := points[0]\n\n\tif point.x != 0 && point.y != 5 {\n\t\tt.Error(\"intersection point should be {0, 5}\")\n\t}\n}", "func (_e *MockDataCoord_Expecter) 
AssignSegmentID(ctx interface{}, req interface{}) *MockDataCoord_AssignSegmentID_Call {\n\treturn &MockDataCoord_AssignSegmentID_Call{Call: _e.mock.On(\"AssignSegmentID\", ctx, req)}\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureListener\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureListener), region, listener)\n}", "func (s) TestClientWatchEDS(t *testing.T) {\n\tedsLBCh := testutils.NewChannel()\n\txdsC, cleanup := setup(edsLBCh)\n\tdefer cleanup()\n\n\tbuilder := balancer.Get(Name)\n\tedsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{})\n\tif edsB == nil {\n\t\tt.Fatalf(\"builder.Build(%s) failed and returned nil\", Name)\n\t}\n\tdefer edsB.Close()\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\t// If eds service name is not set, should watch for cluster name.\n\tif err := edsB.UpdateClientConnState(balancer.ClientConnState{\n\t\tResolverState: xdsclient.SetClient(resolver.State{}, xdsC),\n\t\tBalancerConfig: newLBConfigWithOneEDS(\"cluster-1\"),\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := verifyExpectedRequests(ctx, xdsC, \"cluster-1\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Update with an non-empty edsServiceName should trigger an EDS watch for\n\t// the same.\n\tif err := edsB.UpdateClientConnState(balancer.ClientConnState{\n\t\tResolverState: xdsclient.SetClient(resolver.State{}, xdsC),\n\t\tBalancerConfig: newLBConfigWithOneEDS(\"foobar-1\"),\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := verifyExpectedRequests(ctx, xdsC, \"\", \"foobar-1\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Also test the case where the edsServerName changes from one non-empty\n\t// name to another, and make sure a new watch is registered. 
The previously\n\t// registered watch will be cancelled, which will result in an EDS request\n\t// with no resource names being sent to the server.\n\tif err := edsB.UpdateClientConnState(balancer.ClientConnState{\n\t\tResolverState: xdsclient.SetClient(resolver.State{}, xdsC),\n\t\tBalancerConfig: newLBConfigWithOneEDS(\"foobar-2\"),\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := verifyExpectedRequests(ctx, xdsC, \"\", \"foobar-2\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (m *MockAuthorizeResponder) AddFragment(arg0, arg1 string) {\n\tm.ctrl.Call(m, \"AddFragment\", arg0, arg1)\n}", "func TestDeleteCleanerNoSegments(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Bytes = 100\n\tcleaner := newDeleteCleaner(opts)\n\tsegments, err := cleaner.Clean(nil)\n\trequire.NoError(t, err)\n\trequire.Nil(t, segments)\n}", "func (s) TestSubConnStateChange(t *testing.T) {\n\tedsLBCh := testutils.NewChannel()\n\txdsC, cleanup := setup(edsLBCh)\n\tdefer cleanup()\n\n\tbuilder := balancer.Get(Name)\n\tedsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{})\n\tif edsB == nil {\n\t\tt.Fatalf(\"builder.Build(%s) failed and returned nil\", Name)\n\t}\n\tdefer edsB.Close()\n\n\tif err := edsB.UpdateClientConnState(balancer.ClientConnState{\n\t\tResolverState: xdsclient.SetClient(resolver.State{}, xdsC),\n\t\tBalancerConfig: newLBConfigWithOneEDS(testEDSServcie),\n\t}); err != nil {\n\t\tt.Fatalf(\"edsB.UpdateClientConnState() failed: %v\", err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif _, err := xdsC.WaitForWatchEDS(ctx); err != nil {\n\t\tt.Fatalf(\"xdsClient.WatchEndpoints failed with error: %v\", err)\n\t}\n\txdsC.InvokeWatchEDSCallback(\"\", defaultEndpointsUpdate, nil)\n\tedsLB, err := waitForNewChildLB(ctx, edsLBCh)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfsc := &fakeSubConn{}\n\tstate := 
balancer.SubConnState{ConnectivityState: connectivity.Ready}\n\tedsB.UpdateSubConnState(fsc, state)\n\tif err := edsLB.waitForSubConnStateChange(ctx, &scStateChange{sc: fsc, state: state}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestOAuthServiceAccountClientEvent(t *testing.T) {\n\n\ttests := map[string]struct {\n\t\tannotationPrefix string\n\t\tannotation string\n\t\texpectedEventReason string\n\t\texpectedEventMsg string\n\t\tnumEvents int\n\t\texpectBadRequest bool\n\t}{\n\t\t\"test-good-url\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"/oauthcallback\",\n\t\t\tnumEvents: 0,\n\t\t},\n\t\t\"test-bad-url\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"foo:foo\",\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: \"system:serviceaccount:\" + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-url-parse\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"::\",\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: \"[parse ::: missing protocol scheme, system:serviceaccount:\" + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-annotation-kind\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: 
`{\"kind\":\"foo\",\"apiVersion\":\"oauth.openshift.io/v1\",\"metadata\":{\"creationTimestamp\":null},\"reference\":{\"group\":\"foo\",\"kind\":\"Route\",\"name\":\"route1\"}}`,\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[no kind \"foo\" is registered for version \"oauth.openshift.io/v1\" in scheme \"github.com/openshift/origin/pkg/serviceaccounts/oauthclient/oauthclientregistry.go:54\", system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-type-parse\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: `{asdf\":\"adsf\"}`,\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[couldn't get version/kind; json parse error: invalid character 'a' looking for beginning of object key string, system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-route-not-found\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"Route\", \"route1\", \"route.openshift.io\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[routes.route.openshift.io \"route1\" not found, system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using 
serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-route-wrong-group\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"Route\", \"route1\", \"foo\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-reference-kind\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"foo\", \"route1\", \"route.openshift.io\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t}\n\n\ttestServer, err := setupTestOAuthServer()\n\tif err != nil {\n\t\tt.Fatalf(\"error setting up test server: %s\", err)\n\t}\n\n\tdefer testServer.oauthServer.Close()\n\tdefer testserver.CleanupMasterEtcd(t, testServer.masterConfig)\n\n\tfor tcName, testCase := range tests {\n\t\tvar redirect string = testServer.oauthServer.URL + \"/oauthcallback\"\n\t\tif testCase.numEvents != 0 {\n\t\t\tredirect = testCase.annotation\n\t\t}\n\n\t\tt.Logf(\"%s: annotationPrefix %s, annotation %s\", tcName, testCase.annotationPrefix, testCase.annotation)\n\t\tsa, err := 
setupTestSA(testServer.clusterAdminKubeClient, testCase.annotationPrefix, redirect)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error setting up test SA: %s\", tcName, err)\n\t\t}\n\n\t\tsecret, err := setupTestSecrets(testServer.clusterAdminKubeClient, sa)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error setting up test secrets: %s\", tcName, err)\n\t\t}\n\n\t\trunTestOAuthFlow(t, testServer, sa, secret, redirect, testCase.expectBadRequest)\n\n\t\t// Check events with a short poll to stop flakes\n\t\tvar evList *kapi.EventList\n\t\terr = wait.Poll(time.Second, 5*time.Second, func() (bool, error) {\n\t\t\tevList, err = testServer.clusterAdminKubeClient.Core().Events(projectName).List(metav1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif len(evList.Items) < testCase.numEvents {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: err polling for events\", tcName)\n\t\t}\n\n\t\tevents := collectEventsWithReason(evList, testCase.expectedEventReason)\n\n\t\tif testCase.numEvents != len(events) {\n\t\t\tt.Fatalf(\"%s: expected %d events, found %d\", tcName, testCase.numEvents, len(events))\n\t\t}\n\n\t\tif testCase.numEvents != 0 && events[0].Message != testCase.expectedEventMsg {\n\t\t\tt.Fatalf(\"%s: expected event message %s, got %s\", tcName, testCase.expectedEventMsg, events[0].Message)\n\t\t}\n\n\t\terr = testServer.clusterAdminKubeClient.Core().Events(projectName).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error deleting events: %s\", tcName, err)\n\t\t}\n\t}\n}", "func (m *MockHealthCheck) SetListener(arg0 discovery.LegacyHealthCheckStatsListener, arg1 bool) {\n\tm.ctrl.Call(m, \"SetListener\", arg0, arg1)\n}", "func TestProcessor_StartWithErrorAfterRebalance(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tvar (\n\t\terr error\n\t\tconsumer = 
mock.NewMockConsumer(ctrl)\n\t\tst = mock.NewMockStorage(ctrl)\n\t\tsb = func(topic string, par int32) (storage.Storage, error) {\n\t\t\treturn st, nil\n\t\t}\n\t\tfinal = make(chan bool)\n\t\tch = make(chan kafka.Event)\n\t\tp = createProcessor(t, ctrl, consumer, 3, sb)\n\t\tvalue = []byte(\"value\")\n\t)\n\t// -- expectations --\n\t// 1. start\n\tconsumer.EXPECT().Subscribe(topOff).Return(nil)\n\tconsumer.EXPECT().Events().Return(ch).AnyTimes()\n\t// 2. rebalance\n\tst.EXPECT().Open().Times(3)\n\tst.EXPECT().GetOffset(int64(-2)).Return(int64(123), nil).Times(3)\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(0), int64(123))\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(1), int64(123))\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(2), int64(123))\n\t// 3. message\n\tgomock.InOrder(\n\t\tst.EXPECT().Set(\"key\", value).Return(nil),\n\t\tst.EXPECT().SetOffset(int64(1)),\n\t\tst.EXPECT().MarkRecovered(),\n\t)\n\t// 4. error\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(0))\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(1))\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(2))\n\tst.EXPECT().Close().Times(3)\n\tconsumer.EXPECT().Close().Do(func() { close(ch) })\n\n\t// -- test --\n\t// 1. start\n\tgo func() {\n\t\terr = p.Run(context.Background())\n\t\tensure.NotNil(t, err)\n\t\tclose(final)\n\t}()\n\n\t// 2. rebalance\n\tensure.True(t, len(p.partitions) == 0)\n\tch <- (*kafka.Assignment)(&map[int32]int64{0: -1, 1: -1, 2: -1})\n\terr = syncWith(t, ch, -1) // with processor\n\tensure.Nil(t, err)\n\tensure.True(t, len(p.partitions) == 3)\n\n\t// 3. message\n\tch <- &kafka.Message{\n\t\tTopic: tableName(group),\n\t\tPartition: 1,\n\t\tOffset: 1,\n\t\tKey: \"key\",\n\t\tValue: value,\n\t}\n\terr = syncWith(t, ch, 1) // with partition\n\tensure.Nil(t, err)\n\n\t// 4. receive error\n\tch <- new(kafka.Error)\n\n\t// 5. 
stop\n\terr = doTimed(t, func() { <-final })\n\tensure.Nil(t, err)\n}", "func (_e *MockQueryCoord_Expecter) GetSegmentInfo(ctx interface{}, req interface{}) *MockQueryCoord_GetSegmentInfo_Call {\n\treturn &MockQueryCoord_GetSegmentInfo_Call{Call: _e.mock.On(\"GetSegmentInfo\", ctx, req)}\n}", "func (s *SmartContract) SaveSegment(stub shim.ChaincodeStubInterface, args []string) sc.Response {\n\t// Parse segment\n\tbyteArgs := stub.GetArgs()\n\tsegment := &cs.Segment{}\n\tif err := json.Unmarshal(byteArgs[1], segment); err != nil {\n\t\treturn shim.Error(\"Could not parse segment\")\n\t}\n\n\t// Validate segment\n\tif err := segment.Validate(); err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\t// Set pending evidence\n\tsegment.SetEvidence(\n\t\tmap[string]interface{}{\n\t\t\t\"state\": cs.PendingEvidence,\n\t\t\t\"transactions\": map[string]string{\"transactionID\": stub.GetTxID()},\n\t\t})\n\n\t// Check has prevLinkHash if not create map else check prevLinkHash exists\n\tprevLinkHash := segment.Link.GetPrevLinkHashString()\n\tif prevLinkHash == \"\" {\n\t\t// Create map\n\t\tif err := s.SaveMap(stub, segment); err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t} else {\n\t\t// Check previous segment exists\n\t\tresponse := s.GetSegment(stub, []string{prevLinkHash})\n\t\tif response.Status == shim.ERROR {\n\t\t\treturn shim.Error(\"Parent segment doesn't exist\")\n\t\t}\n\t}\n\n\t// Save segment\n\tsegmentDoc := SegmentDoc{\n\t\tObjectTypeSegment,\n\t\tsegment.GetLinkHashString(),\n\t\t*segment,\n\t}\n\tsegmentDocBytes, err := json.Marshal(segmentDoc)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tif err := stub.PutState(segment.GetLinkHashString(), segmentDocBytes); err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\treturn shim.Success(nil)\n}", "func TestDrainingResourceEventHandler(t *testing.T) {\n\tcases := []struct {\n\t\tname string\n\t\tobj interface{}\n\t}{\n\t\t{\n\t\t\tname: \"CordonAndDrainNode\",\n\t\t\tobj: 
&core.Node{ObjectMeta: meta.ObjectMeta{Name: nodeName}},\n\t\t},\n\t\t{\n\t\t\tname: \"NotANode\",\n\t\t\tobj: &core.Pod{ObjectMeta: meta.ObjectMeta{Name: podName}},\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\th := NewDrainingResourceEventHandler(&NoopCordonDrainer{}, &record.FakeRecorder{}, WithDrainBuffer(0*time.Second))\n\t\t\th.OnUpdate(nil, tc.obj)\n\t\t})\n\t}\n}", "func (_m *MockSegmentManager) Get(segmentID int64) Segment {\n\tret := _m.Called(segmentID)\n\n\tvar r0 Segment\n\tif rf, ok := ret.Get(0).(func(int64) Segment); ok {\n\t\tr0 = rf(segmentID)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Segment)\n\t\t}\n\t}\n\n\treturn r0\n}", "func TestProcessor_StartWithTableWithErrorAfterRebalance(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tvar (\n\t\terr error\n\t\tconsumer = mock.NewMockConsumer(ctrl)\n\t\tproducer = mock.NewMockProducer(ctrl)\n\t\tst = mock.NewMockStorage(ctrl)\n\t\tsb = func(topic string, par int32) (storage.Storage, error) {\n\t\t\treturn st, nil\n\t\t}\n\t\tfinal = make(chan bool)\n\t\tch = make(chan kafka.Event)\n\t\tp = createProcessorWithTable(t, ctrl, consumer, producer, 3, sb)\n\t\tvalue = []byte(\"value\")\n\t\tblockit = make(chan bool)\n\t\tunblocked = make(chan bool)\n\t)\n\tp.graph.callbacks[topic] = func(ctx Context, msg interface{}) {\n\t\tfmt.Println(\"hallodfads\", msg)\n\t\tdefer close(unblocked)\n\t\t<-blockit\n\t\tfmt.Println(\"unblocked\")\n\t}\n\n\t// -- expectations --\n\t// 1. start\n\tconsumer.EXPECT().Subscribe(topOff).Return(nil)\n\tconsumer.EXPECT().Events().Return(ch).AnyTimes()\n\t// 2. 
rebalance\n\tst.EXPECT().Open().Times(6)\n\tst.EXPECT().GetOffset(int64(-2)).Return(int64(123), nil).Times(6)\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(0), int64(123))\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(1), int64(123))\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(2), int64(123))\n\tconsumer.EXPECT().AddPartition(table, int32(0), int64(123))\n\tconsumer.EXPECT().AddPartition(table, int32(1), int64(123))\n\tconsumer.EXPECT().AddPartition(table, int32(2), int64(123))\n\t// 3. EOF messages\n\tst.EXPECT().MarkRecovered().Times(3)\n\t// 4. messages\n\tconsumer.EXPECT().Commit(topic, int32(1), int64(2))\n\t// 5. error\n\tconsumer.EXPECT().Close().Do(func() { close(ch) })\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(0))\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(1))\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(2))\n\tconsumer.EXPECT().RemovePartition(table, int32(0))\n\tconsumer.EXPECT().RemovePartition(table, int32(1))\n\tconsumer.EXPECT().RemovePartition(table, int32(2))\n\tst.EXPECT().Close().Times(6)\n\tproducer.EXPECT().Close()\n\n\t// -- test --\n\t// 1. start\n\tgo func() {\n\t\terr = p.Run(context.Background())\n\t\tensure.NotNil(t, err)\n\t\tclose(final)\n\t}()\n\n\t// 2. rebalance\n\tensure.True(t, len(p.partitions) == 0)\n\tensure.True(t, len(p.partitionViews) == 0)\n\tch <- (*kafka.Assignment)(&map[int32]int64{0: -1, 1: -1, 2: -1})\n\terr = syncWith(t, ch, -1) // with processor\n\tensure.Nil(t, err)\n\tensure.True(t, len(p.partitions) == 3)\n\tensure.True(t, len(p.partitionViews) == 3)\n\n\t// 3. 
message\n\tch <- &kafka.EOF{\n\t\tTopic: tableName(group),\n\t\tHwm: 0,\n\t\tPartition: 0,\n\t}\n\terr = syncWith(t, ch, 0) // with partition\n\tensure.Nil(t, err)\n\tch <- &kafka.EOF{\n\t\tTopic: tableName(group),\n\t\tHwm: 0,\n\t\tPartition: 1,\n\t}\n\terr = syncWith(t, ch, 1) // with partition\n\tensure.Nil(t, err)\n\tch <- &kafka.EOF{\n\t\tTopic: tableName(group),\n\t\tHwm: 0,\n\t\tPartition: 2,\n\t}\n\terr = syncWith(t, ch, 2) // with partition\n\tensure.Nil(t, err)\n\n\t// 4. heavy message\n\tch <- &kafka.Message{\n\t\tTopic: topic,\n\t\tPartition: 1,\n\t\tOffset: 2,\n\t\tKey: \"key\",\n\t\tValue: value,\n\t}\n\t// dont wait for that\n\n\t// 4. receive error\n\tch <- new(kafka.Error)\n\n\t// sync with partition (should be unblocked)\n\tclose(blockit)\n\t<-unblocked\n\n\t// 5. stop\n\terr = doTimed(t, func() {\n\t\t<-final\n\t})\n\tensure.Nil(t, err)\n}", "func Test_Listen_Happy_Path(t *testing.T) {\n\tcalledCounter := 0\n\tconsumeCalled := make(chan interface{})\n\tconsumerGroup := &mocks.ConsumerGroup{}\n\n\t// Mimic the end of a consumerGroup session by just not blocking\n\tconsumerGroup.On(\"Consume\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\tcalledCounter++\n\t\t\tconsumeCalled <- true\n\t\t\tif calledCounter >= 2 {\n\t\t\t\ttime.Sleep(1000 * time.Second) // just wait\n\t\t\t}\n\t\t}).\n\t\tReturn(nil).Twice()\n\n\ttested := listener{consumerGroup: consumerGroup}\n\n\t// Listen() is blocking as long as there is no error or context is not canceled\n\tgo func() {\n\t\ttested.Listen(context.Background())\n\t\tassert.Fail(t, `We should have blocked on \"listen\", even if a consumer group session has ended`)\n\t}()\n\n\t// Assert that consume is called twice (2 consumer group sessions are expected)\n\t<-consumeCalled\n\t<-consumeCalled\n\n\tconsumerGroup.AssertExpectations(t)\n}", "func (m *MockDynamicCertPrivate) AddListener(arg0 dynamiccertificates.Listener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, 
\"AddListener\", arg0)\n}", "func (suite *AddCommandTestSuite) TestExecuteWhenTrackFound() {\n\n}", "func (s *BasePlSqlParserListener) EnterSegment_management_clause(ctx *Segment_management_clauseContext) {\n}", "func (_e *MockDataCoord_Expecter) GetSegmentInfo(ctx interface{}, req interface{}) *MockDataCoord_GetSegmentInfo_Call {\n\treturn &MockDataCoord_GetSegmentInfo_Call{Call: _e.mock.On(\"GetSegmentInfo\", ctx, req)}\n}", "func TestRangesFull(t *testing.T) {\n\tinitDone := make(chan struct{})\n\t// A single /32 can't be used to allocate since we always reserve 2 IPs,\n\t// the network and broadcast address, which in the case of a /32 means it is always full.\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.123/32\", \"FF::123/128\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv6Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" 
{\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-a\" {\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\treturn false\n\t\t}\n\n\t\tif svc.Name != \"service-b\" {\n\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected two service 
updates\")\n\t}\n}", "func (_m *MockEncoder) Stream() xio.SegmentReader {\n\tret := _m.ctrl.Call(_m, \"Stream\")\n\tret0, _ := ret[0].(xio.SegmentReader)\n\treturn ret0\n}", "func TestEvents(t *testing.T) {\n\tti := tInfo{}\n\tAssertOk(t, ti.setup(t), \"failed to setup test\")\n\tdefer ti.teardown()\n\n\t// uuid to make each source unique\n\tcomponentID := uuid.NewV4().String()\n\n\t// create recorder events directory\n\trecorderEventsDir, err := ioutil.TempDir(\"\", \"\")\n\tAssertOk(t, err, \"failed to create recorder events directory\")\n\tdefer os.RemoveAll(recorderEventsDir)\n\n\t// create recorder\n\tevtsRecorder, err := recorder.NewRecorder(&recorder.Config{\n\t\tComponent: componentID,\n\t\tEvtsProxyURL: ti.evtProxyServices.EvtsProxy.RPCServer.GetListenURL(),\n\t\tBackupDir: recorderEventsDir}, ti.logger)\n\tAssertOk(t, err, \"failed to create events recorder\")\n\tdefer evtsRecorder.Close()\n\n\t// send events (recorder -> proxy -> dispatcher -> writer -> evtsmgr -> elastic)\n\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test event - 1\", nil)\n\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test event - 2\", nil)\n\n\ttime.Sleep(1 * time.Second)\n\n\t// verify that it has reached elasticsearch; these are the first occurrences of an event\n\t// so it should have reached elasticsearch without being de-duped.\n\tquery := es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewTermQuery(\"type.keyword\", eventtypes.SERVICE_STARTED.String()))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 1\n\tti.assertElasticTotalEvents(t, query, true, 1, \"4s\") // total == 1\n\tquery = es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewMatchQuery(\"message\", \"test event -2\").Operator(\"and\"))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 1\n\tti.assertElasticTotalEvents(t, query, true, 1, \"4s\") // total == 1\n\n\t// send duplicates and check 
whether they're compressed\n\tnumDuplicates := 25\n\tfor i := 0; i < numDuplicates; i++ {\n\t\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test dup event - 1\", nil)\n\t\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test dup event - 2\", nil)\n\t}\n\n\t// ensure the de-duped events reached elasticsearch\n\t// test duplicate event - 1\n\tquery = es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewMatchQuery(\"message\", \"test dup event - 1\").Operator(\"and\"))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 1\n\tti.assertElasticTotalEvents(t, query, true, numDuplicates, \"2s\") // total == numDuplicates\n\n\t// test duplicate event - 2\n\tquery = es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewMatchQuery(\"message\", \"test dup event - 2\").Operator(\"and\"))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 1\n\tti.assertElasticTotalEvents(t, query, true, numDuplicates, \"2s\") // total == numDuplicates\n\n\t// create test NIC object\n\ttestNIC := policygen.CreateSmartNIC(\"00-14-22-01-23-45\",\n\t\tcluster.DistributedServiceCardStatus_ADMITTED.String(),\n\t\t\"esx-1\",\n\t\t&cluster.DSCCondition{\n\t\t\tType: cluster.DSCCondition_HEALTHY.String(),\n\t\t\tStatus: cluster.ConditionStatus_FALSE.String(),\n\t\t})\n\n\t// record events with reference object\n\tfor i := 0; i < numDuplicates; i++ {\n\t\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test dup event - 1\", testNIC)\n\t\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test dup event - 2\", testNIC)\n\t}\n\n\t// query by kind\n\tqueryByKind := es.NewTermQuery(\"object-ref.kind.keyword\", testNIC.GetKind())\n\tti.assertElasticUniqueEvents(t, queryByKind, true, 2, \"4s\") // unique == 2 (eventType1 and eventType2)\n\tti.assertElasticTotalEvents(t, queryByKind, true, numDuplicates*2, \"4s\") // total == numDuplicates\n}", "func (_m *MockSegmentManager) GetSealed(segmentID int64) 
Segment {\n\tret := _m.Called(segmentID)\n\n\tvar r0 Segment\n\tif rf, ok := ret.Get(0).(func(int64) Segment); ok {\n\t\tr0 = rf(segmentID)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Segment)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (m *MockListener) Create(listener api.Listener) (api.Listener, error) {\n\tret := m.ctrl.Call(m, \"Create\", listener)\n\tret0, _ := ret[0].(api.Listener)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestProcessor_StartWithErrorBeforeRebalance(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\ttype TestCase struct {\n\t\tname string\n\t\tevent kafka.Event\n\t}\n\ttests := []TestCase{\n\t\t{\"error\", &kafka.Error{Err: errors.New(\"something\")}},\n\t\t{\"message\", new(kafka.Message)},\n\t\t{\"EOF\", new(kafka.EOF)},\n\t\t{\"BOF\", new(kafka.BOF)},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tvar (\n\t\t\t\terr error\n\t\t\t\tconsumer = mock.NewMockConsumer(ctrl)\n\t\t\t\tst = mock.NewMockStorage(ctrl)\n\t\t\t\tsb = func(topic string, par int32) (storage.Storage, error) {\n\t\t\t\t\treturn st, nil\n\t\t\t\t}\n\t\t\t\tfinal = make(chan bool)\n\t\t\t\tch = make(chan kafka.Event)\n\t\t\t\tp = createProcessor(t, ctrl, consumer, 3, sb)\n\t\t\t)\n\n\t\t\tgomock.InOrder(\n\t\t\t\tconsumer.EXPECT().Subscribe(topOff).Return(nil),\n\t\t\t\tconsumer.EXPECT().Events().Return(ch),\n\t\t\t\tconsumer.EXPECT().Close().Do(func() { close(ch) }),\n\t\t\t)\n\t\t\tgo func() {\n\t\t\t\terr = p.Run(context.Background())\n\t\t\t\tensure.NotNil(t, err)\n\t\t\t\tclose(final)\n\t\t\t}()\n\n\t\t\tch <- tc.event\n\n\t\t\terr = doTimed(t, func() {\n\t\t\t\t<-final\n\t\t\t})\n\t\t\tensure.Nil(t, err)\n\t\t})\n\t}\n}", "func (_e *MockDataCoord_Expecter) UpdateSegmentStatistics(ctx interface{}, req interface{}) *MockDataCoord_UpdateSegmentStatistics_Call {\n\treturn &MockDataCoord_UpdateSegmentStatistics_Call{Call: _e.mock.On(\"UpdateSegmentStatistics\", ctx, 
req)}\n}", "func (client WorkloadNetworksClient) UpdateSegmentsResponder(resp *http.Response) (result WorkloadNetworkSegment, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func newMockListener(endpoint net.Conn) *mockListener {\n \n c := make(chan net.Conn, 1)\n c <- endpoint\n listener := &mockListener{\n connChannel: c,\n serverEndpoint: endpoint,\n }\n return listener\n}", "func (_e *MockSegmentManager_Expecter) Get(segmentID interface{}) *MockSegmentManager_Get_Call {\n\treturn &MockSegmentManager_Get_Call{Call: _e.mock.On(\"Get\", segmentID)}\n}", "func (m *MockisIpsecSAAction_SaHandle) isIpsecSAAction_SaHandle() {\n\tm.ctrl.Call(m, \"isIpsecSAAction_SaHandle\")\n}", "func TestNoIntersectionVertical(t *testing.T) {\n\tsegment1, err := newLineSegment(point{x: 0, y: 0}, point{x: 0, y: 10})\n\tif err != nil {\n\t\tt.Error(\"error\")\n\t}\n\n\tsegment2, err := newLineSegment(point{x: 5, y: 5}, point{x: 10, y: 5})\n\tif err != nil {\n\t\tt.Error(\"error\")\n\t}\n\n\tintersection, points := doesIntersect(segment1, segment2)\n\n\tif intersection {\n\t\tt.Error(\"segments should not intersect\")\n\t}\n\n\tif len(points) != 0 {\n\t\tt.Error(\"intersection point is non-zero\")\n\t}\n}", "func SetupTestDBConnSegment(dbname string, port int, host string, gpVersion dbconn.GPDBVersion) *dbconn.DBConn {\n\n\tif dbname == \"\" {\n\t\tgplog.Fatal(errors.New(\"No database provided\"), \"\")\n\t}\n\tif port == 0 {\n\t\tgplog.Fatal(errors.New(\"No segment port provided\"), \"\")\n\t}\n\t// Don't fail if no host is passed, as that implies connecting on the local host\n\tusername := operating.System.Getenv(\"PGUSER\")\n\tif username == \"\" {\n\t\tcurrentUser, _ := operating.System.CurrentUser()\n\t\tusername = currentUser.Username\n\t}\n\tif host == \"\" 
{\n\t\thost := operating.System.Getenv(\"PGHOST\")\n\t\tif host == \"\" {\n\t\t\thost, _ = operating.System.Hostname()\n\t\t}\n\t}\n\n\tconn := &dbconn.DBConn{\n\t\tConnPool: nil,\n\t\tNumConns: 0,\n\t\tDriver: &dbconn.GPDBDriver{},\n\t\tUser: username,\n\t\tDBName: dbname,\n\t\tHost: host,\n\t\tPort: port,\n\t\tTx: nil,\n\t\tVersion: dbconn.GPDBVersion{},\n\t}\n\n\tvar gpRoleGuc string\n\tif gpVersion.Before(\"7\") {\n\t\tgpRoleGuc = \"gp_session_role\"\n\t} else {\n\t\tgpRoleGuc = \"gp_role\"\n\t}\n\n\tconnStr := fmt.Sprintf(\"postgres://%s@%s:%d/%s?sslmode=disable&statement_cache_capacity=0&%s=utility\", conn.User, conn.Host, conn.Port, conn.DBName, gpRoleGuc)\n\n\tsegConn, err := conn.Driver.Connect(\"pgx\", connStr)\n\tif err != nil {\n\t\tgplog.FatalOnError(err)\n\t}\n\tconn.ConnPool = make([]*sqlx.DB, 1)\n\tconn.ConnPool[0] = segConn\n\n\tconn.Tx = make([]*sqlx.Tx, 1)\n\tconn.NumConns = 1\n\tversion, err := dbconn.InitializeVersion(conn)\n\tif err != nil {\n\t\tgplog.FatalOnError(err)\n\t}\n\tconn.Version = version\n\treturn conn\n}", "func TestLifecyclePrivateSharedSubnet(t *testing.T) {\n\trunLifecycleTestAWS(&LifecycleTestOptions{\n\t\tt: t,\n\t\tSrcDir: \"private-shared-subnet\",\n\t\tShared: []string{\"subnet-12345678\", \"subnet-abcdef\"},\n\t})\n}", "func TestControllerHandleEvents(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\taddServices []*corev1.Service\n\t\tupdateServices []string\n\t\tdelServices []string\n\t\texpAddedServices []string\n\t\texpDeletedServices []string\n\t}{\n\t\t{\n\t\t\tname: \"If a controller is watching services it should react to the service change events.\",\n\t\t\taddServices: []*corev1.Service{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc1\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 
8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc2\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tupdateServices: []string{\"svc1\"},\n\t\t\tdelServices: []string{\"svc1\", \"svc2\"},\n\t\t\texpAddedServices: []string{\"svc1\", \"svc2\", \"svc1\"},\n\t\t\texpDeletedServices: []string{\"svc1\", \"svc2\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\tassert := assert.New(t)\n\t\t\tresync := 30 * time.Second\n\t\t\tstopC := make(chan struct{})\n\t\t\tvar gotAddedServices []string\n\t\t\tvar gotDeletedServices []string\n\n\t\t\t// Create the kubernetes client.\n\t\t\tk8scli, _, _, err := cli.GetK8sClients(\"\")\n\n\t\t\trequire.NoError(err, \"kubernetes client is required\")\n\n\t\t\t// Prepare the environment on the cluster.\n\t\t\tprep := prepare.New(k8scli, t)\n\t\t\tprep.SetUp()\n\t\t\tdefer prep.TearDown()\n\n\t\t\t// Create the reitrever.\n\t\t\trt := &retrieve.Resource{\n\t\t\t\tListerWatcher: cache.NewListWatchFromClient(k8scli.CoreV1().RESTClient(), \"services\", prep.Namespace().Name, fields.Everything()),\n\t\t\t\tObject: &corev1.Service{},\n\t\t\t}\n\n\t\t\t// Call times are the number of times the handler should be called before sending the termination signal.\n\t\t\tstopCallTimes := len(test.addServices) + len(test.updateServices) + len(test.delServices)\n\t\t\tcalledTimes := 0\n\t\t\tvar mx sync.Mutex\n\n\t\t\t// Create the handler.\n\t\t\thl := &handler.HandlerFunc{\n\t\t\t\tAddFunc: func(_ context.Context, obj runtime.Object) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\tsvc := obj.(*corev1.Service)\n\t\t\t\t\tgotAddedServices = append(gotAddedServices, 
svc.Name)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tDeleteFunc: func(_ context.Context, id string) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\t// Ignore namespace.\n\t\t\t\t\tid = strings.Split(id, \"/\")[1]\n\t\t\t\t\tgotDeletedServices = append(gotDeletedServices, id)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// Create a Pod controller.\n\t\t\tctrl := controller.NewSequential(resync, hl, rt, nil, log.Dummy)\n\t\t\trequire.NotNil(ctrl, \"controller is required\")\n\t\t\tgo ctrl.Run(stopC)\n\n\t\t\t// Create the required services.\n\t\t\tfor _, svc := range test.addServices {\n\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Create(svc)\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\tfor _, svc := range test.updateServices {\n\t\t\t\torigSvc, err := k8scli.CoreV1().Services(prep.Namespace().Name).Get(svc, metav1.GetOptions{})\n\t\t\t\tif assert.NoError(err) {\n\t\t\t\t\t// Change something\n\t\t\t\t\torigSvc.Spec.Ports = append(origSvc.Spec.Ports, corev1.ServicePort{Name: \"updateport\", Port: 9876})\n\t\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Update(origSvc)\n\t\t\t\t\tassert.NoError(err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Delete the required services.\n\t\t\tfor _, svc := range test.delServices {\n\t\t\t\terr := k8scli.CoreV1().Services(prep.Namespace().Name).Delete(svc, &metav1.DeleteOptions{})\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\t// Wait until we have finished.\n\t\t\tselect {\n\t\t\t// Timeout.\n\t\t\tcase <-time.After(20 * time.Second):\n\t\t\t// Finished.\n\t\t\tcase <-stopC:\n\t\t\t}\n\n\t\t\t// Check.\n\t\t\tassert.Equal(test.expAddedServices, 
gotAddedServices)\n\t\t\tassert.Equal(test.expDeletedServices, gotDeletedServices)\n\t\t})\n\t}\n}", "func (_m *MockDataCoord) UpdateSegmentStatistics(ctx context.Context, req *datapb.UpdateSegmentStatisticsRequest) (*commonpb.Status, error) {\n\tret := _m.Called(ctx, req)\n\n\tvar r0 *commonpb.Status\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *datapb.UpdateSegmentStatisticsRequest) (*commonpb.Status, error)); ok {\n\t\treturn rf(ctx, req)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, *datapb.UpdateSegmentStatisticsRequest) *commonpb.Status); ok {\n\t\tr0 = rf(ctx, req)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*commonpb.Status)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *datapb.UpdateSegmentStatisticsRequest) error); ok {\n\t\tr1 = rf(ctx, req)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func Test_App_Listener(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\n\tgo func() {\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\tutils.AssertEqual(t, nil, app.Shutdown())\n\t}()\n\n\tln := fasthttputil.NewInmemoryListener()\n\tutils.AssertEqual(t, nil, app.Listener(ln))\n}", "func TestReallocOnInit(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\t// Initially request only an IPv4\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: 
slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"192.168.1.12\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP == \"192.168.1.12\" {\n\t\t\tt.Error(\"Expected ingress IP to not be the initial, bad IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service to be updated\")\n\t}\n}", "func TestService_Handle_Inviter(t *testing.T) {\n\tmockStore := &mockstorage.MockStore{Store: make(map[string]mockstorage.DBEntry)}\n\tstoreProv := mockstorage.NewCustomMockStoreProvider(mockStore)\n\tk := newKMS(t, storeProv)\n\tprov := &protocol.MockProvider{\n\t\tStoreProvider: storeProv,\n\t\tServiceMap: map[string]interface{}{\n\t\t\tmediator.Coordination: 
&mockroute.MockMediatorSvc{},\n\t\t},\n\t\tCustomKMS: k,\n\t\tKeyTypeValue: kms.ED25519Type,\n\t\tKeyAgreementTypeValue: kms.X25519ECDHKWType,\n\t}\n\n\tctx := &context{\n\t\toutboundDispatcher: prov.OutboundDispatcher(),\n\t\tcrypto: &tinkcrypto.Crypto{},\n\t\tkms: k,\n\t\tkeyType: kms.ED25519Type,\n\t\tkeyAgreementType: kms.X25519ECDHKWType,\n\t}\n\n\tverPubKey, encPubKey := newSigningAndEncryptionDIDKeys(t, ctx)\n\n\tctx.vdRegistry = &mockvdr.MockVDRegistry{CreateValue: createDIDDocWithKey(verPubKey, encPubKey)}\n\n\tconnRec, err := connection.NewRecorder(prov)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, connRec)\n\n\tctx.connectionRecorder = connRec\n\n\tdoc, err := ctx.vdRegistry.Create(testMethod, nil)\n\trequire.NoError(t, err)\n\n\ts, err := New(prov)\n\trequire.NoError(t, err)\n\n\tactionCh := make(chan service.DIDCommAction, 10)\n\terr = s.RegisterActionEvent(actionCh)\n\trequire.NoError(t, err)\n\n\tstatusCh := make(chan service.StateMsg, 10)\n\terr = s.RegisterMsgEvent(statusCh)\n\trequire.NoError(t, err)\n\n\tcompletedFlag := make(chan struct{})\n\trespondedFlag := make(chan struct{})\n\n\tgo msgEventListener(t, statusCh, respondedFlag, completedFlag)\n\n\tgo func() { service.AutoExecuteActionEvent(actionCh) }()\n\n\tinvitation := &Invitation{\n\t\tType: InvitationMsgType,\n\t\tID: randomString(),\n\t\tLabel: \"Bob\",\n\t\tRecipientKeys: []string{verPubKey},\n\t\tServiceEndpoint: \"http://alice.agent.example.com:8081\",\n\t}\n\n\terr = ctx.connectionRecorder.SaveInvitation(invitation.ID, invitation)\n\trequire.NoError(t, err)\n\n\tthid := randomString()\n\n\t// Invitation was previously sent by Alice to Bob.\n\t// Bob now sends a did-exchange Invitation\n\tpayloadBytes, err := json.Marshal(\n\t\t&Request{\n\t\t\tType: RequestMsgType,\n\t\t\tID: thid,\n\t\t\tLabel: \"Bob\",\n\t\t\tThread: &decorator.Thread{\n\t\t\t\tPID: invitation.ID,\n\t\t\t},\n\t\t\tDID: doc.DIDDocument.ID,\n\t\t\tDocAttach: unsignedDocAttach(t, 
doc.DIDDocument),\n\t\t})\n\trequire.NoError(t, err)\n\tmsg, err := service.ParseDIDCommMsgMap(payloadBytes)\n\trequire.NoError(t, err)\n\t_, err = s.HandleInbound(msg, service.NewDIDCommContext(doc.DIDDocument.ID, \"\", nil))\n\trequire.NoError(t, err)\n\n\tselect {\n\tcase <-respondedFlag:\n\tcase <-time.After(2 * time.Second):\n\t\trequire.Fail(t, \"didn't receive post event responded\")\n\t}\n\t// Alice automatically sends exchange Response to Bob\n\t// Bob replies with an ACK\n\tpayloadBytes, err = json.Marshal(\n\t\t&model.Ack{\n\t\t\tType: AckMsgType,\n\t\t\tID: randomString(),\n\t\t\tStatus: \"OK\",\n\t\t\tThread: &decorator.Thread{ID: thid},\n\t\t})\n\trequire.NoError(t, err)\n\n\tdidMsg, err := service.ParseDIDCommMsgMap(payloadBytes)\n\trequire.NoError(t, err)\n\n\t_, err = s.HandleInbound(didMsg, service.NewDIDCommContext(doc.DIDDocument.ID, \"\", nil))\n\trequire.NoError(t, err)\n\n\tselect {\n\tcase <-completedFlag:\n\tcase <-time.After(2 * time.Second):\n\t\trequire.Fail(t, \"didn't receive post event complete\")\n\t}\n\n\tvalidateState(t, s, thid, findNamespace(AckMsgType), (&completed{}).Name())\n}", "func TestCheckEvents(t *testing.T) {\n\ttestNamespace := \"test_namespace\"\n\tcha := make(chan *events.Envelope)\n\terrorsCh := make(chan error)\n\tme := &mockEvt{\n\t\tmockSubscribe: func(ctx context.Context, filter ...string) (ch <-chan *events.Envelope, errs <-chan error) {\n\t\t\treturn cha, errorsCh\n\t\t},\n\t}\n\titf := &fake.MockedContainerdClient{\n\t\tMockEvents: func() containerd.EventService {\n\t\t\treturn containerd.EventService(me)\n\t\t},\n\t\tMockNamespaces: func(ctx context.Context) ([]string, error) {\n\t\t\treturn []string{testNamespace}, nil\n\t\t},\n\t\tMockContainers: func(namespace string) ([]containerd.Container, error) {\n\t\t\treturn nil, nil\n\t\t},\n\t}\n\t// Test the basic listener\n\tsub := createEventSubscriber(\"subscriberTest1\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttp := 
&containerdevents.TaskPaused{\n\t\tContainerID: \"42\",\n\t}\n\n\tvp, err := typeurl.MarshalAny(tp)\n\tassert.NoError(t, err)\n\n\ten := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/paused\",\n\t\tEvent: vp,\n\t}\n\tcha <- &en\n\n\ttimeout := time.NewTimer(2 * time.Second)\n\tticker := time.NewTicker(5 * time.Millisecond)\n\tcondition := false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tev := sub.Flush(time.Now().Unix())\n\tassert.Len(t, ev, 1)\n\tassert.Equal(t, ev[0].Topic, \"/tasks/paused\")\n\terrorsCh <- fmt.Errorf(\"chan breaker\")\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting for error\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Test the multiple events one unsupported\n\tsub = createEventSubscriber(\"subscriberTest2\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttk := &containerdevents.TaskOOM{\n\t\tContainerID: \"42\",\n\t}\n\tvk, err := typeurl.MarshalAny(tk)\n\tassert.NoError(t, err)\n\n\tek := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/oom\",\n\t\tEvent: vk,\n\t}\n\n\tnd := &containerdevents.NamespaceDelete{\n\t\tName: \"k10s.io\",\n\t}\n\tvnd, err := typeurl.MarshalAny(nd)\n\tassert.NoError(t, err)\n\n\tevnd := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/namespaces/delete\",\n\t\tEvent: vnd,\n\t}\n\n\tcha <- &ek\n\tcha <- &evnd\n\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to 
be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\tev2 := sub.Flush(time.Now().Unix())\n\tfmt.Printf(\"\\n\\n 2/ Flush %v\\n\\n\", ev2)\n\tassert.Len(t, ev2, 1)\n\tassert.Equal(t, ev2[0].Topic, \"/tasks/oom\")\n}", "func (m *MockStreamEventListener) OnResetStream(reason types.StreamResetReason) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnResetStream\", reason)\n}", "func (_e *MockSegmentManager_Expecter) Put(segmentType interface{}, segments ...interface{}) *MockSegmentManager_Put_Call {\n\treturn &MockSegmentManager_Put_Call{Call: _e.mock.On(\"Put\",\n\t\tappend([]interface{}{segmentType}, segments...)...)}\n}", "func (s) TestResolverWatchCallbackAfterClose(t *testing.T) {\n\t// Setup the management server that synchronizes with the test goroutine\n\t// using two channels. The management server signals the test goroutine when\n\t// it receives a discovery request for a route configuration resource. And\n\t// the test goroutine signals the management server when the resolver is\n\t// closed.\n\twaitForRouteConfigDiscoveryReqCh := make(chan struct{}, 1)\n\twaitForResolverCloseCh := make(chan struct{})\n\tmgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{\n\t\tOnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error {\n\t\t\tif req.GetTypeUrl() == version.V3RouteConfigURL {\n\t\t\t\tselect {\n\t\t\t\tcase waitForRouteConfigDiscoveryReqCh <- struct{}{}:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\t<-waitForResolverCloseCh\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to start xDS management server: %v\", err)\n\t}\n\tdefer mgmtServer.Stop()\n\n\t// Create a bootstrap configuration specifying the above management server.\n\tnodeID := uuid.New().String()\n\tcleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{\n\t\tNodeID: nodeID,\n\t\tServerURI: mgmtServer.Address,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cleanup()\n\n\t// Configure listener and route 
configuration resources on the management\n\t// server.\n\tconst serviceName = \"my-service-client-side-xds\"\n\trdsName := \"route-\" + serviceName\n\tcdsName := \"cluster-\" + serviceName\n\tresources := e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)},\n\t\tRoutes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, serviceName, cdsName)},\n\t\tSkipValidation: true,\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL(\"xds:///\" + serviceName)})\n\tdefer rClose()\n\n\t// Wait for a discovery request for a route configuration resource.\n\tselect {\n\tcase <-waitForRouteConfigDiscoveryReqCh:\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Timeout when waiting for a discovery request for a route configuration resource\")\n\t}\n\n\t// Close the resolver and unblock the management server.\n\trClose()\n\tclose(waitForResolverCloseCh)\n\n\t// Verify that the update from the management server is not propagated to\n\t// the ClientConn. 
The xDS resolver, once closed, is expected to drop\n\t// updates from the xDS client.\n\tsCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)\n\tdefer sCancel()\n\tif _, err := tcc.stateCh.Receive(sCtx); err != context.DeadlineExceeded {\n\t\tt.Fatalf(\"ClientConn received an update from the resolver that was closed: %v\", err)\n\t}\n}", "func (m *MockSession) AnnounceWorkerStopped() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AnnounceWorkerStopped\")\n}", "func TestServiceDelete(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tvar svcIP string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tsvcIP = svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status to be updated\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() 
{\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been allocated\")\n\t}\n\n\terr := fixture.svcClient.Services(\"default\").Delete(context.Background(), \"service-a\", meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been released\")\n\t}\n}", "func (client WorkloadNetworksClient) DeleteSegmentResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func (_m *MockDataCoord) SetSegmentState(ctx context.Context, req *datapb.SetSegmentStateRequest) (*datapb.SetSegmentStateResponse, error) {\n\tret := _m.Called(ctx, req)\n\n\tvar r0 *datapb.SetSegmentStateResponse\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *datapb.SetSegmentStateRequest) (*datapb.SetSegmentStateResponse, error)); ok {\n\t\treturn rf(ctx, req)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, *datapb.SetSegmentStateRequest) *datapb.SetSegmentStateResponse); ok {\n\t\tr0 = rf(ctx, req)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*datapb.SetSegmentStateResponse)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *datapb.SetSegmentStateRequest) error); ok {\n\t\tr1 = rf(ctx, req)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockisAclSelector_SegSelector) Size() int {\n\tret := m.ctrl.Call(m, \"Size\")\n\tret0, _ := ret[0].(int)\n\treturn ret0\n}", "func TestStartListener(t *testing.T) {\n\ttimeout := time.Second * 10\n\tr := fixtureReceiver()\n\tmockedLogger, _ := logger.New(\"json\", \"info\")\n\tctx := context.Background()\n\n\t// used to simulate 
sending a stop signal\n\tctx, cancelFunc := context.WithCancel(ctx)\n\n\t// start receiver\n\twg := sync.WaitGroup{}\n\tstart := make(chan bool, 1)\n\tdefer close(start)\n\twg.Add(1)\n\tgo func(t *testing.T) {\n\t\tdefer wg.Done()\n\t\tstart <- true\n\t\tt.Log(\"starting receiver in goroutine\")\n\t\tif err := r.StartListen(ctx, &testHandler{}, mockedLogger); err != nil {\n\t\t\tt.Errorf(\"error while starting HTTPMessageReceiver: %v\", err)\n\t\t}\n\t\tt.Log(\"receiver goroutine ends here\")\n\t}(t)\n\n\t// wait for goroutine to start\n\t<-start\n\n\t// stop it\n\tcancelFunc()\n\tc := make(chan struct{})\n\tgo func() {\n\t\tdefer close(c)\n\t\twg.Wait()\n\t}()\n\n\tt.Log(\"Waiting for receiver to stop\")\n\tselect {\n\t// receiver shutdown properly\n\tcase <-c:\n\t\tt.Log(\"Waiting for receiver to stop [done]\")\n\t\tbreak\n\t// receiver shutdown in time\n\tcase <-time.Tick(timeout):\n\t\tt.Fatalf(\"Expected receiver to shutdown after timeout: %v\\n\", timeout)\n\t}\n}", "func (s) TestOutlierDetection(t *testing.T) {\n\tedsLBCh := testutils.NewChannel()\n\txdsC, cleanup := setup(edsLBCh)\n\tdefer cleanup()\n\tbuilder := balancer.Get(Name)\n\tedsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{})\n\tif edsB == nil {\n\t\tt.Fatalf(\"builder.Build(%s) failed and returned nil\", Name)\n\t}\n\tdefer edsB.Close()\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\t// Update Cluster Resolver with Client Conn State with Outlier Detection\n\t// configuration present. 
This is what will be passed down to this balancer,\n\t// as CDS Balancer gets the Cluster Update and converts the Outlier\n\t// Detection data to an Outlier Detection configuration and sends it to this\n\t// level.\n\tif err := edsB.UpdateClientConnState(balancer.ClientConnState{\n\t\tResolverState: xdsclient.SetClient(resolver.State{}, xdsC),\n\t\tBalancerConfig: newLBConfigWithOneEDSAndOutlierDetection(testEDSServcie, noopODCfg),\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := xdsC.WaitForWatchEDS(ctx); err != nil {\n\t\tt.Fatalf(\"xdsClient.WatchEndpoints failed with error: %v\", err)\n\t}\n\n\t// Invoke EDS Callback - causes child balancer to be built and then\n\t// UpdateClientConnState called on it with Outlier Detection as a direct\n\t// child.\n\txdsC.InvokeWatchEDSCallback(\"\", defaultEndpointsUpdate, nil)\n\tedsLB, err := waitForNewChildLB(ctx, edsLBCh)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlocalityID := xdsinternal.LocalityID{Zone: \"zone\"}\n\t// The priority configuration generated should have Outlier Detection as a\n\t// direct child due to Outlier Detection being turned on.\n\tpCfgWant := &priority.LBConfig{\n\t\tChildren: map[string]*priority.Child{\n\t\t\t\"priority-0-0\": {\n\t\t\t\tConfig: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\tName: outlierdetection.Name,\n\t\t\t\t\tConfig: &outlierdetection.LBConfig{\n\t\t\t\t\t\tInterval: 1<<63 - 1,\n\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\t\t\tName: clusterimpl.Name,\n\t\t\t\t\t\t\tConfig: &clusterimpl.LBConfig{\n\t\t\t\t\t\t\t\tCluster: testClusterName,\n\t\t\t\t\t\t\t\tEDSServiceName: \"test-eds-service-name\",\n\t\t\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{\n\t\t\t\t\t\t\t\t\tName: weightedtarget.Name,\n\t\t\t\t\t\t\t\t\tConfig: &weightedtarget.LBConfig{\n\t\t\t\t\t\t\t\t\t\tTargets: map[string]weightedtarget.Target{\n\t\t\t\t\t\t\t\t\t\t\tassertString(localityID.ToString): {\n\t\t\t\t\t\t\t\t\t\t\t\tWeight: 
100,\n\t\t\t\t\t\t\t\t\t\t\t\tChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tIgnoreReresolutionRequests: true,\n\t\t\t},\n\t\t},\n\t\tPriorities: []string{\"priority-0-0\"},\n\t}\n\n\tif err := edsLB.waitForClientConnStateChangeVerifyBalancerConfig(ctx, balancer.ClientConnState{\n\t\tBalancerConfig: pCfgWant,\n\t}); err != nil {\n\t\tt.Fatalf(\"EDS impl got unexpected update: %v\", err)\n\t}\n}" ]
[ "0.6233907", "0.6015205", "0.5945788", "0.58792853", "0.58749723", "0.5430532", "0.54042137", "0.53451204", "0.5248736", "0.51176804", "0.51127785", "0.50938296", "0.49612555", "0.49122086", "0.4878933", "0.48782825", "0.48485902", "0.4828453", "0.48244298", "0.48207852", "0.48042047", "0.47871488", "0.4764254", "0.4754629", "0.47498858", "0.47425395", "0.47309777", "0.47215027", "0.47184995", "0.47163296", "0.46996185", "0.46743777", "0.46573365", "0.4656809", "0.4653681", "0.4642862", "0.46283907", "0.46249133", "0.46165305", "0.46101588", "0.46046713", "0.45876163", "0.4585864", "0.45816568", "0.45613834", "0.45611298", "0.4554824", "0.45516545", "0.4549596", "0.45354864", "0.45346794", "0.45235914", "0.4510714", "0.4503737", "0.45007822", "0.44981557", "0.44699237", "0.44610095", "0.44561356", "0.44542083", "0.44484377", "0.4435075", "0.44234923", "0.44185632", "0.44153383", "0.44119743", "0.44100058", "0.44053167", "0.43977553", "0.43922013", "0.43873316", "0.43800926", "0.43793914", "0.43764776", "0.43759686", "0.43758455", "0.43754637", "0.43731228", "0.4370385", "0.436253", "0.43620005", "0.43531975", "0.43394762", "0.43391868", "0.433835", "0.43323478", "0.43264145", "0.43263486", "0.43168318", "0.43134224", "0.43088147", "0.43063843", "0.4279782", "0.42797548", "0.4279502", "0.42789632", "0.42764473", "0.42734686", "0.4267727", "0.4261657" ]
0.74456346
0
EnsureSegmentListener indicates an expected call of EnsureSegmentListener
func (mr *MockLoadBalanceMockRecorder) EnsureSegmentListener(region, listener interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureSegmentListener", reflect.TypeOf((*MockLoadBalance)(nil).EnsureSegmentListener), region, listener) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) EnsureSegmentListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureSegmentListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiSegmentListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiSegmentListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiSegmentListeners), region, lbID, listeners)\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureListener\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureListener), region, listener)\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteSegmentListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteSegmentListener\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteSegmentListener), region, listener)\n}", "func (d *segmentationDescriptor) SetSegmentsExpected(value uint8) {\n\td.segsExpected = value\n}", "func TestSegmentDummy(t *testing.T) {\n\ttype tcase struct {\n\t\tline geom.Line\n\t}\n\n\tfn := func(t *testing.T, tc tcase) {\n\t\ts := NewSegment(tc.line)\n\t\tif s.GetStart().Equals(tc.line[0]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[0], s.GetStart())\n\t\t}\n\t\tif s.GetEnd().Equals(tc.line[1]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[1], s.GetEnd())\n\t\t}\n\t\tif s.GetLineSegment() != tc.line {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line, s.GetLineSegment())\n\t\t}\n\t}\n\ttestcases := []tcase{\n\t\t{\n\t\t\tline: geom.Line{{1, 2}, {3, 4}},\n\t\t},\n\t}\n\n\tfor i, tc := range testcases {\n\t\ttc := 
tc\n\t\tt.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) { fn(t, tc) })\n\t}\n}", "func (m *MockLoadBalance) DeleteSegmentListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteSegmentListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockLoadBalance) EnsureMultiSegmentListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiSegmentListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *BasePlSqlParserListener) EnterDeferred_segment_creation(ctx *Deferred_segment_creationContext) {\n}", "func (s *scheduler) onFlushSegDone(e sched.Event) {\n\tevent := e.(*flushSegEvent)\n\tif err := e.GetError(); err != nil {\n\t\t// s.opts.EventListener.BackgroundErrorCB(err)\n\t\tevent.Segment.Unref()\n\t\treturn\n\t}\n\tctx := &Context{Opts: s.opts}\n\tmeta := event.Segment.GetMeta()\n\ttd, err := s.tables.StrongRefTable(meta.Table.Id)\n\tif err != nil {\n\t\t// s.opts.EventListener.BackgroundErrorCB(err)\n\t\tevent.Segment.Unref()\n\t\treturn\n\t}\n\tlogutil.Infof(\" %s | Segment %d | UpgradeSegEvent | Started\", sched.EventPrefix, meta.Id)\n\tnewevent := NewUpgradeSegEvent(ctx, event.Segment, td)\n\ts.Schedule(newevent)\n}", "func (s *SegmentChangesWrapper) AddToSegment(segmentName string, keys []string) error {\n\treturn errSegmentStorageNotImplementedMethod\n}", "func (d *segmentationDescriptor) SetSubSegmentsExpected(value uint8) {\n\td.subSegsExpected = value\n}", "func (_mr *MockOptionsMockRecorder) SetSegmentReaderPool(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, \"SetSegmentReaderPool\", reflect.TypeOf((*MockOptions)(nil).SetSegmentReaderPool), arg0)\n}", "func (s *scheduler) onUpgradeSegDone(e sched.Event) {\n\tevent := e.(*upgradeSegEvent)\n\tdefer 
event.TableData.Unref()\n\tdefer event.OldSegment.Unref()\n\tif err := e.GetError(); err != nil {\n\t\ts.opts.EventListener.BackgroundErrorCB(err)\n\t\treturn\n\t}\n\tevent.Segment.Unref()\n}", "func (m *MockLoadBalance) EnsureListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f Factory) TestGetSegmentOK(t *testing.T) {\n\tprocess := \"test\"\n\tparent, _ := f.Client.CreateMap(process, nil, \"test\")\n\n\tsegment, err := f.Client.GetSegment(process, parent.GetLinkHash())\n\tassert.NoError(t, err)\n\tassert.NotNil(t, segment)\n}", "func (client WorkloadNetworksClient) CreateSegmentsResponder(resp *http.Response) (result WorkloadNetworkSegment, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (c ClientFake) UpdateSegment(name, campaignID, segmentID string) (Segment, error) {\n\treturn Segment{}, nil\n}", "func (_mr *MockOptionsMockRecorder) SegmentReaderPool() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, \"SegmentReaderPool\", reflect.TypeOf((*MockOptions)(nil).SegmentReaderPool))\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiListeners), region, lbID, listeners)\n}", "func TestSegmentString(t *testing.T) {\n\ttestCases := []struct {\n\t\tinput Segment\n\t\twant string\n\t}{\n\t\t{\n\t\t\tinput: Segment{11, 13},\n\t\t\twant: \"[start: 11, end: 13]\",\n\t\t},\n\t\t{\n\t\t\tinput: Segment{313, 
313},\n\t\t\twant: \"[start: 313, end: 313]\",\n\t\t},\n\t}\n\n\tfor _, test := range testCases {\n\t\tif got := test.input.String(); got != test.want {\n\t\t\tt.Errorf(\"s.String() = %s, should be %s\", got, test.want)\n\t\t}\n\t}\n}", "func (s *BasePlSqlParserListener) EnterSegment_management_clause(ctx *Segment_management_clauseContext) {\n}", "func (c ClientFake) CreateSegment(name, campaignID string) (Segment, error) {\n\treturn Segment{}, nil\n}", "func (s *Segment) Validate() error {\n\tif s.Name == nil {\n\t\treturn errors.New(`segment \"name\" can not be nil`)\n\t}\n\n\tif s.ID == nil {\n\t\treturn errors.New(`segment \"id\" can not be nil`)\n\t}\n\n\tif s.StartTime == nil {\n\t\treturn errors.New(`segment \"start_time\" can not be nil`)\n\t}\n\n\t// it's ok for embedded subsegments to not have trace_id\n\t// but the root segment and independent subsegments must all\n\t// have trace_id.\n\tif s.TraceID == nil {\n\t\treturn errors.New(`segment \"trace_id\" can not be nil`)\n\t}\n\n\treturn nil\n}", "func (w *SegmentWAL) initSegments() error {\n\tfns, err := sequenceFiles(w.dirFile.Name(), \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(fns) == 0 {\n\t\treturn nil\n\t}\n\t// We must open all files in read/write mode as we may have to truncate along\n\t// the way and any file may become the tail.\n\tfor _, fn := range fns {\n\t\tf, err := os.OpenFile(fn, os.O_RDWR, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tw.files = append(w.files, f)\n\t}\n\n\t// Consume and validate meta headers.\n\tfor _, f := range w.files {\n\t\tmetab := make([]byte, 8)\n\n\t\tif n, err := f.Read(metab); err != nil {\n\t\t\treturn errors.Wrapf(err, \"validate meta %q\", f.Name())\n\t\t} else if n != 8 {\n\t\t\treturn errors.Errorf(\"invalid header size %d in %q\", n, f.Name())\n\t\t}\n\n\t\tif m := binary.BigEndian.Uint32(metab[:4]); m != WALMagic {\n\t\t\treturn errors.Errorf(\"invalid magic header %x in %q\", m, f.Name())\n\t\t}\n\t\tif metab[4] != 
WALFormatDefault {\n\t\t\treturn errors.Errorf(\"unknown WAL segment format %d in %q\", metab[4], f.Name())\n\t\t}\n\t}\n\n\treturn nil\n}", "func (dr *Resolver) preventSegmentMajorPageFault() {\n\t// if we don't access the segment, the eBPF program can't write to it ... (major page fault)\n\tdr.erpcSegment[0] = 0\n\tdr.erpcSegment[os.Getpagesize()] = 0\n\tdr.erpcSegment[2*os.Getpagesize()] = 0\n\tdr.erpcSegment[3*os.Getpagesize()] = 0\n\tdr.erpcSegment[4*os.Getpagesize()] = 0\n\tdr.erpcSegment[5*os.Getpagesize()] = 0\n\tdr.erpcSegment[6*os.Getpagesize()] = 0\n}", "func AddSegmentHook(hookPoint boil.HookPoint, segmentHook SegmentHook) {\n\tswitch hookPoint {\n\tcase boil.BeforeInsertHook:\n\t\tsegmentBeforeInsertHooks = append(segmentBeforeInsertHooks, segmentHook)\n\tcase boil.BeforeUpdateHook:\n\t\tsegmentBeforeUpdateHooks = append(segmentBeforeUpdateHooks, segmentHook)\n\tcase boil.BeforeDeleteHook:\n\t\tsegmentBeforeDeleteHooks = append(segmentBeforeDeleteHooks, segmentHook)\n\tcase boil.BeforeUpsertHook:\n\t\tsegmentBeforeUpsertHooks = append(segmentBeforeUpsertHooks, segmentHook)\n\tcase boil.AfterInsertHook:\n\t\tsegmentAfterInsertHooks = append(segmentAfterInsertHooks, segmentHook)\n\tcase boil.AfterSelectHook:\n\t\tsegmentAfterSelectHooks = append(segmentAfterSelectHooks, segmentHook)\n\tcase boil.AfterUpdateHook:\n\t\tsegmentAfterUpdateHooks = append(segmentAfterUpdateHooks, segmentHook)\n\tcase boil.AfterDeleteHook:\n\t\tsegmentAfterDeleteHooks = append(segmentAfterDeleteHooks, segmentHook)\n\tcase boil.AfterUpsertHook:\n\t\tsegmentAfterUpsertHooks = append(segmentAfterUpsertHooks, segmentHook)\n\t}\n}", "func (f Factory) TestGetSegmentNotFound(t *testing.T) {\n\tprocess := \"test\"\n\tfakeLinkHash, _ := types.NewBytes32FromString(\"0000000000000000000000000000000000000000000000000000000000000000\")\n\tsegment, err := f.Client.GetSegment(process, fakeLinkHash)\n\tassert.EqualError(t, err, \"Not Found\")\n\tassert.Nil(t, segment)\n}", "func (afc 
AnnouncementFeedbackCriterion) AsSegmentCriterion() (*SegmentCriterion, bool) {\n\treturn nil, false\n}", "func (s *BasePlSqlParserListener) EnterSegment_attributes_clause(ctx *Segment_attributes_clauseContext) {\n}", "func (tag *CustomSegmentTag) SegmentTag() bool {\n\treturn true\n}", "func (o *SearchLine) HasSegments() bool {\n\tif o != nil && o.Segments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (pafc PollAnswerFeedbackCriterion) AsSegmentCriterion() (*SegmentCriterion, bool) {\n\treturn nil, false\n}", "func (_e *MockDataCoord_Expecter) SetSegmentState(ctx interface{}, req interface{}) *MockDataCoord_SetSegmentState_Call {\n\treturn &MockDataCoord_SetSegmentState_Call{Call: _e.mock.On(\"SetSegmentState\", ctx, req)}\n}", "func (s *SmartContract) SaveSegment(stub shim.ChaincodeStubInterface, args []string) sc.Response {\n\t// Parse segment\n\tbyteArgs := stub.GetArgs()\n\tsegment := &cs.Segment{}\n\tif err := json.Unmarshal(byteArgs[1], segment); err != nil {\n\t\treturn shim.Error(\"Could not parse segment\")\n\t}\n\n\t// Validate segment\n\tif err := segment.Validate(); err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\t// Set pending evidence\n\tsegment.SetEvidence(\n\t\tmap[string]interface{}{\n\t\t\t\"state\": cs.PendingEvidence,\n\t\t\t\"transactions\": map[string]string{\"transactionID\": stub.GetTxID()},\n\t\t})\n\n\t// Check has prevLinkHash if not create map else check prevLinkHash exists\n\tprevLinkHash := segment.Link.GetPrevLinkHashString()\n\tif prevLinkHash == \"\" {\n\t\t// Create map\n\t\tif err := s.SaveMap(stub, segment); err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t} else {\n\t\t// Check previous segment exists\n\t\tresponse := s.GetSegment(stub, []string{prevLinkHash})\n\t\tif response.Status == shim.ERROR {\n\t\t\treturn shim.Error(\"Parent segment doesn't exist\")\n\t\t}\n\t}\n\n\t// Save segment\n\tsegmentDoc := 
SegmentDoc{\n\t\tObjectTypeSegment,\n\t\tsegment.GetLinkHashString(),\n\t\t*segment,\n\t}\n\tsegmentDocBytes, err := json.Marshal(segmentDoc)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tif err := stub.PutState(segment.GetLinkHashString(), segmentDocBytes); err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\treturn shim.Success(nil)\n}", "func (pfc PollFeedbackCriterion) AsSegmentCriterion() (*SegmentCriterion, bool) {\n\treturn nil, false\n}", "func TestDeleteCleanerNoSegments(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Bytes = 100\n\tcleaner := newDeleteCleaner(opts)\n\tsegments, err := cleaner.Clean(nil)\n\trequire.NoError(t, err)\n\trequire.Nil(t, segments)\n}", "func (client WorkloadNetworksClient) UpdateSegmentsResponder(resp *http.Response) (result WorkloadNetworkSegment, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (w *wire) addSegment(dir direction, dist int) {\n\tvar lastPoint point\n\tif len(w.points) != 0 {\n\t\tlastPoint = w.points[len(w.points)-1]\n\t}\n\tw.points = append(w.points, lastPoint.move(dir, dist))\n}", "func (c BootstrapVerifyConfiguration) VerifyIndexSegmentsOrDefault() bool {\n\tif c.VerifyIndexSegments == nil {\n\t\treturn false\n\t}\n\n\treturn *c.VerifyIndexSegments\n}", "func (loader *segmentLoader) checkSegmentSize(ctx context.Context, segmentLoadInfos []*querypb.SegmentLoadInfo, concurrency int) (uint64, uint64, error) {\n\tif len(segmentLoadInfos) == 0 || concurrency == 0 {\n\t\treturn 0, 0, nil\n\t}\n\n\tlog := log.Ctx(ctx).With(\n\t\tzap.Int64(\"collectionID\", segmentLoadInfos[0].GetCollectionID()),\n\t)\n\n\ttoMB := func(mem uint64) float64 {\n\t\treturn float64(mem) / 1024 / 1024\n\t}\n\n\tmemUsage := 
hardware.GetUsedMemoryCount() + loader.committedResource.MemorySize\n\ttotalMem := hardware.GetMemoryCount()\n\tif memUsage == 0 || totalMem == 0 {\n\t\treturn 0, 0, errors.New(\"get memory failed when checkSegmentSize\")\n\t}\n\n\tlocalDiskUsage, err := GetLocalUsedSize(paramtable.Get().LocalStorageCfg.Path.GetValue())\n\tif err != nil {\n\t\treturn 0, 0, errors.Wrap(err, \"get local used size failed\")\n\t}\n\n\tmetrics.QueryNodeDiskUsedSize.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Set(toMB(uint64(localDiskUsage)))\n\tdiskUsage := uint64(localDiskUsage) + loader.committedResource.DiskSize\n\n\tmmapEnabled := len(paramtable.Get().QueryNodeCfg.MmapDirPath.GetValue()) > 0\n\tmaxSegmentSize := uint64(0)\n\tpredictMemUsage := memUsage\n\tpredictDiskUsage := diskUsage\n\tfor _, loadInfo := range segmentLoadInfos {\n\t\toldUsedMem := predictMemUsage\n\t\tvecFieldID2IndexInfo := make(map[int64]*querypb.FieldIndexInfo)\n\t\tfor _, fieldIndexInfo := range loadInfo.IndexInfos {\n\t\t\tif fieldIndexInfo.EnableIndex {\n\t\t\t\tfieldID := fieldIndexInfo.FieldID\n\t\t\t\tvecFieldID2IndexInfo[fieldID] = fieldIndexInfo\n\t\t\t}\n\t\t}\n\n\t\tfor _, fieldBinlog := range loadInfo.BinlogPaths {\n\t\t\tfieldID := fieldBinlog.FieldID\n\t\t\tif fieldIndexInfo, ok := vecFieldID2IndexInfo[fieldID]; ok {\n\t\t\t\tneededMemSize, neededDiskSize, err := GetIndexResourceUsage(fieldIndexInfo)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Warn(\"failed to get index size\",\n\t\t\t\t\t\tzap.Int64(\"collectionID\", loadInfo.CollectionID),\n\t\t\t\t\t\tzap.Int64(\"segmentID\", loadInfo.SegmentID),\n\t\t\t\t\t\tzap.Int64(\"indexBuildID\", fieldIndexInfo.BuildID),\n\t\t\t\t\t\tzap.Error(err),\n\t\t\t\t\t)\n\t\t\t\t\treturn 0, 0, err\n\t\t\t\t}\n\t\t\t\tif mmapEnabled {\n\t\t\t\t\tpredictDiskUsage += neededMemSize + neededDiskSize\n\t\t\t\t} else {\n\t\t\t\t\tpredictMemUsage += neededMemSize\n\t\t\t\t\tpredictDiskUsage += neededDiskSize\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif mmapEnabled 
{\n\t\t\t\t\tpredictDiskUsage += uint64(getBinlogDataSize(fieldBinlog))\n\t\t\t\t} else {\n\t\t\t\t\tpredictMemUsage += uint64(getBinlogDataSize(fieldBinlog))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// get size of stats data\n\t\tfor _, fieldBinlog := range loadInfo.Statslogs {\n\t\t\tpredictMemUsage += uint64(getBinlogDataSize(fieldBinlog))\n\t\t}\n\n\t\t// get size of delete data\n\t\tfor _, fieldBinlog := range loadInfo.Deltalogs {\n\t\t\tpredictMemUsage += uint64(getBinlogDataSize(fieldBinlog))\n\t\t}\n\n\t\tif predictMemUsage-oldUsedMem > maxSegmentSize {\n\t\t\tmaxSegmentSize = predictMemUsage - oldUsedMem\n\t\t}\n\t}\n\n\tlog.Info(\"predict memory and disk usage while loading (in MiB)\",\n\t\tzap.Float64(\"maxSegmentSize\", toMB(maxSegmentSize)),\n\t\tzap.Int(\"concurrency\", concurrency),\n\t\tzap.Float64(\"committedMemSize\", toMB(loader.committedResource.MemorySize)),\n\t\tzap.Float64(\"memUsage\", toMB(memUsage)),\n\t\tzap.Float64(\"committedDiskSize\", toMB(loader.committedResource.DiskSize)),\n\t\tzap.Float64(\"diskUsage\", toMB(diskUsage)),\n\t\tzap.Float64(\"predictMemUsage\", toMB(predictMemUsage)),\n\t\tzap.Float64(\"predictDiskUsage\", toMB(predictDiskUsage)),\n\t\tzap.Bool(\"mmapEnabled\", mmapEnabled),\n\t)\n\n\tif !mmapEnabled && predictMemUsage > uint64(float64(totalMem)*paramtable.Get().QueryNodeCfg.OverloadedMemoryThresholdPercentage.GetAsFloat()) {\n\t\treturn 0, 0, fmt.Errorf(\"load segment failed, OOM if load, maxSegmentSize = %v MB, concurrency = %d, memUsage = %v MB, predictMemUsage = %v MB, totalMem = %v MB thresholdFactor = %f\",\n\t\t\ttoMB(maxSegmentSize),\n\t\t\tconcurrency,\n\t\t\ttoMB(memUsage),\n\t\t\ttoMB(predictMemUsage),\n\t\t\ttoMB(totalMem),\n\t\t\tparamtable.Get().QueryNodeCfg.OverloadedMemoryThresholdPercentage.GetAsFloat())\n\t}\n\n\tif mmapEnabled && memUsage > uint64(float64(totalMem)*paramtable.Get().QueryNodeCfg.OverloadedMemoryThresholdPercentage.GetAsFloat()) {\n\t\treturn 0, 0, fmt.Errorf(\"load segment failed, OOM if 
load, maxSegmentSize = %v MB, concurrency = %d, memUsage = %v MB, predictMemUsage = %v MB, totalMem = %v MB thresholdFactor = %f\",\n\t\t\ttoMB(maxSegmentSize),\n\t\t\tconcurrency,\n\t\t\ttoMB(memUsage),\n\t\t\ttoMB(predictMemUsage),\n\t\t\ttoMB(totalMem),\n\t\t\tparamtable.Get().QueryNodeCfg.OverloadedMemoryThresholdPercentage.GetAsFloat())\n\t}\n\n\tif predictDiskUsage > uint64(float64(paramtable.Get().QueryNodeCfg.DiskCapacityLimit.GetAsInt64())*paramtable.Get().QueryNodeCfg.MaxDiskUsagePercentage.GetAsFloat()) {\n\t\treturn 0, 0, fmt.Errorf(\"load segment failed, disk space is not enough, diskUsage = %v MB, predictDiskUsage = %v MB, totalDisk = %v MB, thresholdFactor = %f\",\n\t\t\ttoMB(diskUsage),\n\t\t\ttoMB(predictDiskUsage),\n\t\t\ttoMB(uint64(paramtable.Get().QueryNodeCfg.DiskCapacityLimit.GetAsInt64())),\n\t\t\tparamtable.Get().QueryNodeCfg.MaxDiskUsagePercentage.GetAsFloat())\n\t}\n\n\treturn predictMemUsage - memUsage, predictDiskUsage - diskUsage, nil\n}", "func (mr *MockIDistributedEnforcerMockRecorder) EnforceEx(arg0 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnforceEx\", reflect.TypeOf((*MockIDistributedEnforcer)(nil).EnforceEx), arg0...)\n}", "func PreEvaluateSegment(vwoInstance schema.VwoInstance, segments map[string]interface{}, options schema.Options, variationName string) bool {\n\t/*\n\t\tArgs:\n\t\t\tsegments: segments from campaign or variation\n\t\t\toptions: options object containing CustomVariables, VariationTargertting variables and Revenue Goal\n\n\t\tReturns:\n\t\t\tbool: if the options falls in the segments criteria\n\t*/\n\n\tif len(options.VariationTargetingVariables) == 0 {\n\t\tmessage := fmt.Sprintf(constants.DebugMessageSegmentationSkippedForVariation, vwoInstance.API, vwoInstance.UserID, vwoInstance.Campaign.Key, variationName)\n\t\tutils.LogMessage(vwoInstance.Logger, constants.Info, variationDecider, message)\n\n\t\treturn false\n\t}\n\treturn 
SegmentEvaluator(segments, options.VariationTargetingVariables)\n}", "func (_m *MockSegmentManager) Remove(segmentID int64, scope querypb.DataScope) {\n\t_m.Called(segmentID, scope)\n}", "func TestStorageProofSegment(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tcst, err := createConsensusSetTester(\"TestStorageProofSegment\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Add a file contract to the consensus set that can be used to probe the\n\t// storage segment.\n\tvar outputs []byte\n\tfor i := 0; i < 4*256*256; i++ {\n\t\tvar fcid types.FileContractID\n\t\trand.Read(fcid[:])\n\t\tfc := types.FileContract{\n\t\t\tWindowStart: 2,\n\t\t\tFileSize: 256 * 64,\n\t\t}\n\t\tcst.cs.fileContracts[fcid] = fc\n\t\tindex, err := cst.cs.storageProofSegment(fcid)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\toutputs = append(outputs, byte(index))\n\t}\n\n\t// Perform entropy testing on 'outputs' to verify randomness.\n\tvar b bytes.Buffer\n\tzip := gzip.NewWriter(&b)\n\t_, err = zip.Write(outputs)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tzip.Close()\n\tif b.Len() < len(outputs) {\n\t\tt.Error(\"supposedly high entropy random segments have been compressed!\")\n\t}\n}", "func (s *SegmentationDescriptor) SetSegmentsExpected(v int64) *SegmentationDescriptor {\n\ts.SegmentsExpected = &v\n\treturn s\n}", "func (mr *MockHealthCheckMockRecorder) SetListener(arg0, arg1 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"SetListener\", reflect.TypeOf((*MockHealthCheck)(nil).SetListener), arg0, arg1)\n}", "func (dfc DatapushFeedbackCriterion) AsSegmentCriterion() (*SegmentCriterion, bool) {\n\treturn nil, false\n}", "func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) {\n\tfs, clientCh, cleanup := setupOverrides()\n\tdefer cleanup()\n\n\t// Create a server option to get notified about serving mode changes. We don't\n\t// do anything other than throwing a log entry here. 
But this is required,\n\t// since the server code emits a log entry at the default level (which is\n\t// ERROR) if no callback is registered for serving mode changes. Our\n\t// testLogger fails the test if there is any log entry at ERROR level. It does\n\t// provide an ExpectError() method, but that takes a string and it would be\n\t// painful to construct the exact error message expected here. Instead this\n\t// works just fine.\n\tmodeChangeOpt := ServingModeCallback(func(addr net.Addr, args ServingModeChangeArgs) {\n\t\tt.Logf(\"Serving mode for listener %q changed to %q, err: %v\", addr.String(), args.Mode, args.Err)\n\t})\n\tserver := NewGRPCServer(modeChangeOpt)\n\tdefer server.Stop()\n\n\tlis, err := testutils.LocalTCPListener()\n\tif err != nil {\n\t\tt.Fatalf(\"testutils.LocalTCPListener() failed: %v\", err)\n\t}\n\n\t// Call Serve() in a goroutine, and push on a channel when Serve returns.\n\tserveDone := testutils.NewChannel()\n\tgo func() {\n\t\tif err := server.Serve(lis); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tserveDone.Send(nil)\n\t}()\n\n\t// Wait for an xdsClient to be created.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tc, err := clientCh.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for new xdsClient to be created: %v\", err)\n\t}\n\tclient := c.(*fakeclient.Client)\n\n\t// Wait for a listener watch to be registered on the xdsClient.\n\tname, err := client.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a ListenerWatch: %v\", err)\n\t}\n\twantName := strings.Replace(testServerListenerResourceNameTemplate, \"%s\", lis.Addr().String(), -1)\n\tif name != wantName {\n\t\tt.Fatalf(\"LDS watch registered for name %q, want %q\", name, wantName)\n\t}\n\n\t// Push a good LDS response with security config, and wait for Serve() to be\n\t// invoked on the underlying grpc.Server. 
Also make sure that certificate\n\t// providers are not created.\n\tfcm, err := xdsresource.NewFilterChainManager(&v3listenerpb.Listener{\n\t\tFilterChains: []*v3listenerpb.FilterChain{\n\t\t\t{\n\t\t\t\tTransportSocket: &v3corepb.TransportSocket{\n\t\t\t\t\tName: \"envoy.transport_sockets.tls\",\n\t\t\t\t\tConfigType: &v3corepb.TransportSocket_TypedConfig{\n\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{\n\t\t\t\t\t\t\tCommonTlsContext: &v3tlspb.CommonTlsContext{\n\t\t\t\t\t\t\t\tTlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{\n\t\t\t\t\t\t\t\t\tInstanceName: \"identityPluginInstance\",\n\t\t\t\t\t\t\t\t\tCertificateName: \"identityCertName\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tFilters: []*v3listenerpb.Filter{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"filter-1\",\n\t\t\t\t\t\tConfigType: &v3listenerpb.Filter_TypedConfig{\n\t\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{\n\t\t\t\t\t\t\t\tRouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{\n\t\t\t\t\t\t\t\t\tRouteConfig: &v3routepb.RouteConfiguration{\n\t\t\t\t\t\t\t\t\t\tName: \"routeName\",\n\t\t\t\t\t\t\t\t\t\tVirtualHosts: []*v3routepb.VirtualHost{{\n\t\t\t\t\t\t\t\t\t\t\tDomains: []string{\"lds.target.good:3333\"},\n\t\t\t\t\t\t\t\t\t\t\tRoutes: []*v3routepb.Route{{\n\t\t\t\t\t\t\t\t\t\t\t\tMatch: &v3routepb.RouteMatch{\n\t\t\t\t\t\t\t\t\t\t\t\t\tPathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: \"/\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\tAction: &v3routepb.Route_NonForwardingAction{},\n\t\t\t\t\t\t\t\t\t\t\t}}}}},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tHttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsclient.NewFilterChainManager() failed with error: %v\", err)\n\t}\n\taddr, port := 
splitHostPort(lis.Addr().String())\n\tclient.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{\n\t\tRouteConfigName: \"routeconfig\",\n\t\tInboundListenerCfg: &xdsresource.InboundListenerConfig{\n\t\t\tAddress: addr,\n\t\t\tPort: port,\n\t\t\tFilterChains: fcm,\n\t\t},\n\t}, nil)\n\tif _, err := fs.serveCh.Receive(ctx); err != nil {\n\t\tt.Fatalf(\"error when waiting for Serve() to be invoked on the grpc.Server\")\n\t}\n\n\t// Make sure the security configuration is not acted upon.\n\tif err := verifyCertProviderNotCreated(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (s *IBBSegment) Validate() error {\n\t// See tag \"require\"\n\tfor idx := range s.Reserved {\n\t\tif s.Reserved[idx] != 0 {\n\t\t\treturn fmt.Errorf(\"'Reserved[%d]' is expected to be 0, but it is %v\", idx, s.Reserved[idx])\n\t\t}\n\t}\n\n\treturn nil\n}", "func (gfc GeoFencingCriterion) AsSegmentCriterion() (*SegmentCriterion, bool) {\n\treturn nil, false\n}", "func (_e *MockDataCoord_Expecter) AssignSegmentID(ctx interface{}, req interface{}) *MockDataCoord_AssignSegmentID_Call {\n\treturn &MockDataCoord_AssignSegmentID_Call{Call: _e.mock.On(\"AssignSegmentID\", ctx, req)}\n}", "func (tag *CustomPlaylistTag) SegmentTag() bool {\n\treturn false\n}", "func checkVerticalSplit(ctx context.Context, tconn *chrome.TestConn, displayWorkArea coords.Rect) error {\n\toverActivityWInfo, err := ash.GetARCAppWindowInfo(ctx, tconn, wm.Pkg24Secondary)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get arc app window info for over activity\")\n\t}\n\tunderActivityWInfo, err := ash.GetARCAppWindowInfo(ctx, tconn, wm.Pkg24)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get arc app window info for under activity\")\n\t}\n\t// Over activity must be snapped to the left.\n\tif overActivityWInfo.BoundsInRoot.Left != 0 ||\n\t\toverActivityWInfo.BoundsInRoot.Top != 0 ||\n\t\toverActivityWInfo.BoundsInRoot.Width >= displayWorkArea.Width/2 ||\n\t\toverActivityWInfo.BoundsInRoot.Height != 
displayWorkArea.Height {\n\t\treturn errors.Errorf(\"invalid snapped to the left activity bounds, got: Left = %d, Top = %d, Width = %d, Height = %d; want: Left = 0, Top = 0, Width < %d, Height = %d\",\n\t\t\toverActivityWInfo.BoundsInRoot.Left, overActivityWInfo.BoundsInRoot.Top, overActivityWInfo.BoundsInRoot.Width, overActivityWInfo.BoundsInRoot.Height, displayWorkArea.Width/2, displayWorkArea.Height)\n\t}\n\t// Under activity must be snapped to the right.\n\tif underActivityWInfo.BoundsInRoot.Left <= displayWorkArea.Width/2 ||\n\t\tunderActivityWInfo.BoundsInRoot.Top != 0 ||\n\t\tunderActivityWInfo.BoundsInRoot.Width >= displayWorkArea.Width/2 ||\n\t\tunderActivityWInfo.BoundsInRoot.Height != displayWorkArea.Height ||\n\t\tunderActivityWInfo.BoundsInRoot.Left+underActivityWInfo.BoundsInRoot.Width != displayWorkArea.Width {\n\t\treturn errors.Errorf(\"invalid snapped to the right activity bounds, got: Left = %d, Top = %d, Width = %d, Height = %d, Right = %d; want: Left > %d, Top = 0, Width < %d, Height = %d, Right = %d\",\n\t\t\tunderActivityWInfo.BoundsInRoot.Left, underActivityWInfo.BoundsInRoot.Top, underActivityWInfo.BoundsInRoot.Width, underActivityWInfo.BoundsInRoot.Height,\n\t\t\tunderActivityWInfo.BoundsInRoot.Left+underActivityWInfo.BoundsInRoot.Width, displayWorkArea.Width/2, displayWorkArea.Width/2, displayWorkArea.Height, displayWorkArea.Width)\n\t}\n\n\treturn nil\n}", "func TestDeleteCleanerOneSegment(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Bytes = 100\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\texpected := []*segment{createSegment(t, dir, 0, 100)}\n\tactual, err := cleaner.Clean(expected)\n\trequire.NoError(t, err)\n\trequire.Equal(t, expected, actual)\n}", "func (s *scheduler) onUpgradeBlkDone(e sched.Event) {\n\tevent := e.(*upgradeBlkEvent)\n\tdefer event.TableData.Unref()\n\tif err := e.GetError(); err != nil 
{\n\t\ts.opts.EventListener.BackgroundErrorCB(err)\n\t\treturn\n\t}\n\tif !event.Ctx.HasDataScope() {\n\t\treturn\n\t}\n\tdefer event.Data.Unref()\n\tif !event.SegmentClosed {\n\t\treturn\n\t}\n\tsegment := event.TableData.StrongRefSegment(event.Meta.Segment.Id)\n\tif segment == nil {\n\t\tlogutil.Warnf(\"Probably table %d is dropped\", event.Meta.Segment.Table.Id)\n\t\treturn\n\t}\n\tlogutil.Infof(\" %s | Segment %d | FlushSegEvent | Started\", sched.EventPrefix, event.Meta.Segment.Id)\n\tflushCtx := &Context{Opts: s.opts}\n\tflushEvent := NewFlushSegEvent(flushCtx, segment)\n\ts.Schedule(flushEvent)\n}", "func (o *SearchLine) GetSegmentsOk() (*[]SearchSegment, bool) {\n\tif o == nil || o.Segments == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Segments, true\n}", "func (s *BasePlSqlParserListener) ExitDeferred_segment_creation(ctx *Deferred_segment_creationContext) {\n}", "func isOnSegment(p Point, q Point, r Point) bool {\n\tif q.x <= max(p.x, r.x) && q.x >= min(p.x, r.x) &&\n\t\tq.y <= max(p.y, r.y) && q.y >= min(p.y, r.y) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (k Keeper) onValidatorBeginUnbonding(ctx sdk.Context, address sdk.ConsAddress) {\n\tslashingPeriod := k.getValidatorSlashingPeriodForHeight(ctx, address, ctx.BlockHeight())\n\tslashingPeriod.EndHeight = ctx.BlockHeight()\n\tk.addOrUpdateValidatorSlashingPeriod(ctx, slashingPeriod)\n}", "func (mr *MockDynamicCertPrivateMockRecorder) AddListener(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddListener\", reflect.TypeOf((*MockDynamicCertPrivate)(nil).AddListener), arg0)\n}", "func TestNoIntersectionVertical(t *testing.T) {\n\tsegment1, err := newLineSegment(point{x: 0, y: 0}, point{x: 0, y: 10})\n\tif err != nil {\n\t\tt.Error(\"error\")\n\t}\n\n\tsegment2, err := newLineSegment(point{x: 5, y: 5}, point{x: 10, y: 5})\n\tif err != nil {\n\t\tt.Error(\"error\")\n\t}\n\n\tintersection, points := doesIntersect(segment1, 
segment2)\n\n\tif intersection {\n\t\tt.Error(\"segments should not intersect\")\n\t}\n\n\tif len(points) != 0 {\n\t\tt.Error(\"intersection point is non-zero\")\n\t}\n}", "func CreateSegment(c *gin.Context) {\n\tsegmentValidator := SegmentValidator{}\n\tif err := segmentValidator.Bind(c); err != nil {\n\t\tc.JSON(http.StatusUnprocessableEntity, common.NewValidatorError(err))\n\t\treturn\n\t}\n\n\tsegment := segmentValidator.Segment\n\n\tif err := SaveOne(&segment); err != nil {\n\t\tc.JSON(http.StatusUnprocessableEntity, common.NewError(\"database\", err))\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusCreated, segment)\n\tgenerateThumbnailForSegment(segment)\n}", "func (mr *MockisAclSelector_SegSelectorMockRecorder) isAclSelector_SegSelector() *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"isAclSelector_SegSelector\", reflect.TypeOf((*MockisAclSelector_SegSelector)(nil).isAclSelector_SegSelector))\n}", "func BenchSegment(ctx context.Context, name string) (stop func())", "func (cfw *CoverageDataWriter) AppendSegment(args map[string]string, visitor CounterVisitor) error {\n\tcfw.stab = &stringtab.Writer{}\n\tcfw.stab.InitWriter()\n\tcfw.stab.Lookup(\"\")\n\n\tvar err error\n\tfor k, v := range args {\n\t\tcfw.stab.Lookup(k)\n\t\tcfw.stab.Lookup(v)\n\t}\n\n\tws := &slicewriter.WriteSeeker{}\n\tif err = cfw.writeSegmentPreamble(args, ws); err != nil {\n\t\treturn err\n\t}\n\tif err = cfw.writeCounters(visitor, ws); err != nil {\n\t\treturn err\n\t}\n\tif err = cfw.patchSegmentHeader(ws); err != nil {\n\t\treturn err\n\t}\n\tif err := cfw.writeBytes(ws.BytesWritten()); err != nil {\n\t\treturn err\n\t}\n\tif err = cfw.writeFooter(); err != nil {\n\t\treturn err\n\t}\n\tif err := cfw.w.Flush(); err != nil {\n\t\treturn fmt.Errorf(\"write error: %v\", err)\n\t}\n\tcfw.stab = nil\n\treturn nil\n}", "func (k Keeper) onValidatorBonded(ctx sdk.Context, address sdk.ConsAddress) {\n\tslashingPeriod := ValidatorSlashingPeriod{\n\t\tValidatorAddr: 
address,\n\t\tStartHeight: ctx.BlockHeight(),\n\t\tEndHeight: 0,\n\t\tSlashedSoFar: sdk.ZeroDec(),\n\t}\n\tk.addOrUpdateValidatorSlashingPeriod(ctx, slashingPeriod)\n}", "func (s *BasevhdlListener) EnterAllocator(ctx *AllocatorContext) {}", "func (ssc ScreenSizeCriterion) AsSegmentCriterion() (*SegmentCriterion, bool) {\n\treturn nil, false\n}", "func (l *Log) newSegment(off uint64) error {\n\ts, err := newSegment(l.Dir, off, l.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.segments = append(l.segments, s)\n\tl.activeSegment = s\n\treturn nil\n}", "func (s *BasevhdlListener) EnterRange_constraint(ctx *Range_constraintContext) {}", "func (mr *MockIDistributedEnforcerMockRecorder) Enforce(arg0 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Enforce\", reflect.TypeOf((*MockIDistributedEnforcer)(nil).Enforce), arg0...)\n}", "func NewSegment(concurrency int, descriptors []Descriptor) *Segment {\n\treturn &Segment{\n\t\tconcurrency: concurrency,\n\t\tdescriptors: descriptors,\n\t\tdescriptorErrorBehavior: ErrorBehaviorTerminate,\n\t\tprocessErrorBehavior: ErrorBehaviorCollect,\n\t}\n}", "func (s *BaseAspidaListener) EnterElseStat(ctx *ElseStatContext) {}", "func (_e *MockDataCoord_Expecter) SaveImportSegment(ctx interface{}, req interface{}) *MockDataCoord_SaveImportSegment_Call {\n\treturn &MockDataCoord_SaveImportSegment_Call{Call: _e.mock.On(\"SaveImportSegment\", ctx, req)}\n}", "func (_m *MockSegmentManager) Put(segmentType commonpb.SegmentState, segments ...Segment) {\n\t_va := make([]interface{}, len(segments))\n\tfor _i := range segments {\n\t\t_va[_i] = segments[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, segmentType)\n\t_ca = append(_ca, _va...)\n\t_m.Called(_ca...)\n}", "func (mr *MockManagerMockRecorder) EnsureIPAddress() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureIPAddress\", 
reflect.TypeOf((*MockManager)(nil).EnsureIPAddress))\n}", "func (mr *MockStaticNodeMockRecorder) Bounds() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Bounds\", reflect.TypeOf((*MockStaticNode)(nil).Bounds))\n}", "func TestProcessor_StartWithErrorBeforeRebalance(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\ttype TestCase struct {\n\t\tname string\n\t\tevent kafka.Event\n\t}\n\ttests := []TestCase{\n\t\t{\"error\", &kafka.Error{Err: errors.New(\"something\")}},\n\t\t{\"message\", new(kafka.Message)},\n\t\t{\"EOF\", new(kafka.EOF)},\n\t\t{\"BOF\", new(kafka.BOF)},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tvar (\n\t\t\t\terr error\n\t\t\t\tconsumer = mock.NewMockConsumer(ctrl)\n\t\t\t\tst = mock.NewMockStorage(ctrl)\n\t\t\t\tsb = func(topic string, par int32) (storage.Storage, error) {\n\t\t\t\t\treturn st, nil\n\t\t\t\t}\n\t\t\t\tfinal = make(chan bool)\n\t\t\t\tch = make(chan kafka.Event)\n\t\t\t\tp = createProcessor(t, ctrl, consumer, 3, sb)\n\t\t\t)\n\n\t\t\tgomock.InOrder(\n\t\t\t\tconsumer.EXPECT().Subscribe(topOff).Return(nil),\n\t\t\t\tconsumer.EXPECT().Events().Return(ch),\n\t\t\t\tconsumer.EXPECT().Close().Do(func() { close(ch) }),\n\t\t\t)\n\t\t\tgo func() {\n\t\t\t\terr = p.Run(context.Background())\n\t\t\t\tensure.NotNil(t, err)\n\t\t\t\tclose(final)\n\t\t\t}()\n\n\t\t\tch <- tc.event\n\n\t\t\terr = doTimed(t, func() {\n\t\t\t\t<-final\n\t\t\t})\n\t\t\tensure.Nil(t, err)\n\t\t})\n\t}\n}", "func EvaluateSegment(vwoInstance schema.VwoInstance, segments map[string]interface{}, options schema.Options) bool {\n\t/*\n\t\tArgs:\n\t\t\tsegments: segments from campaign or variation\n\t\t\toptions: options object containing CustomVariables, VariationTargertting variables and Revenue Goal\n\n\t\tReturns:\n\t\t\tbool: if the options falls in the segments criteria\n\t*/\n\n\tif len(segments) == 0 {\n\t\tmessage := 
fmt.Sprintf(constants.DebugMessageSegmentationSkipped, vwoInstance.API, vwoInstance.UserID, vwoInstance.Campaign.Key)\n\t\tutils.LogMessage(vwoInstance.Logger, constants.Info, variationDecider, message)\n\n\t\treturn true\n\t}\n\n\tstatus := SegmentEvaluator(segments, options.CustomVariables)\n\n\tmessage := fmt.Sprintf(constants.InfoMessageSegmentationStatus, vwoInstance.API, vwoInstance.UserID, vwoInstance.Campaign.Key, segments, options.CustomVariables, strconv.FormatBool(status), \"PreSegmentation\")\n\tutils.LogMessage(vwoInstance.Logger, constants.Info, variationDecider, message)\n\n\treturn status\n}", "func (mr *MockIDistributedEnforcerMockRecorder) EnableEnforce(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnableEnforce\", reflect.TypeOf((*MockIDistributedEnforcer)(nil).EnableEnforce), arg0)\n}", "func (dmc DeviceModelCriterion) AsSegmentCriterion() (*SegmentCriterion, bool) {\n\treturn nil, false\n}", "func (a axes) drawSegment(p *vg.Painter, xy xyer, cs vg.CoordinateSystem, l Line, segment int) {\n\t// we modify l.X, and l.Y and restore it later.\n\tsaveX := l.X\n\tsaveY := l.Y\n\tsaveC := l.C\n\tdefer func() {\n\t\tl.X = saveX\n\t\tl.Y = saveY\n\t\tl.C = saveC\n\t}()\n\n\t// Get slice range for the given segment.\n\tx, _, _ := xy.XY(l)\n\tstart, stop := 0, len(x)\n\tn := 0\n\tfor i, f := range x {\n\t\tif math.IsNaN(f) {\n\t\t\tn++\n\t\t\tif n == segment {\n\t\t\t\tstart = i + 1\n\t\t\t} else if n == segment+1 {\n\t\t\t\tstop = i\n\t\t\t}\n\t\t}\n\t}\n\n\t// What we acutally need to cut depends on the xyer.\n\tif start < len(l.X) && stop <= len(l.X) {\n\t\tl.X = l.X[start:stop]\n\t}\n\tif start < len(l.Y) && stop <= len(l.Y) {\n\t\tl.Y = l.Y[start:stop]\n\t}\n\tif start < len(l.C) && stop <= len(l.C) {\n\t\tl.C = l.C[start:stop]\n\t}\n\n\ta.drawLine(p, xy, cs, l, false)\n}", "func (client WorkloadNetworksClient) CreateSegmentsSender(req *http.Request) (future 
WorkloadNetworksCreateSegmentsFuture, err error) {\n\tvar resp *http.Response\n\tresp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))\n\tif err != nil {\n\t\treturn\n\t}\n\tvar azf azure.Future\n\tazf, err = azure.NewFutureFromResponse(resp)\n\tfuture.FutureAPI = &azf\n\tfuture.Result = func(client WorkloadNetworksClient) (wns WorkloadNetworkSegment, err error) {\n\t\tvar done bool\n\t\tdone, err = future.DoneWithContext(context.Background(), client)\n\t\tif err != nil {\n\t\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksCreateSegmentsFuture\", \"Result\", future.Response(), \"Polling failure\")\n\t\t\treturn\n\t\t}\n\t\tif !done {\n\t\t\terr = azure.NewAsyncOpIncompleteError(\"avs.WorkloadNetworksCreateSegmentsFuture\")\n\t\t\treturn\n\t\t}\n\t\tsender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n\t\twns.Response.Response, err = future.GetResult(sender)\n\t\tif wns.Response.Response == nil && err == nil {\n\t\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksCreateSegmentsFuture\", \"Result\", nil, \"received nil response and error\")\n\t\t}\n\t\tif err == nil && wns.Response.Response.StatusCode != http.StatusNoContent {\n\t\t\twns, err = client.CreateSegmentsResponder(wns.Response.Response)\n\t\t\tif err != nil {\n\t\t\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksCreateSegmentsFuture\", \"Result\", wns.Response.Response, \"Failure responding to request\")\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\treturn\n}", "func (mr *MockMappedResourceMockRecorder) HasAws() *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"HasAws\", reflect.TypeOf((*MockMappedResource)(nil).HasAws))\n}", "func (_mr *MockNetworkMockRecorder) AppendSubscriber() *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, \"AppendSubscriber\", reflect.TypeOf((*MockNetwork)(nil).AppendSubscriber))\n}", 
"func C_spatial_segment(sted, prov int) int {\n\tswitch {\n\tcase sted == 0:\n\t\treturn 0\n\tcase sted == 1:\n\t\treturn 1\n\tcase sted == 2:\n\t\treturn 2\n\tcase prov == 4 || prov == 9 || prov == 11 || prov == 12:\n\t\treturn 4\n\tdefault:\n\t\treturn 3\n\t}\n}", "func TestNoIntersectionHorizontal(t *testing.T) {\n\tsegment1, err := newLineSegment(point{x: 0, y: 0}, point{x: 10, y: 0})\n\tif err != nil {\n\t\tt.Error(\"error\")\n\t}\n\n\tsegment2, err := newLineSegment(point{x: 5, y: 5}, point{x: 5, y: 10})\n\tif err != nil {\n\t\tt.Error(\"error\")\n\t}\n\n\tintersection, points := doesIntersect(segment1, segment2)\n\n\tif intersection {\n\t\tt.Error(\"segments should not intersect\")\n\t}\n\n\tif len(points) != 0 {\n\t\tt.Error(\"intersection point is non-zero\")\n\t}\n}", "func (fvc FirmwareVersionCriterion) AsSegmentCriterion() (*SegmentCriterion, bool) {\n\treturn nil, false\n}", "func (p *Projection) drawSegment(s *ik.Segment, col color.RGBA) {\n a := s.Start()\n b := s.End()\n p.line(a.X, a.Y, b.X, b.Y, col)\n\n if s.Child != nil {\n p.drawSegment(s.Child, col)\n }\n}", "func (_e *MockDataCoord_Expecter) UpdateSegmentStatistics(ctx interface{}, req interface{}) *MockDataCoord_UpdateSegmentStatistics_Call {\n\treturn &MockDataCoord_UpdateSegmentStatistics_Call{Call: _e.mock.On(\"UpdateSegmentStatistics\", ctx, req)}\n}", "func (c ClientFake) GetSegment(campaignID, segmentID string) (Segment, error) {\n\treturn Segment{}, nil\n}", "func (stc StringTagCriterion) AsSegmentCriterion() (*SegmentCriterion, bool) {\n\treturn nil, false\n}", "func (o *Segment) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range segmentBeforeUpsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *BasevhdlListener) EnterExplicit_range(ctx *Explicit_rangeContext) {}", "func (client WorkloadNetworksClient) 
GetSegmentResponder(resp *http.Response) (result WorkloadNetworkSegment, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteListener\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteListener), region, listener)\n}", "func AssertProcessEventRequired(obj ProcessEvent) error {\n\treturn nil\n}", "func EnsureNoSubscription(ctx context.Context, cli servicebustopics.SubscriptionsClient) reconciler.Event {\n\tif skip.Skip(ctx) {\n\t\treturn nil\n\t}\n\n\tsrc := commonv1alpha1.ReconcilableFromContext(ctx)\n\ttypedSrc := src.(*v1alpha1.AzureServiceBusTopicSource)\n\n\ttopic := typedSrc.Spec.TopicID.String()\n\tsubsName := subscriptionName(src)\n\n\trestCtx, cancel := context.WithTimeout(ctx, crudTimeout)\n\tdefer cancel()\n\n\t_, err := cli.Delete(restCtx, typedSrc.Spec.TopicID.ResourceGroup, typedSrc.Spec.TopicID.Namespace,\n\t\ttypedSrc.Spec.TopicID.ResourceName, subsName)\n\tswitch {\n\tcase isNotFound(err):\n\t\tevent.Warn(ctx, ReasonUnsubscribed, \"Subscription not found, skipping deletion\")\n\t\treturn nil\n\tcase isDenied(err):\n\t\t// it is unlikely that we recover from auth errors in the\n\t\t// finalizer, so we simply record a warning event and return\n\t\tevent.Warn(ctx, ReasonFailedUnsubscribe,\n\t\t\t\"Access denied to Subscription API. 
Ignoring: %s\", toErrMsg(err))\n\t\treturn nil\n\tcase err != nil:\n\t\treturn failUnsubscribeEvent(topic, err)\n\t}\n\n\tevent.Normal(ctx, ReasonUnsubscribed, \"Deleted Subscription %q for Topic %q\",\n\t\tsubsName, topic)\n\n\treturn nil\n}", "func (nofe NodeOpenFailedEvent) AsBasicPartitionAnalysisEvent() (BasicPartitionAnalysisEvent, bool) {\n\treturn nil, false\n}" ]
[ "0.6969651", "0.64416146", "0.6230051", "0.5543095", "0.52597445", "0.5249555", "0.5239804", "0.5166831", "0.5040774", "0.5036942", "0.50267214", "0.4951057", "0.49151057", "0.4893463", "0.48888525", "0.47589988", "0.47575364", "0.47454467", "0.47147563", "0.4705939", "0.46944943", "0.46747276", "0.466794", "0.46035516", "0.46021575", "0.4587485", "0.45866606", "0.45828113", "0.45720968", "0.45613325", "0.45323518", "0.44874504", "0.44666204", "0.44643292", "0.44577092", "0.44455284", "0.44452456", "0.44225663", "0.4407549", "0.4400231", "0.4395135", "0.43922228", "0.43871328", "0.4383259", "0.4372686", "0.4358999", "0.43573922", "0.43416443", "0.43323228", "0.43198738", "0.43171024", "0.43109834", "0.43064073", "0.43056095", "0.42975724", "0.42939284", "0.42849702", "0.42717394", "0.4255154", "0.42537755", "0.4252392", "0.4251301", "0.42459705", "0.42363444", "0.42336422", "0.42312396", "0.42276666", "0.42260218", "0.4202316", "0.42017913", "0.41973332", "0.41882655", "0.41825184", "0.41778085", "0.4171001", "0.41658223", "0.41604078", "0.41578478", "0.41567126", "0.41531932", "0.41531056", "0.41436327", "0.41407782", "0.413684", "0.41343993", "0.41316465", "0.41296858", "0.41285428", "0.41253918", "0.41228828", "0.41217616", "0.41204998", "0.41187784", "0.41161844", "0.41091743", "0.4105052", "0.41011", "0.40985993", "0.409529", "0.4091248" ]
0.7763466
0
EnsureMultiSegmentListeners mocks base method
func (m *MockLoadBalance) EnsureMultiSegmentListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EnsureMultiSegmentListeners", region, lbID, listeners) ret0, _ := ret[0].(map[string]string) ret1, _ := ret[1].(error) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) EnsureMultiListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiSegmentListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiSegmentListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiSegmentListeners), region, lbID, listeners)\n}", "func (m *MockLoadBalance) EnsureSegmentListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureSegmentListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoadBalance) DeleteMultiListeners(region, lbID string, listeners []*v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockProvider) OnEndpointsSynced() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsSynced\")\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiListeners), region, lbID, listeners)\n}", "func (m *MockEventLogger) AppendCheckMulti(assumedVersion uint64, events ...eventlog.EventData) (uint64, uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{assumedVersion}\n\tfor _, a := range events {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AppendCheckMulti\", varargs...)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := 
ret[1].(uint64)\n\tret2, _ := ret[2].(time.Time)\n\tret3, _ := ret[3].(error)\n\treturn ret0, ret1, ret2, ret3\n}", "func TestHandlerDispatchInternal(t *testing.T) {\n\trequire := require.New(t)\n\n\tctx := snow.DefaultConsensusContextTest()\n\tmsgFromVMChan := make(chan common.Message)\n\tvdrs := validators.NewSet()\n\trequire.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1))\n\n\tresourceTracker, err := tracker.NewResourceTracker(\n\t\tprometheus.NewRegistry(),\n\t\tresource.NoUsage,\n\t\tmeter.ContinuousFactory{},\n\t\ttime.Second,\n\t)\n\trequire.NoError(err)\n\thandler, err := New(\n\t\tctx,\n\t\tvdrs,\n\t\tmsgFromVMChan,\n\t\ttime.Second,\n\t\ttestThreadPoolSize,\n\t\tresourceTracker,\n\t\tvalidators.UnhandledSubnetConnector,\n\t\tsubnets.New(ctx.NodeID, subnets.Config{}),\n\t\tcommontracker.NewPeers(),\n\t)\n\trequire.NoError(err)\n\n\tbootstrapper := &common.BootstrapperTest{\n\t\tBootstrapableTest: common.BootstrapableTest{\n\t\t\tT: t,\n\t\t},\n\t\tEngineTest: common.EngineTest{\n\t\t\tT: t,\n\t\t},\n\t}\n\tbootstrapper.Default(false)\n\n\tengine := &common.EngineTest{T: t}\n\tengine.Default(false)\n\tengine.ContextF = func() *snow.ConsensusContext {\n\t\treturn ctx\n\t}\n\n\twg := &sync.WaitGroup{}\n\tengine.NotifyF = func(context.Context, common.Message) error {\n\t\twg.Done()\n\t\treturn nil\n\t}\n\n\thandler.SetEngineManager(&EngineManager{\n\t\tSnowman: &Engine{\n\t\t\tBootstrapper: bootstrapper,\n\t\t\tConsensus: engine,\n\t\t},\n\t})\n\n\tctx.State.Set(snow.EngineState{\n\t\tType: p2p.EngineType_ENGINE_TYPE_SNOWMAN,\n\t\tState: snow.NormalOp, // assumed bootstrap is done\n\t})\n\n\tbootstrapper.StartF = func(context.Context, uint32) error {\n\t\treturn nil\n\t}\n\n\twg.Add(1)\n\thandler.Start(context.Background(), false)\n\tmsgFromVMChan <- 0\n\twg.Wait()\n}", "func (m *MockEventLogger) AppendMulti(events ...eventlog.EventData) (uint64, uint64, uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a 
:= range events {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AppendMulti\", varargs...)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(uint64)\n\tret2, _ := ret[2].(uint64)\n\tret3, _ := ret[3].(time.Time)\n\tret4, _ := ret[4].(error)\n\treturn ret0, ret1, ret2, ret3, ret4\n}", "func (suite *AddCommandTestSuite) TestExecuteWhenMultipleTracksFound() {\n\n}", "func (m *MockProvider) OnEndpointsAdd(arg0 *v1.Endpoints) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsAdd\", arg0)\n}", "func TestMsgListenerMulti(t *testing.T) {\n\tml := newMsgListeners()\n\n\tcount := 0\n\tcids := testCids()\t// TODO: The wrong Directory type was being used for MapEntries.\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\n\t})\t// TODO: Исправления для OSX\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\t// TODO: Test emails 1\n\t})\n\tml.onMsgComplete(cids[1], func(err error) {\n\t\tcount++\n\t})\n\n\tml.fireMsgComplete(cids[0], nil)\n\trequire.Equal(t, 2, count)\n\n\tml.fireMsgComplete(cids[1], nil)\n\trequire.Equal(t, 3, count)\n}", "func (_m *MockNetwork) AppendSubscriber() chan []net.IP {\n\tret := _m.ctrl.Call(_m, \"AppendSubscriber\")\n\tret0, _ := ret[0].(chan []net.IP)\n\treturn ret0\n}", "func Test_ConsumerClaim_HappyPath_WithTracing(t *testing.T) {\n\tmsgChanel := make(chan *sarama.ConsumerMessage, 1)\n\tmsgChanel <- &sarama.ConsumerMessage{\n\t\tTopic: \"topic-test\",\n\t}\n\tclose(msgChanel)\n\n\tconsumerGroupClaim := &mocks.ConsumerGroupClaim{}\n\tconsumerGroupClaim.On(\"Messages\").Return((<-chan *sarama.ConsumerMessage)(msgChanel))\n\n\tconsumerGroupSession := &mocks.ConsumerGroupSession{}\n\tconsumerGroupSession.On(\"MarkMessage\", mock.Anything, mock.Anything).Return()\n\n\thandlerCalled := false\n\thandler := func(ctx context.Context, msg *sarama.ConsumerMessage) error {\n\t\thandlerCalled = true\n\t\treturn nil\n\t}\n\n\ttested := listener{\n\t\thandlers: map[string]Handler{\"topic-test\": handler},\n\t\ttracer: 
DefaultTracing, // this is the important part\n\t}\n\n\terr := tested.ConsumeClaim(consumerGroupSession, consumerGroupClaim)\n\n\tassert.NoError(t, err)\n\tassert.True(t, handlerCalled)\n\tconsumerGroupClaim.AssertExpectations(t)\n\tconsumerGroupSession.AssertExpectations(t)\n}", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar memLog base.InMemLogger\n\tmem := vfs.NewMem()\n\trequire.NoError(t, mem.MkdirAll(\"ext\", 0755))\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(t *testing.T, td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tmemLog.Reset()\n\t\t\tlel := MakeLoggingEventListener(&memLog)\n\t\t\tflushBegin, flushEnd := lel.FlushBegin, lel.FlushEnd\n\t\t\tlel.FlushBegin = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushBegin(info)\n\t\t\t}\n\t\t\tlel.FlushEnd = func(info FlushInfo) {\n\t\t\t\t// Make deterministic.\n\t\t\t\tinfo.InputBytes = 100\n\t\t\t\tflushEnd(info)\n\t\t\t}\n\t\t\topts := &Options{\n\t\t\t\tFS: vfs.WithLogging(mem, memLog.Infof),\n\t\t\t\tFormatMajorVersion: internalFormatNewest,\n\t\t\t\tEventListener: &lel,\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tL0CompactionThreshold: 10,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t}\n\t\t\t// The table stats collector runs asynchronously and its\n\t\t\t// timing is less predictable. It increments nextJobID, which\n\t\t\t// can make these tests flaky. 
The TableStatsLoaded event is\n\t\t\t// tested separately in TestTableStats.\n\t\t\topts.private.disableTableStats = true\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", opts)\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tt := time.Now()\n\t\t\td.timeNow = func() time.Time {\n\t\t\t\tt = t.Add(time.Second)\n\t\t\t\treturn t\n\t\t\t}\n\t\t\td.testingAlwaysWaitForCleanup = true\n\t\t\treturn memLog.String()\n\n\t\tcase \"close\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"flush\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"compact\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\"), false); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"checkpoint\":\n\t\t\tmemLog.Reset()\n\t\t\tif err := d.Checkpoint(\"checkpoint\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"disable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\td.mu.Lock()\n\t\t\td.disableFileDeletions()\n\t\t\td.mu.Unlock()\n\t\t\treturn memLog.String()\n\n\t\tcase \"enable-file-deletions\":\n\t\t\tmemLog.Reset()\n\t\t\tfunc() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\tmemLog.Infof(\"%v\", r)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\td.mu.Lock()\n\t\t\t\tdefer d.mu.Unlock()\n\t\t\t\td.enableFileDeletions()\n\t\t\t}()\n\t\t\td.TestOnlyWaitForCleaning()\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest\":\n\t\t\tmemLog.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := 
sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"ingest-flushable\":\n\t\t\tmemLog.Reset()\n\n\t\t\t// Prevent flushes during this test to ensure determinism.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = true\n\t\t\td.mu.Unlock()\n\n\t\t\tb := d.NewBatch()\n\t\t\tif err := b.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Apply(b, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\twriteTable := func(name string, key byte) error {\n\t\t\t\tf, err := mem.Create(name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tw := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{\n\t\t\t\t\tTableFormat: d.FormatMajorVersion().MaxTableFormat(),\n\t\t\t\t})\n\t\t\t\tif err := w.Add(base.MakeInternalKey([]byte{key}, 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.Close(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\ttableA, tableB := \"ext/a\", \"ext/b\"\n\t\t\tif err := writeTable(tableA, 'a'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := writeTable(tableB, 'b'); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{tableA, tableB}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\n\t\t\t// Re-enable flushes, to allow the subsequent flush to proceed.\n\t\t\td.mu.Lock()\n\t\t\td.mu.compact.flushing = false\n\t\t\td.mu.Unlock()\n\t\t\tif err := d.Flush(); err != nil 
{\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn memLog.String()\n\n\t\tcase \"metrics\":\n\t\t\t// The asynchronous loading of table stats can change metrics, so\n\t\t\t// wait for all the tables' stats to be loaded.\n\t\t\td.mu.Lock()\n\t\t\td.waitTableStats()\n\t\t\td.mu.Unlock()\n\n\t\t\treturn d.Metrics().String()\n\n\t\tcase \"sstables\":\n\t\t\tvar buf bytes.Buffer\n\t\t\ttableInfos, _ := d.SSTables()\n\t\t\tfor i, level := range tableInfos {\n\t\t\t\tif len(level) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(&buf, \"%d:\\n\", i)\n\t\t\t\tfor _, m := range level {\n\t\t\t\t\tfmt.Fprintf(&buf, \" %d:[%s-%s]\\n\",\n\t\t\t\t\t\tm.FileNum, m.Smallest.UserKey, m.Largest.UserKey)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func (m *MockIRandomPresenter) OnListReceived(arg0 []aggregates.Topic) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnListReceived\", arg0)\n}", "func (m *MockLoadBalance) DeleteSegmentListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteSegmentListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestMultiRoutineAccess_logger(t *testing.T) {\n\t// storing address of *Logger instance for validation.\n\tmsgArray := make([]string, 0)\n\n\t// wait-group to make sure a certain number of go-routine(s)\n\t// has finished its task.\n\tvar wgroup sync.WaitGroup\n\n\tfor i := 0; i < 10; i++ {\n\t\t// updates the wait-group counter.\n\t\twgroup.Add(1)\n\n\t\tgo func(idx int) {\n\t\t\t// decreses the wait-group counter by 1.\n\t\t\t// When the counter returns to 0, the wait-group will end the \"wait\".\n\t\t\tdefer wgroup.Done()\n\n\t\t\tlog := logger.GetLogger()\n\t\t\t// append the address value of instance \"log\"\n\t\t\tlock.Lock()\n\t\t\tmsgArray = append(msgArray, fmt.Sprintf(\"%p\", log))\n\t\t\tlock.Unlock()\n\t\t\tlog.Log(fmt.Sprintf(\"this is a log 
entry from [%v]\\n\", idx))\n\t\t}(i)\n\t}\n\twgroup.Wait()\n\n\t// verification\n\tif len(msgArray) == 0 {\n\t\tt.Fatalf(\"expect to have a least one message\")\n\t}\n\taddrLine := msgArray[0]\n\tfor i := 1; i < len(msgArray); i++ {\n\t\tline := msgArray[i]\n\t\tif addrLine != line {\n\t\t\tt.Errorf(\"expect both lines (addresses of Logger) should be identical, [%v] vs [%v]\\n\", addrLine, line)\n\t\t}\n\t}\n}", "func TestSubscriberIDs(t *testing.T) {\n\tassert := asserts.NewTesting(t, asserts.FailStop)\n\tmsh := mesh.New()\n\n\terr := msh.SpawnCells(\n\t\tNewTestBehavior(\"foo\"),\n\t\tNewTestBehavior(\"bar\"),\n\t\tNewTestBehavior(\"baz\"),\n\t)\n\tassert.NoError(err)\n\n\terr = msh.Subscribe(\"foo\", \"bar\", \"baz\")\n\tassert.NoError(err)\n\n\tsubscriberIDs, err := msh.Subscribers(\"foo\")\n\tassert.NoError(err)\n\tassert.Length(subscriberIDs, 2)\n\n\tsubscriberIDs, err = msh.Subscribers(\"bar\")\n\tassert.NoError(err)\n\tassert.Length(subscriberIDs, 0)\n\n\terr = msh.Unsubscribe(\"foo\", \"baz\")\n\tassert.NoError(err)\n\n\tsubscriberIDs, err = msh.Subscribers(\"foo\")\n\tassert.NoError(err)\n\tassert.Length(subscriberIDs, 1)\n\n\terr = msh.Stop()\n\tassert.NoError(err)\n}", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar buf syncedBuffer\n\tmem := vfs.NewMem()\n\terr := mem.MkdirAll(\"ext\", 0755)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tbuf.Reset()\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", &Options{\n\t\t\t\tFS: loggingFS{mem, &buf},\n\t\t\t\tEventListener: MakeLoggingEventListener(&buf),\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"flush\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err 
:= d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"compact\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\")); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"ingest\":\n\t\t\tbuf.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := sstable.NewWriter(f, nil, LevelOptions{})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := mem.Remove(\"ext/0\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"metrics\":\n\t\t\treturn d.Metrics().String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func testMultiSourceEndpoints(t *testing.T) {\n\tfoo := &endpoint.Endpoint{DNSName: \"foo\", Targets: endpoint.Targets{\"8.8.8.8\"}}\n\tbar := &endpoint.Endpoint{DNSName: \"bar\", Targets: endpoint.Targets{\"8.8.4.4\"}}\n\n\tfor _, tc := range []struct {\n\t\ttitle string\n\t\tnestedEndpoints [][]*endpoint.Endpoint\n\t\texpected []*endpoint.Endpoint\n\t}{\n\t\t{\n\t\t\t\"no child sources return no endpoints\",\n\t\t\tnil,\n\t\t\t[]*endpoint.Endpoint{},\n\t\t},\n\t\t{\n\t\t\t\"single empty child source returns no endpoints\",\n\t\t\t[][]*endpoint.Endpoint{{}},\n\t\t\t[]*endpoint.Endpoint{},\n\t\t},\n\t\t{\n\t\t\t\"single non-empty child source returns child's endpoints\",\n\t\t\t[][]*endpoint.Endpoint{{foo}},\n\t\t\t[]*endpoint.Endpoint{foo},\n\t\t},\n\t\t{\n\t\t\t\"multiple non-empty child sources returns merged children's 
endpoints\",\n\t\t\t[][]*endpoint.Endpoint{{foo}, {bar}},\n\t\t\t[]*endpoint.Endpoint{foo, bar},\n\t\t},\n\t} {\n\t\ttc := tc\n\t\tt.Run(tc.title, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\t// Prepare the nested mock sources.\n\t\t\tsources := make([]Source, 0, len(tc.nestedEndpoints))\n\n\t\t\t// Populate the nested mock sources.\n\t\t\tfor _, endpoints := range tc.nestedEndpoints {\n\t\t\t\tsrc := new(testutils.MockSource)\n\t\t\t\tsrc.On(\"Endpoints\").Return(endpoints, nil)\n\n\t\t\t\tsources = append(sources, src)\n\t\t\t}\n\n\t\t\t// Create our object under test and get the endpoints.\n\t\t\tsource := NewMultiSource(sources, nil)\n\n\t\t\t// Get endpoints from the source.\n\t\t\tendpoints, err := source.Endpoints(context.Background())\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Validate returned endpoints against desired endpoints.\n\t\t\tvalidateEndpoints(t, endpoints, tc.expected)\n\n\t\t\t// Validate that the nested sources were called.\n\t\t\tfor _, src := range sources {\n\t\t\t\tsrc.(*testutils.MockSource).AssertExpectations(t)\n\t\t\t}\n\t\t})\n\t}\n}", "func Test_Listen_Happy_Path(t *testing.T) {\n\tcalledCounter := 0\n\tconsumeCalled := make(chan interface{})\n\tconsumerGroup := &mocks.ConsumerGroup{}\n\n\t// Mimic the end of a consumerGroup session by just not blocking\n\tconsumerGroup.On(\"Consume\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\tcalledCounter++\n\t\t\tconsumeCalled <- true\n\t\t\tif calledCounter >= 2 {\n\t\t\t\ttime.Sleep(1000 * time.Second) // just wait\n\t\t\t}\n\t\t}).\n\t\tReturn(nil).Twice()\n\n\ttested := listener{consumerGroup: consumerGroup}\n\n\t// Listen() is blocking as long as there is no error or context is not canceled\n\tgo func() {\n\t\ttested.Listen(context.Background())\n\t\tassert.Fail(t, `We should have blocked on \"listen\", even if a consumer group session has ended`)\n\t}()\n\n\t// Assert that consume is called twice (2 consumer group sessions are 
expected)\n\t<-consumeCalled\n\t<-consumeCalled\n\n\tconsumerGroup.AssertExpectations(t)\n}", "func TestControllerHandleEvents(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\taddServices []*corev1.Service\n\t\tupdateServices []string\n\t\tdelServices []string\n\t\texpAddedServices []string\n\t\texpDeletedServices []string\n\t}{\n\t\t{\n\t\t\tname: \"If a controller is watching services it should react to the service change events.\",\n\t\t\taddServices: []*corev1.Service{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc1\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc2\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tupdateServices: []string{\"svc1\"},\n\t\t\tdelServices: []string{\"svc1\", \"svc2\"},\n\t\t\texpAddedServices: []string{\"svc1\", \"svc2\", \"svc1\"},\n\t\t\texpDeletedServices: []string{\"svc1\", \"svc2\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\tassert := assert.New(t)\n\t\t\tresync := 30 * time.Second\n\t\t\tstopC := make(chan struct{})\n\t\t\tvar gotAddedServices []string\n\t\t\tvar gotDeletedServices []string\n\n\t\t\t// Create the kubernetes client.\n\t\t\tk8scli, _, _, err := cli.GetK8sClients(\"\")\n\n\t\t\trequire.NoError(err, \"kubernetes client is required\")\n\n\t\t\t// Prepare the environment on the cluster.\n\t\t\tprep := prepare.New(k8scli, t)\n\t\t\tprep.SetUp()\n\t\t\tdefer prep.TearDown()\n\n\t\t\t// Create the reitrever.\n\t\t\trt := &retrieve.Resource{\n\t\t\t\tListerWatcher: 
cache.NewListWatchFromClient(k8scli.CoreV1().RESTClient(), \"services\", prep.Namespace().Name, fields.Everything()),\n\t\t\t\tObject: &corev1.Service{},\n\t\t\t}\n\n\t\t\t// Call times are the number of times the handler should be called before sending the termination signal.\n\t\t\tstopCallTimes := len(test.addServices) + len(test.updateServices) + len(test.delServices)\n\t\t\tcalledTimes := 0\n\t\t\tvar mx sync.Mutex\n\n\t\t\t// Create the handler.\n\t\t\thl := &handler.HandlerFunc{\n\t\t\t\tAddFunc: func(_ context.Context, obj runtime.Object) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\tsvc := obj.(*corev1.Service)\n\t\t\t\t\tgotAddedServices = append(gotAddedServices, svc.Name)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tDeleteFunc: func(_ context.Context, id string) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\t// Ignore namespace.\n\t\t\t\t\tid = strings.Split(id, \"/\")[1]\n\t\t\t\t\tgotDeletedServices = append(gotDeletedServices, id)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// Create a Pod controller.\n\t\t\tctrl := controller.NewSequential(resync, hl, rt, nil, log.Dummy)\n\t\t\trequire.NotNil(ctrl, \"controller is required\")\n\t\t\tgo ctrl.Run(stopC)\n\n\t\t\t// Create the required services.\n\t\t\tfor _, svc := range test.addServices {\n\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Create(svc)\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\tfor _, svc := range test.updateServices {\n\t\t\t\torigSvc, err := k8scli.CoreV1().Services(prep.Namespace().Name).Get(svc, metav1.GetOptions{})\n\t\t\t\tif assert.NoError(err) {\n\t\t\t\t\t// Change something\n\t\t\t\t\torigSvc.Spec.Ports = append(origSvc.Spec.Ports, corev1.ServicePort{Name: 
\"updateport\", Port: 9876})\n\t\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Update(origSvc)\n\t\t\t\t\tassert.NoError(err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Delete the required services.\n\t\t\tfor _, svc := range test.delServices {\n\t\t\t\terr := k8scli.CoreV1().Services(prep.Namespace().Name).Delete(svc, &metav1.DeleteOptions{})\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\t// Wait until we have finished.\n\t\t\tselect {\n\t\t\t// Timeout.\n\t\t\tcase <-time.After(20 * time.Second):\n\t\t\t// Finished.\n\t\t\tcase <-stopC:\n\t\t\t}\n\n\t\t\t// Check.\n\t\t\tassert.Equal(test.expAddedServices, gotAddedServices)\n\t\t\tassert.Equal(test.expDeletedServices, gotDeletedServices)\n\t\t})\n\t}\n}", "func (m *MockCallback) OnRemoveAll() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemoveAll\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockStreamEventListener) OnDestroyStream() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnDestroyStream\")\n}", "func TestEnd2End(t *testing.T) {\n\n\n\t//\tmg := mailgunprovider.New(utilities.GetLogger(\"MG\"),mailgunprovider.BitLabConfig(\"09fe27\"),NewMockFailureStrategy());\n\t//\taz := amazonsesprovider.New(utilities.GetLogger(\"MG\"),amazonsesprovider.BitLabConfig(\"09fe27\"),NewMockFailureStrategy());\n\t// sg := sendgridprovider.New(utilities.GetLogger(\"MG\"),sendgridprovider.BitLabConfig(\"09fe27\"),NewMockFailureStrategy());\n\tvar provider = loopbackprovider.New(utilities.GetLogger(\"loop1\"), mtacontainer.NewThressHoldFailureStrategy(12));\n\n\tscheduler := mtacontainer.NewRoundRobinScheduler([]mtacontainer.MTAProvider{provider});\n\n\tcontainer := mtacontainer.New(scheduler);\n\n\tmail1 := FreshTestMail(provider, \"[email protected]\");\n\tmail2 := FreshTestMail(provider, \"[email protected]\");\n\tmail3 := FreshTestMail(provider, \"[email protected]\");\n\n\tcontainer.GetOutgoing() <- 
mail1;\n\tcontainer.GetOutgoing() <- mail2;\n\tcontainer.GetOutgoing() <- mail3;\n\n\tgo func() {\n\t\t<-container.GetIncoming()\n\t}();\n\n\ti := 0;\n\tfor {\n\t\tselect {\n\t\tcase e := <-container.GetEvent():\n\t\t\tlog.Println(\"Reading event from container: \" + e.GetError().Error());\n\t\t\tif i == 2 {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\ti = i + 1;\n\t\t}\n\t}\n}", "func NewMockSegmentManager(t mockConstructorTestingTNewMockSegmentManager) *MockSegmentManager {\n\tmock := &MockSegmentManager{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (m *MockStream) AddEventListener(streamEventListener types.StreamEventListener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddEventListener\", streamEventListener)\n}", "func (m *MockMultiClusterRoleEventWatcher) AddEventHandler(ctx context.Context, h controller.MultiClusterRoleEventHandler, predicates ...predicate.Predicate) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, h}\n\tfor _, a := range predicates {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AddEventHandler\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestMultipleHeartbeatTimeout(t *testing.T) {\n\ts := NewSupervisor(nil)\n\traa := NewRecoverableAction(s)\n\trab := NewRecoverableAction(s)\n\trac := NewRecoverableAction(s)\n\n\ts.AddRecoverable(\"A\", raa)\n\ts.AddRecoverable(\"B\", rab)\n\ts.AddRecoverable(\"C\", rac)\n\n\tt.Logf(\"(A) is '%v'.\", raa.Action(TimeConsumingAction))\n\tt.Logf(\"(B) is '%v'.\", rab.Action(PositiveAction))\n\tt.Logf(\"(C) is '%v'.\", rac.Action(PositiveAction))\n}", "func TestSplitListenersToDiffProtocol(t *testing.T) {\n\ttestListeners := []*networkextensionv1.Listener{\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8000,\n\t\t\t\tProtocol: \"HTTP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8001,\n\t\t\t\tProtocol: 
\"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8002,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8003,\n\t\t\t\tProtocol: \"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8004,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t}\n\tliGroup := splitListenersToDiffProtocol(testListeners)\n\tfor _, list := range liGroup {\n\t\tt.Logf(\"%+v\", list)\n\t\ttmpProtocol := make(map[string]struct{})\n\t\tfor _, li := range list {\n\t\t\ttmpProtocol[li.Spec.Protocol] = struct{}{}\n\t\t}\n\t\tif len(tmpProtocol) != 1 {\n\t\t\tt.Errorf(\"list %v contains more than one protocol %v\", list, tmpProtocol)\n\t\t}\n\t}\n}", "func (m *MockMetrics) MultiCreateSuccessResponseCounter() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"MultiCreateSuccessResponseCounter\")\n}", "func (m *MockLoadBalance) EnsureListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestSegmentDummy(t *testing.T) {\n\ttype tcase struct {\n\t\tline geom.Line\n\t}\n\n\tfn := func(t *testing.T, tc tcase) {\n\t\ts := NewSegment(tc.line)\n\t\tif s.GetStart().Equals(tc.line[0]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[0], s.GetStart())\n\t\t}\n\t\tif s.GetEnd().Equals(tc.line[1]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[1], s.GetEnd())\n\t\t}\n\t\tif s.GetLineSegment() != tc.line {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line, s.GetLineSegment())\n\t\t}\n\t}\n\ttestcases := []tcase{\n\t\t{\n\t\t\tline: geom.Line{{1, 2}, {3, 4}},\n\t\t},\n\t}\n\n\tfor i, tc := range testcases {\n\t\ttc := tc\n\t\tt.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) { fn(t, tc) })\n\t}\n}", "func TestCheckEvents(t *testing.T) 
{\n\ttestNamespace := \"test_namespace\"\n\tcha := make(chan *events.Envelope)\n\terrorsCh := make(chan error)\n\tme := &mockEvt{\n\t\tmockSubscribe: func(ctx context.Context, filter ...string) (ch <-chan *events.Envelope, errs <-chan error) {\n\t\t\treturn cha, errorsCh\n\t\t},\n\t}\n\titf := &fake.MockedContainerdClient{\n\t\tMockEvents: func() containerd.EventService {\n\t\t\treturn containerd.EventService(me)\n\t\t},\n\t\tMockNamespaces: func(ctx context.Context) ([]string, error) {\n\t\t\treturn []string{testNamespace}, nil\n\t\t},\n\t\tMockContainers: func(namespace string) ([]containerd.Container, error) {\n\t\t\treturn nil, nil\n\t\t},\n\t}\n\t// Test the basic listener\n\tsub := createEventSubscriber(\"subscriberTest1\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttp := &containerdevents.TaskPaused{\n\t\tContainerID: \"42\",\n\t}\n\n\tvp, err := typeurl.MarshalAny(tp)\n\tassert.NoError(t, err)\n\n\ten := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/paused\",\n\t\tEvent: vp,\n\t}\n\tcha <- &en\n\n\ttimeout := time.NewTimer(2 * time.Second)\n\tticker := time.NewTicker(5 * time.Millisecond)\n\tcondition := false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tev := sub.Flush(time.Now().Unix())\n\tassert.Len(t, ev, 1)\n\tassert.Equal(t, ev[0].Topic, \"/tasks/paused\")\n\terrorsCh <- fmt.Errorf(\"chan breaker\")\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting for error\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Test the multiple events one unsupported\n\tsub = createEventSubscriber(\"subscriberTest2\", 
containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttk := &containerdevents.TaskOOM{\n\t\tContainerID: \"42\",\n\t}\n\tvk, err := typeurl.MarshalAny(tk)\n\tassert.NoError(t, err)\n\n\tek := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/oom\",\n\t\tEvent: vk,\n\t}\n\n\tnd := &containerdevents.NamespaceDelete{\n\t\tName: \"k10s.io\",\n\t}\n\tvnd, err := typeurl.MarshalAny(nd)\n\tassert.NoError(t, err)\n\n\tevnd := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/namespaces/delete\",\n\t\tEvent: vnd,\n\t}\n\n\tcha <- &ek\n\tcha <- &evnd\n\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\tev2 := sub.Flush(time.Now().Unix())\n\tfmt.Printf(\"\\n\\n 2/ Flush %v\\n\\n\", ev2)\n\tassert.Len(t, ev2, 1)\n\tassert.Equal(t, ev2[0].Topic, \"/tasks/oom\")\n}", "func TestActiveMultiEvent_Deactivate(t *testing.T) {\r\n\tnumber := 10\r\n\tvar events []*ActiveEvent\r\n\tvar mock []*mockUnixHelper\r\n\r\n\tfor i := 0; i < number; i++ {\r\n\t\tunixMock := &mockUnixHelper{}\r\n\t\tnewActive := &ActiveEvent{FileDescriptor: i, unix: unixMock}\r\n\t\tunixMock.On(\"close\", i).Return(nil).Once()\r\n\t\tevents = append(events, newActive)\r\n\t\tmock = append(mock, unixMock)\r\n\t}\r\n\r\n\tnewActiveMulti := ActiveMultiEvent{events: events}\r\n\tnewActiveMulti.Deactivate()\r\n\r\n\trequire.Nil(t, newActiveMulti.events)\r\n\tfor _, event := range events {\r\n\t\trequire.Nil(t, event)\r\n\t}\r\n\tfor _, m := range mock {\r\n\t\tm.AssertExpectations(t)\r\n\t}\r\n}", "func TestEventsMgrWithElasticRestart(t *testing.T) {\n\tti := tInfo{dedupInterval: 300 * time.Second, batchInterval: 100 * time.Millisecond}\n\tAssertOk(t, ti.setup(t), \"failed to setup test\")\n\tdefer ti.teardown()\n\n\tnumRecorders := 
3\n\n\tstopEventRecorders := make(chan struct{})\n\twg := new(sync.WaitGroup)\n\twg.Add(numRecorders + 1) // +1 for elastic restart go routine\n\n\t// uuid to make each source unique\n\tcomponentID := uuid.NewV4().String()\n\ttotalEventsSentBySrc := make([]int, numRecorders)\n\n\t// create recorder events directory\n\trecorderEventsDir, err := ioutil.TempDir(\"\", \"\")\n\tAssertOk(t, err, \"failed to create recorder events directory\")\n\tdefer os.RemoveAll(recorderEventsDir)\n\tfor i := 0; i < numRecorders; i++ {\n\t\tgo func(i int) {\n\t\t\tevtsRecorder, err := recorder.NewRecorder(&recorder.Config{\n\t\t\t\tComponent: fmt.Sprintf(\"%v-%v\", componentID, i),\n\t\t\t\tEvtsProxyURL: ti.evtProxyServices.EvtsProxy.RPCServer.GetListenURL(),\n\t\t\t\tBackupDir: recorderEventsDir}, ti.logger)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to create recorder for source %v\", i)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tti.recorders.Lock()\n\t\t\tti.recorders.list = append(ti.recorders.list, evtsRecorder)\n\t\t\tti.recorders.Unlock()\n\n\t\t\tticker := time.NewTicker(100 * time.Millisecond)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-stopEventRecorders:\n\t\t\t\t\twg.Done()\n\t\t\t\t\treturn\n\t\t\t\tcase <-ticker.C:\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test event - 1\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_UNRESPONSIVE, \"test event - 2\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_STOPPED, \"test event - 3\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// restart elasticsearch multiple times\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tfor i := 0; i < 3; i++ {\n\t\t\ttime.Sleep(3 * time.Second)\n\t\t\ttestutils.StopElasticsearch(ti.elasticsearchName, ti.elasticsearchDir)\n\t\t\tti.removeResolverEntry(globals.ElasticSearch, ti.elasticsearchAddr)\n\n\t\t\t// let elasticsearch come up on the same port as 
before.\n\t\t\t// so, wait for the port to become available\n\t\t\tAssertEventually(t,\n\t\t\t\tfunc() (bool, interface{}) {\n\t\t\t\t\ttemp := strings.Split(ti.elasticsearchAddr, \":\")\n\t\t\t\t\tif len(temp) != 2 {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"invalid elastic addr: %v\", ti.elasticsearchAddr)\n\t\t\t\t\t}\n\n\t\t\t\t\tport, err := strconv.Atoi(temp[1])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"invalid elastic port: %v\", temp[1])\n\n\t\t\t\t\t}\n\t\t\t\t\tif getAvailablePort(port, port) == port {\n\t\t\t\t\t\treturn true, nil\n\t\t\t\t\t}\n\n\t\t\t\t\treturn false, fmt.Sprintf(\"elastic port not yet available\")\n\t\t\t\t}, \"port not available to start elasticsearch\", \"50ms\", \"5s\")\n\t\t\tti.elasticsearchAddr, ti.elasticsearchDir, err = testutils.StartElasticsearch(ti.elasticsearchName, ti.elasticsearchDir, ti.signer, ti.trustRoots)\n\t\t\tAssertOk(t, err, \"failed to start elasticsearch, err: %v\", err)\n\t\t\tti.updateResolver(globals.ElasticSearch, ti.elasticsearchAddr)\n\t\t}\n\n\t\ttime.Sleep(5 * time.Second)\n\t\tclose(stopEventRecorders) // stop all the recorders\n\t}()\n\n\twg.Wait()\n\n\t//total events sent by all the recorders\n\ttotalEventsSent := 0\n\tfor _, val := range totalEventsSentBySrc {\n\t\ttotalEventsSent += val\n\t}\n\n\t// total number of events received at elastic should match the total events sent\n\t// query all the events received from this source.component\n\tquery := es.NewRegexpQuery(\"source.component.keyword\", fmt.Sprintf(\"%v-.*\", componentID))\n\tti.assertElasticUniqueEvents(t, query, true, 3*numRecorders, \"120s\")\n\tti.assertElasticTotalEvents(t, query, false, totalEventsSent, \"120s\")\n\tAssert(t, ti.esClient.GetResetCount() > 0, \"client should have restarted\")\n}", "func TestAsyncEvents(t *testing.T) {\n\tif !haveArchive {\n\t\treturn\n\t}\n\n\tif testing.Verbose() && DEBUG {\n\t\tlogging.SetLevel(logging.DEBUG, \"archive\")\n\t}\n\n\tarchive.Listeners.RecordingSignalListener 
= RecordingSignalListener\n\tarchive.Listeners.RecordingEventStartedListener = RecordingEventStartedListener\n\tarchive.Listeners.RecordingEventProgressListener = RecordingEventProgressListener\n\tarchive.Listeners.RecordingEventStoppedListener = RecordingEventStoppedListener\n\n\ttestCounters = TestCounters{0, 0, 0, 0}\n\tif !CounterValuesMatch(testCounters, 0, 0, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tarchive.EnableRecordingEvents()\n\tarchive.RecordingEventsPoll()\n\n\tif !CounterValuesMatch(testCounters, 0, 0, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tpublication, err := archive.AddRecordedPublication(testCases[0].sampleChannel, testCases[0].sampleStream)\n\tif err != nil {\n\t\tt.Log(err)\n\t\tt.FailNow()\n\t}\n\n\t// Delay a little to get the publication is established\n\tidler := idlestrategy.Sleeping{SleepFor: time.Millisecond * 100}\n\tidler.Idle(0)\n\n\tarchive.RecordingEventsPoll()\n\tif !CounterValuesMatch(testCounters, 1, 1, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tif err := archive.StopRecordingByPublication(*publication); err != nil {\n\t\tt.Log(err)\n\t\tt.FailNow()\n\t}\n\n\tif !CounterValuesMatch(testCounters, 2, 1, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tarchive.RecordingEventsPoll()\n\tif !CounterValuesMatch(testCounters, 2, 1, 0, 1, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\t// Cleanup\n\tarchive.DisableRecordingEvents()\n\tarchive.Listeners.RecordingSignalListener = nil\n\tarchive.Listeners.RecordingEventStartedListener = nil\n\tarchive.Listeners.RecordingEventProgressListener = nil\n\tarchive.Listeners.RecordingEventStoppedListener = nil\n\ttestCounters = TestCounters{0, 0, 0, 0}\n\tif !CounterValuesMatch(testCounters, 0, 0, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tpublication.Close()\n}", "func 
TestMultiRoutineAccessWithDelay_logger(t *testing.T) {\n\t// storing address of *Logger instance for validation.\n\tmsgArray := make([]string, 0)\n\n\t// wait-group to make sure a certain number of go-routine(s)\n\t// has finished its task.\n\tvar wgroup sync.WaitGroup\n\n\tfor i := 0; i < 10; i++ {\n\t\t// updates the wait-group counter.\n\t\twgroup.Add(1)\n\n\t\tgo func(idx int) {\n\t\t\t// decreses the wait-group counter by 1.\n\t\t\t// When the counter returns to 0, the wait-group will end the \"wait\".\n\t\t\tdefer wgroup.Done()\n\n\t\t\t// add a random delay to simulate multi access.\n\t\t\ttime.Sleep(time.Millisecond * time.Duration(rand.Int63n(1000)))\n\n\t\t\tlog := logger.GetLogger()\n\t\t\t// append the address value of instance \"log\"\n\t\t\tlock.Lock()\n\t\t\tmsgArray = append(msgArray, fmt.Sprintf(\"%p\", log))\n\t\t\tlock.Unlock()\n\t\t\tlog.Log(fmt.Sprintf(\"[with delay] this is a log entry from [%v]\\n\", idx))\n\t\t}(i)\n\t}\n\twgroup.Wait()\n\n\t// verification\n\tif len(msgArray) == 0 {\n\t\tt.Fatalf(\"expect to have a least one message\")\n\t}\n\taddrLine := msgArray[0]\n\tfor i := 1; i < len(msgArray); i++ {\n\t\tline := msgArray[i]\n\t\tif addrLine != line {\n\t\t\tt.Errorf(\"expect both lines (addresses of Logger) should be identical, [%v] vs [%v]\\n\", addrLine, line)\n\t\t}\n\t}\n}", "func (m *MockTaskTable) HasSubtasksInStates(arg0 string, arg1, arg2 int64, arg3 ...interface{}) (bool, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1, arg2}\n\tfor _, a := range arg3 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"HasSubtasksInStates\", varargs...)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestProcessEventHandling(t *testing.T) {\n\tctx := context.Background()\n\n\tclient := mocks.NewEventMonitoringModuleClient(t)\n\tstream := mocks.NewEventMonitoringModule_GetProcessEventsClient(t)\n\tclient.On(\"GetProcessEvents\", ctx, 
&api.GetProcessEventParams{TimeoutSeconds: 1}).Return(stream, nil)\n\n\tevents := make([]*model.ProcessEvent, 0)\n\tevents = append(events, model.NewMockedExecEvent(time.Now().Add(-10*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}))\n\tevents = append(events, model.NewMockedExitEvent(time.Now().Add(-9*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}, 0))\n\tevents = append(events, model.NewMockedExecEvent(time.Now().Add(-5*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"invalid-path\"}))\n\tevents = append(events, model.NewMockedExitEvent(time.Now().Add(-5*time.Second), 32, \"/usr/bin/ls\", []string{\"ls\", \"invalid-path\"}, 2))\n\n\tfor _, e := range events {\n\t\tdata, err := e.MarshalMsg(nil)\n\t\trequire.NoError(t, err)\n\n\t\tstream.On(\"Recv\").Once().Return(&api.ProcessEventMessage{Data: data}, nil)\n\t}\n\tstream.On(\"Recv\").Return(nil, io.EOF)\n\n\trcvMessage := make(chan bool)\n\ti := 0\n\thandler := func(e *model.ProcessEvent) {\n\t\tif i > len(events)-1 {\n\t\t\tt.Error(\"should not have received more process events\")\n\t\t}\n\n\t\tAssertProcessEvents(t, events[i], e)\n\t\t// all message have been consumed\n\t\tif i == len(events)-1 {\n\t\t\tclose(rcvMessage)\n\t\t}\n\n\t\ti++\n\t}\n\tl, err := NewSysProbeListener(nil, client, handler)\n\trequire.NoError(t, err)\n\tl.Run()\n\n\t<-rcvMessage\n\tl.Stop()\n\tclient.AssertExpectations(t)\n\tstream.AssertExpectations(t)\n}", "func (m *MockProvider) OnServiceSynced() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceSynced\")\n}", "func TestProcessor_StartWithTableWithErrorAfterRebalance(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tvar (\n\t\terr error\n\t\tconsumer = mock.NewMockConsumer(ctrl)\n\t\tproducer = mock.NewMockProducer(ctrl)\n\t\tst = mock.NewMockStorage(ctrl)\n\t\tsb = func(topic string, par int32) (storage.Storage, error) {\n\t\t\treturn st, nil\n\t\t}\n\t\tfinal = make(chan bool)\n\t\tch = make(chan kafka.Event)\n\t\tp = 
createProcessorWithTable(t, ctrl, consumer, producer, 3, sb)\n\t\tvalue = []byte(\"value\")\n\t\tblockit = make(chan bool)\n\t\tunblocked = make(chan bool)\n\t)\n\tp.graph.callbacks[topic] = func(ctx Context, msg interface{}) {\n\t\tfmt.Println(\"hallodfads\", msg)\n\t\tdefer close(unblocked)\n\t\t<-blockit\n\t\tfmt.Println(\"unblocked\")\n\t}\n\n\t// -- expectations --\n\t// 1. start\n\tconsumer.EXPECT().Subscribe(topOff).Return(nil)\n\tconsumer.EXPECT().Events().Return(ch).AnyTimes()\n\t// 2. rebalance\n\tst.EXPECT().Open().Times(6)\n\tst.EXPECT().GetOffset(int64(-2)).Return(int64(123), nil).Times(6)\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(0), int64(123))\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(1), int64(123))\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(2), int64(123))\n\tconsumer.EXPECT().AddPartition(table, int32(0), int64(123))\n\tconsumer.EXPECT().AddPartition(table, int32(1), int64(123))\n\tconsumer.EXPECT().AddPartition(table, int32(2), int64(123))\n\t// 3. EOF messages\n\tst.EXPECT().MarkRecovered().Times(3)\n\t// 4. messages\n\tconsumer.EXPECT().Commit(topic, int32(1), int64(2))\n\t// 5. error\n\tconsumer.EXPECT().Close().Do(func() { close(ch) })\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(0))\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(1))\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(2))\n\tconsumer.EXPECT().RemovePartition(table, int32(0))\n\tconsumer.EXPECT().RemovePartition(table, int32(1))\n\tconsumer.EXPECT().RemovePartition(table, int32(2))\n\tst.EXPECT().Close().Times(6)\n\tproducer.EXPECT().Close()\n\n\t// -- test --\n\t// 1. start\n\tgo func() {\n\t\terr = p.Run(context.Background())\n\t\tensure.NotNil(t, err)\n\t\tclose(final)\n\t}()\n\n\t// 2. 
rebalance\n\tensure.True(t, len(p.partitions) == 0)\n\tensure.True(t, len(p.partitionViews) == 0)\n\tch <- (*kafka.Assignment)(&map[int32]int64{0: -1, 1: -1, 2: -1})\n\terr = syncWith(t, ch, -1) // with processor\n\tensure.Nil(t, err)\n\tensure.True(t, len(p.partitions) == 3)\n\tensure.True(t, len(p.partitionViews) == 3)\n\n\t// 3. message\n\tch <- &kafka.EOF{\n\t\tTopic: tableName(group),\n\t\tHwm: 0,\n\t\tPartition: 0,\n\t}\n\terr = syncWith(t, ch, 0) // with partition\n\tensure.Nil(t, err)\n\tch <- &kafka.EOF{\n\t\tTopic: tableName(group),\n\t\tHwm: 0,\n\t\tPartition: 1,\n\t}\n\terr = syncWith(t, ch, 1) // with partition\n\tensure.Nil(t, err)\n\tch <- &kafka.EOF{\n\t\tTopic: tableName(group),\n\t\tHwm: 0,\n\t\tPartition: 2,\n\t}\n\terr = syncWith(t, ch, 2) // with partition\n\tensure.Nil(t, err)\n\n\t// 4. heavy message\n\tch <- &kafka.Message{\n\t\tTopic: topic,\n\t\tPartition: 1,\n\t\tOffset: 2,\n\t\tKey: \"key\",\n\t\tValue: value,\n\t}\n\t// dont wait for that\n\n\t// 4. receive error\n\tch <- new(kafka.Error)\n\n\t// sync with partition (should be unblocked)\n\tclose(blockit)\n\t<-unblocked\n\n\t// 5. 
stop\n\terr = doTimed(t, func() {\n\t\t<-final\n\t})\n\tensure.Nil(t, err)\n}", "func TestRangesFull(t *testing.T) {\n\tinitDone := make(chan struct{})\n\t// A single /32 can't be used to allocate since we always reserve 2 IPs,\n\t// the network and broadcast address, which in the case of a /32 means it is always full.\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.123/32\", \"FF::123/128\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv6Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-a\" {\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\t\tt.Error(\"Expected service to have one 
conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\treturn false\n\t\t}\n\n\t\tif svc.Name != \"service-b\" {\n\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected two service updates\")\n\t}\n}", "func TestBatchOnUploadEventFailure(t *testing.T) {\n\tbatch := Batch{&mocks.FailingWriter{}}\n\terr := batch.OnUploadEvent(&spec.Measurement{\n\t\tElapsed: 1.0,\n\t})\n\tif err != mocks.ErrMocked {\n\t\tt.Fatal(\"Not the error we expected\")\n\t}\n}", "func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() 
[]*logutilpb.Event }, msgAndArgs ...any) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func TestSubscribeSensors(t *testing.T) {\n\n\t// System contains sensors and zones\n\tsys, _, sensors := makeSystemWithZonesAndSensors(0, 4)\n\tsensor1 := sensors[0]\n\tsensor2 := sensors[1]\n\tsensor3 := sensors[2]\n\tsensor4 := sensors[3]\n\n\tevtBus := evtbus.NewBus(100, 100)\n\tevtConsumer := &EventConsumer{}\n\tevtBus.AddConsumer(evtConsumer)\n\n\t// Create a monitor, we will pass in some initial state to pretend we know\n\t// about the value of sensor2, sensor4, this should cause the monitor to not request\n\t// a value for it and also return the value it knows about to the monitor group\n\tinitialSensorValues := make(map[string]gohome.SensorAttr)\n\tvar attr2 = sensor2.Attr\n\tattr2.Value = \"10\"\n\tinitialSensorValues[sensor2.ID] = attr2\n\tvar attr4 = sensor4.Attr\n\tattr4.Value = \"20\"\n\tinitialSensorValues[sensor4.ID] = attr4\n\n\tm := gohome.NewMonitor(sys, evtBus, initialSensorValues, nil)\n\n\tmockHandler := &MockChangeHandler{}\n\n\t// Request to monitor certain items\n\tgroup := &gohome.MonitorGroup{\n\t\tSensors: make(map[string]bool),\n\t\tHandler: mockHandler,\n\t\tTimeout: time.Duration(5) * time.Second,\n\t}\n\n\t// Add a sensor to the group, so we monitor it\n\tgroup.Sensors[sensor1.ID] = true\n\tgroup.Sensors[sensor2.ID] = true\n\tgroup.Sensors[sensor3.ID] = true\n\tgroup.Sensors[sensor4.ID] = true\n\n\t// Begin the subscription, should get back a monitor ID\n\tmID, _ := m.Subscribe(group, true)\n\trequire.NotEqual(t, \"\", mID)\n\n\t// Processing is async, small delay to let event bus process\n\ttime.Sleep(time.Millisecond * 1000)\n\n\t// Should have got an event asking for certain sensors to report their status\n\t// our sensor should be included in that\n\trequire.NotNil(t, 
evtConsumer.SensorsReport)\n\trequire.True(t, evtConsumer.SensorsReport.SensorIDs[sensor1.ID])\n\trequire.True(t, evtConsumer.SensorsReport.SensorIDs[sensor3.ID])\n\trequire.False(t, evtConsumer.SensorsReport.SensorIDs[sensor2.ID])\n\trequire.False(t, evtConsumer.SensorsReport.SensorIDs[sensor4.ID])\n\n\t// For sensors 2 and 4 we should have got an update callback since we passed in their\n\t// values to the monitor when we inited it\n\trequire.Equal(t, attr2, mockHandler.ChangeBatches[0].Sensors[sensor2.ID])\n\trequire.Equal(t, attr4, mockHandler.ChangeBatches[0].Sensors[sensor4.ID])\n\n\t// Now respond to the request for sensors 1 and 3 to report their values\n\treporting := &gohome.SensorsReportingEvt{}\n\tsensor1Attr := gohome.SensorAttr{\n\t\tName: \"sensor1\",\n\t\tValue: \"111\",\n\t}\n\treporting.Add(sensor1.ID, sensor1Attr)\n\tsensor3Attr := gohome.SensorAttr{\n\t\tName: \"sensor3\",\n\t\tValue: \"333\",\n\t}\n\treporting.Add(sensor3.ID, sensor3Attr)\n\n\t// Processing is async, small delay to let event bus process\n\tmockHandler.ChangeBatches = nil\n\tevtBus.Enqueue(reporting)\n\n\ttime.Sleep(time.Millisecond * 1000)\n\n\t// We should have got updates with the attribute values we are expecting\n\trequire.Equal(t, 2, len(mockHandler.ChangeBatches))\n\trequire.Equal(t, sensor1Attr, mockHandler.ChangeBatches[0].Sensors[sensor1.ID])\n\trequire.Equal(t, sensor3Attr, mockHandler.ChangeBatches[1].Sensors[sensor3.ID])\n}", "func TestEvents(t *testing.T) {\n\tti := tInfo{}\n\tAssertOk(t, ti.setup(t), \"failed to setup test\")\n\tdefer ti.teardown()\n\n\t// uuid to make each source unique\n\tcomponentID := uuid.NewV4().String()\n\n\t// create recorder events directory\n\trecorderEventsDir, err := ioutil.TempDir(\"\", \"\")\n\tAssertOk(t, err, \"failed to create recorder events directory\")\n\tdefer os.RemoveAll(recorderEventsDir)\n\n\t// create recorder\n\tevtsRecorder, err := recorder.NewRecorder(&recorder.Config{\n\t\tComponent: componentID,\n\t\tEvtsProxyURL: 
ti.evtProxyServices.EvtsProxy.RPCServer.GetListenURL(),\n\t\tBackupDir: recorderEventsDir}, ti.logger)\n\tAssertOk(t, err, \"failed to create events recorder\")\n\tdefer evtsRecorder.Close()\n\n\t// send events (recorder -> proxy -> dispatcher -> writer -> evtsmgr -> elastic)\n\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test event - 1\", nil)\n\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test event - 2\", nil)\n\n\ttime.Sleep(1 * time.Second)\n\n\t// verify that it has reached elasticsearch; these are the first occurrences of an event\n\t// so it should have reached elasticsearch without being de-duped.\n\tquery := es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewTermQuery(\"type.keyword\", eventtypes.SERVICE_STARTED.String()))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 1\n\tti.assertElasticTotalEvents(t, query, true, 1, \"4s\") // total == 1\n\tquery = es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewMatchQuery(\"message\", \"test event -2\").Operator(\"and\"))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 1\n\tti.assertElasticTotalEvents(t, query, true, 1, \"4s\") // total == 1\n\n\t// send duplicates and check whether they're compressed\n\tnumDuplicates := 25\n\tfor i := 0; i < numDuplicates; i++ {\n\t\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test dup event - 1\", nil)\n\t\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test dup event - 2\", nil)\n\t}\n\n\t// ensure the de-duped events reached elasticsearch\n\t// test duplicate event - 1\n\tquery = es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewMatchQuery(\"message\", \"test dup event - 1\").Operator(\"and\"))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 1\n\tti.assertElasticTotalEvents(t, query, true, numDuplicates, \"2s\") // total == numDuplicates\n\n\t// test duplicate event - 2\n\tquery = 
es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewMatchQuery(\"message\", \"test dup event - 2\").Operator(\"and\"))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 1\n\tti.assertElasticTotalEvents(t, query, true, numDuplicates, \"2s\") // total == numDuplicates\n\n\t// create test NIC object\n\ttestNIC := policygen.CreateSmartNIC(\"00-14-22-01-23-45\",\n\t\tcluster.DistributedServiceCardStatus_ADMITTED.String(),\n\t\t\"esx-1\",\n\t\t&cluster.DSCCondition{\n\t\t\tType: cluster.DSCCondition_HEALTHY.String(),\n\t\t\tStatus: cluster.ConditionStatus_FALSE.String(),\n\t\t})\n\n\t// record events with reference object\n\tfor i := 0; i < numDuplicates; i++ {\n\t\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test dup event - 1\", testNIC)\n\t\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test dup event - 2\", testNIC)\n\t}\n\n\t// query by kind\n\tqueryByKind := es.NewTermQuery(\"object-ref.kind.keyword\", testNIC.GetKind())\n\tti.assertElasticUniqueEvents(t, queryByKind, true, 2, \"4s\") // unique == 2 (eventType1 and eventType2)\n\tti.assertElasticTotalEvents(t, queryByKind, true, numDuplicates*2, \"4s\") // total == numDuplicates\n}", "func (m *HeavySyncMock) ValidateCallCounters() {\n\n\tif !m.ResetFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.Reset\")\n\t}\n\n\tif !m.StartFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.Start\")\n\t}\n\n\tif !m.StopFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.Stop\")\n\t}\n\n\tif !m.StoreBlobsFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.StoreBlobs\")\n\t}\n\n\tif !m.StoreDropFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.StoreDrop\")\n\t}\n\n\tif !m.StoreIndicesFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.StoreIndices\")\n\t}\n\n\tif !m.StoreRecordsFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.StoreRecords\")\n\t}\n\n}", "func TestResilientMultiWriter(t *testing.T) 
{\n\ttests := []struct {\n\t\tname string\n\t\twriters []io.Writer\n\t}{\n\t\t{\n\t\t\tname: \"All valid writers\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"All invalid writers\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"First invalid writer\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"First valid writer\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\twriters := tt.writers\n\t\tmultiWriter := MultiLevelWriter(writers...)\n\n\t\tlogger := New(multiWriter).With().Timestamp().Logger().Level(InfoLevel)\n\t\tlogger.Info().Msg(\"Test msg\")\n\n\t\tif len(writers) != writeCalls {\n\t\t\tt.Errorf(\"Expected %d writers to have been called but only %d were.\", len(writers), writeCalls)\n\t\t}\n\t\twriteCalls = 0\n\t}\n}", "func (m *MockProvider) OnEndpointsUpdate(arg0, arg1 *v1.Endpoints) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsUpdate\", arg0, arg1)\n}", "func TestDefaultBrokerWithManyTriggers(t *testing.T) {\n\tclient := setup(t, true)\n\tdefer tearDown(client)\n\n\t// Label namespace so that it creates the default broker.\n\tif err := client.LabelNamespace(map[string]string{\"knative-eventing-injection\": \"enabled\"}); err != nil {\n\t\tt.Fatalf(\"Error annotating namespace: %v\", err)\n\t}\n\n\t// Wait for default broker ready.\n\tif err := client.WaitForResourceReady(defaultBrokerName, common.BrokerTypeMeta); err != nil {\n\t\tt.Fatalf(\"Error 
waiting for default broker to become ready: %v\", err)\n\t}\n\n\t// These are the event types and sources that triggers will listen to, as well as the selectors\n\t// to set in the subscriber and services pods.\n\teventsToReceive := []eventReceiver{\n\t\t{eventTypeAndSource{Type: any, Source: any}, newSelector()},\n\t\t{eventTypeAndSource{Type: eventType1, Source: any}, newSelector()},\n\t\t{eventTypeAndSource{Type: any, Source: eventSource1}, newSelector()},\n\t\t{eventTypeAndSource{Type: eventType1, Source: eventSource1}, newSelector()},\n\t}\n\n\t// Create subscribers.\n\tfor _, event := range eventsToReceive {\n\t\tsubscriberName := name(\"dumper\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tpod := resources.EventLoggerPod(subscriberName)\n\t\tclient.CreatePodOrFail(pod, common.WithService(subscriberName))\n\t}\n\n\t// Create triggers.\n\tfor _, event := range eventsToReceive {\n\t\ttriggerName := name(\"trigger\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tsubscriberName := name(\"dumper\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tclient.CreateTriggerOrFail(triggerName,\n\t\t\tresources.WithSubscriberRefForTrigger(subscriberName),\n\t\t\tresources.WithTriggerFilter(event.typeAndSource.Source, event.typeAndSource.Type),\n\t\t)\n\t}\n\n\t// Wait for all test resources to become ready before sending the events.\n\tif err := client.WaitForAllTestResourcesReady(); err != nil {\n\t\tt.Fatalf(\"Failed to get all test resources ready: %v\", err)\n\t}\n\n\t// These are the event types and sources that will be send.\n\teventsToSend := []eventTypeAndSource{\n\t\t{eventType1, eventSource1},\n\t\t{eventType1, eventSource2},\n\t\t{eventType2, eventSource1},\n\t\t{eventType2, eventSource2},\n\t}\n\t// Map to save the expected events per dumper so that we can verify the delivery.\n\texpectedEvents := make(map[string][]string)\n\t// Map to save the unexpected events per dumper so that we can verify that they weren't 
delivered.\n\tunexpectedEvents := make(map[string][]string)\n\tfor _, eventToSend := range eventsToSend {\n\t\t// Create cloud event.\n\t\t// Using event type and source as part of the body for easier debugging.\n\t\tbody := fmt.Sprintf(\"Body-%s-%s\", eventToSend.Type, eventToSend.Source)\n\t\tcloudEvent := &resources.CloudEvent{\n\t\t\tSource: eventToSend.Source,\n\t\t\tType: eventToSend.Type,\n\t\t\tData: fmt.Sprintf(`{\"msg\":%q}`, body),\n\t\t}\n\t\t// Create sender pod.\n\t\tsenderPodName := name(\"sender\", eventToSend.Type, eventToSend.Source)\n\t\tif err := client.SendFakeEventToAddressable(senderPodName, defaultBrokerName, common.BrokerTypeMeta, cloudEvent); err != nil {\n\t\t\tt.Fatalf(\"Error send cloud event to broker: %v\", err)\n\t\t}\n\n\t\t// Check on every dumper whether we should expect this event or not, and add its body\n\t\t// to the expectedEvents/unexpectedEvents maps.\n\t\tfor _, eventToReceive := range eventsToReceive {\n\t\t\tsubscriberName := name(\"dumper\", eventToReceive.typeAndSource.Type, eventToReceive.typeAndSource.Source)\n\t\t\tif shouldExpectEvent(&eventToSend, &eventToReceive, t.Logf) {\n\t\t\t\texpectedEvents[subscriberName] = append(expectedEvents[subscriberName], body)\n\t\t\t} else {\n\t\t\t\tunexpectedEvents[subscriberName] = append(unexpectedEvents[subscriberName], body)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, event := range eventsToReceive {\n\t\tsubscriberName := name(\"dumper\", event.typeAndSource.Type, event.typeAndSource.Source)\n\t\tif err := client.CheckLog(subscriberName, common.CheckerContainsAll(expectedEvents[subscriberName])); err != nil {\n\t\t\tt.Fatalf(\"Event(s) not found in logs of subscriber pod %q: %v\", subscriberName, err)\n\t\t}\n\t\t// At this point all the events should have been received in the pod.\n\t\t// We check whether we find unexpected events. 
If so, then we fail.\n\t\tfound, err := client.FindAnyLogContents(subscriberName, unexpectedEvents[subscriberName])\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed querying to find log contents in pod %q: %v\", subscriberName, err)\n\t\t}\n\t\tif found {\n\t\t\tt.Fatalf(\"Unexpected event(s) found in logs of subscriber pod %q\", subscriberName)\n\t\t}\n\t}\n}", "func TestRegisteringMultipleAccessHandlersPanics(t *testing.T) {\n\trunTest(t, func(s *Session) {\n\t\tdefer func() {\n\t\t\tv := recover()\n\t\t\tif v == nil {\n\t\t\t\tt.Errorf(`expected test to panic, but nothing happened`)\n\t\t\t}\n\t\t}()\n\t\ts.Handle(\"model\",\n\t\t\tres.Access(func(r res.AccessRequest) {\n\t\t\t\tr.NotFound()\n\t\t\t}),\n\t\t\tres.Access(func(r res.AccessRequest) {\n\t\t\t\tr.NotFound()\n\t\t\t}),\n\t\t)\n\t}, nil)\n}", "func TestMultipleRegisterCalls(t *testing.T) {\n\tRegister(\"multiple-register-driver-1\")\n\trequire.PanicsWithError(t, \"Register called twice for driver multiple-register-driver-1\", func() {\n\t\tRegister(\"multiple-register-driver-1\")\n\t})\n\n\t// Should be no error.\n\tRegister(\"multiple-register-driver-2\")\n}", "func (_m *azureBlobClient) ListBlobsFlatSegment(_a0 context.Context, _a1 azblob.Marker, _a2 azblob.ListBlobsSegmentOptions) (*azblob.ListBlobsFlatSegmentResponse, error) {\n\tret := _m.Called(_a0, _a1, _a2)\n\n\tvar r0 *azblob.ListBlobsFlatSegmentResponse\n\tif rf, ok := ret.Get(0).(func(context.Context, azblob.Marker, azblob.ListBlobsSegmentOptions) *azblob.ListBlobsFlatSegmentResponse); ok {\n\t\tr0 = rf(_a0, _a1, _a2)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*azblob.ListBlobsFlatSegmentResponse)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, azblob.Marker, azblob.ListBlobsSegmentOptions) error); ok {\n\t\tr1 = rf(_a0, _a1, _a2)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockProvider) OnEndpointsDelete(arg0 *v1.Endpoints) 
{\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsDelete\", arg0)\n}", "func Test_subSetLoadBalancer_ProcessSubsets(t *testing.T) {\n\n\thostSet := InitExampleHosts()\n\tupdateCb := func(entry types.LBSubsetEntry) {\n\t\tentry.PrioritySubset().Update(0, SubsetLbExample.originalPrioritySet.HostSetsByPriority()[0].Hosts(), nil)\n\t}\n\n\tnewCb := func(entry types.LBSubsetEntry, predicate types.HostPredicate, kvs types.SubsetMetadata, addinghost bool) {\n\t\tif addinghost {\n\t\t\tprioritySubset := NewPrioritySubsetImpl(&SubsetLbExample, predicate)\n\t\t\tentry.SetPrioritySubset(prioritySubset)\n\t\t}\n\t}\n\n\thostadded := SubsetLbExample.originalPrioritySet.HostSetsByPriority()[0].Hosts()\n\n\ttype args struct {\n\t\thostAdded []types.Host\n\t\thostsRemoved []types.Host\n\t\tupdateCB func(types.LBSubsetEntry)\n\t\tnewCB func(types.LBSubsetEntry, types.HostPredicate, types.SubsetMetadata, bool)\n\t\tmatchCriteria []types.MetadataMatchCriterion\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant []types.Host\n\t}{\n\t\t{\n\t\t\tname: \"case1\",\n\t\t\targs: args{\n\t\t\t\thostAdded: hostadded,\n\t\t\t\thostsRemoved: nil,\n\t\t\t\tupdateCB: updateCb,\n\t\t\t\tnewCB: newCb,\n\t\t\t\tmatchCriteria: []types.MetadataMatchCriterion{\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"stage\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"prod\"),\n\t\t\t\t\t},\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"type\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"std\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.Host{\n\t\t\t\thostSet[0], //e1,e2,e3,e4\n\t\t\t\thostSet[1], hostSet[2], hostSet[3],\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tname: \"case2\",\n\t\t\targs: args{\n\t\t\t\thostAdded: hostadded,\n\t\t\t\thostsRemoved: nil,\n\t\t\t\tupdateCB: updateCb,\n\t\t\t\tnewCB: newCb,\n\t\t\t\tmatchCriteria: 
[]types.MetadataMatchCriterion{\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"stage\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"prod\"),\n\t\t\t\t\t},\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"type\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"bigmem\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.Host{\n\t\t\t\thostSet[4], //e5,e6\n\t\t\t\thostSet[5],\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"case3\",\n\t\t\targs: args{\n\t\t\t\thostAdded: hostadded,\n\t\t\t\thostsRemoved: nil,\n\t\t\t\tupdateCB: updateCb,\n\t\t\t\tnewCB: newCb,\n\t\t\t\tmatchCriteria: []types.MetadataMatchCriterion{\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"stage\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"dev\"),\n\t\t\t\t\t},\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"type\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"std\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.Host{\n\t\t\t\thostSet[6], //e5,e6\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"case4\",\n\t\t\targs: args{\n\t\t\t\thostAdded: hostadded,\n\t\t\t\thostsRemoved: nil,\n\t\t\t\tupdateCB: updateCb,\n\t\t\t\tnewCB: newCb,\n\t\t\t\tmatchCriteria: []types.MetadataMatchCriterion{\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"stage\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"prod\"),\n\t\t\t\t\t},\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"version\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"1.0\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.Host{\n\t\t\t\thostSet[0], //e1 e2 e5\n\t\t\t\thostSet[1],\n\t\t\t\thostSet[4],\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tname: \"case5\",\n\t\t\targs: args{\n\t\t\t\thostAdded: hostadded,\n\t\t\t\thostsRemoved: nil,\n\t\t\t\tupdateCB: updateCb,\n\t\t\t\tnewCB: newCb,\n\t\t\t\tmatchCriteria: 
[]types.MetadataMatchCriterion{\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"stage\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"prod\"),\n\t\t\t\t\t},\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"version\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"1.1\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.Host{\n\t\t\t\thostSet[2], //e3 e4 e6\n\t\t\t\thostSet[3],\n\t\t\t\thostSet[5],\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tname: \"case6\",\n\t\t\targs: args{\n\t\t\t\thostAdded: hostadded,\n\t\t\t\thostsRemoved: nil,\n\t\t\t\tupdateCB: updateCb,\n\t\t\t\tnewCB: newCb,\n\t\t\t\tmatchCriteria: []types.MetadataMatchCriterion{\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"stage\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"dev\"),\n\t\t\t\t\t},\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"version\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"1.2-pre\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.Host{\n\t\t\t\thostSet[6], //e7\n\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tname: \"case7\",\n\t\t\targs: args{\n\t\t\t\thostAdded: hostadded,\n\t\t\t\thostsRemoved: nil,\n\t\t\t\tupdateCB: updateCb,\n\t\t\t\tnewCB: newCb,\n\t\t\t\tmatchCriteria: []types.MetadataMatchCriterion{\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"version\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"1.0\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.Host{\n\t\t\t\thostSet[0], //e1,e2,e5\n\t\t\t\thostSet[1],\n\t\t\t\thostSet[4],\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tname: \"case8\",\n\t\t\targs: args{\n\t\t\t\thostAdded: hostadded,\n\t\t\t\thostsRemoved: nil,\n\t\t\t\tupdateCB: updateCb,\n\t\t\t\tnewCB: newCb,\n\t\t\t\tmatchCriteria: []types.MetadataMatchCriterion{\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"version\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"1.1\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.Host{\n\t\t\t\thostSet[2], //e3 e4 
e6\n\t\t\t\thostSet[3],\n\t\t\t\thostSet[5],\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tname: \"case9\",\n\t\t\targs: args{\n\t\t\t\thostAdded: hostadded,\n\t\t\t\thostsRemoved: nil,\n\t\t\t\tupdateCB: updateCb,\n\t\t\t\tnewCB: newCb,\n\t\t\t\tmatchCriteria: []types.MetadataMatchCriterion{\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"version\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"1.2-pre\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.Host{\n\t\t\t\thostSet[6], //e7\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tname: \"testcase10\",\n\t\t\targs: args{\n\t\t\t\thostAdded: SubsetLbExample.originalPrioritySet.HostSetsByPriority()[0].Hosts(),\n\t\t\t\thostsRemoved: nil,\n\t\t\t\tupdateCB: updateCb,\n\t\t\t\tnewCB: newCb,\n\t\t\t\tmatchCriteria: []types.MetadataMatchCriterion{\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"version\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"1.0\"),\n\t\t\t\t\t},\n\t\t\t\t\t&router.MetadataMatchCriterionImpl{\n\t\t\t\t\t\t\"xlarge\",\n\t\t\t\t\t\ttypes.GenerateHashedValue(\"true\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.Host{\n\t\t\t\thostSet[0], // e1\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tsslb := SubsetLbExample\n\n\t\t\tsslb.subSets = make(map[string]types.ValueSubsetMap)\n\n\t\t\tsslb.ProcessSubsets(tt.args.hostAdded, tt.args.hostsRemoved, tt.args.updateCB, tt.args.newCB)\n\n\t\t\tfor idx, host := range sslb.FindSubset(tt.args.matchCriteria).PrioritySubset().GetOrCreateHostSubset(0).Hosts() {\n\t\t\t\tif host.Hostname() != tt.want[idx].Hostname() {\n\t\t\t\t\tt.Errorf(\"subSetLoadBalancer.ChooseHost() = %v, want %v\", host.Hostname(), tt.want[idx].Hostname())\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func (s) TestBalancer_TwoAddresses_ReportingEnabledOOB(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tutilSetter func(orca.ServerMetricsRecorder, float64)\n\t}{{\n\t\tname: 
\"application_utilization\",\n\t\tutilSetter: func(smr orca.ServerMetricsRecorder, val float64) {\n\t\t\tsmr.SetApplicationUtilization(val)\n\t\t},\n\t}, {\n\t\tname: \"cpu_utilization\",\n\t\tutilSetter: func(smr orca.ServerMetricsRecorder, val float64) {\n\t\t\tsmr.SetCPUUtilization(val)\n\t\t},\n\t}, {\n\t\tname: \"application over cpu\",\n\t\tutilSetter: func(smr orca.ServerMetricsRecorder, val float64) {\n\t\t\tsmr.SetApplicationUtilization(val)\n\t\t\tsmr.SetCPUUtilization(2.0) // ignored because ApplicationUtilization is set\n\t\t},\n\t}}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\t\t\tdefer cancel()\n\n\t\t\tsrv1 := startServer(t, reportOOB)\n\t\t\tsrv2 := startServer(t, reportOOB)\n\n\t\t\t// srv1 starts loaded and srv2 starts without load; ensure RPCs are routed\n\t\t\t// disproportionately to srv2 (10:1).\n\t\t\tsrv1.oobMetrics.SetQPS(10.0)\n\t\t\ttc.utilSetter(srv1.oobMetrics, 1.0)\n\n\t\t\tsrv2.oobMetrics.SetQPS(10.0)\n\t\t\ttc.utilSetter(srv2.oobMetrics, 0.1)\n\n\t\t\tsc := svcConfig(t, oobConfig)\n\t\t\tif err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil {\n\t\t\t\tt.Fatalf(\"Error starting client: %v\", err)\n\t\t\t}\n\t\t\taddrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}}\n\t\t\tsrv1.R.UpdateState(resolver.State{Addresses: addrs})\n\n\t\t\t// Call each backend once to ensure the weights have been received.\n\t\t\tensureReached(ctx, t, srv1.Client, 2)\n\n\t\t\t// Wait for the weight update period to allow the new weights to be processed.\n\t\t\ttime.Sleep(weightUpdatePeriod)\n\t\t\tcheckWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10})\n\t\t})\n\t}\n}", "func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...interface{}) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", 
msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func TestMultiHandlers(t *testing.T) {\n\t// New logger for output stderr\n\tlogStd := New(\"\", LevelDebug, os.Stderr)\n\tAddHandler(logStd)\n\n\t// New logger for output file\n\tfw, err := FileWriter(\"logs/test.log\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlogFile := New(\"\", LevelDebug, fw)\n\tAddHandler(logFile)\n\n\t// Test\n\tDebug(\"MultiHandlers: Debug\")\n\tInfo(\"MultiHandlers: Info\")\n\tWarn(\"MultiHandlers: Warn\")\n\tError(\"MultiHandlers: Error\")\n\t//Panic(\"MultiHandlers: Panic\")\n\t//Fatal(\"MultiHandlers: Fatal\")\n}", "func TestMainImplForSyncMultipleMounts(t *testing.T) {\n\t// ---- Data setup ----\n\tnoOfMounts := 3\n\ttestMounts := make([]s3Mount, noOfMounts)\n\ttestBucketName := \"test-bucket\"\n\n\ttestMountId1 := \"TestMainImplForSyncMultipleMounts1\"\n\tnoOfFilesInMount1 := 5\n\ttestMounts[0] = *putReadOnlyTestMountFiles(t, testBucketName, testMountId1, noOfFilesInMount1)\n\n\ttestMountId2 := \"TestMainImplForSyncMultipleMounts2\"\n\tnoOfFilesInMount2 := 1\n\ttestMounts[1] = *putReadOnlyTestMountFiles(t, testBucketName, testMountId2, noOfFilesInMount2)\n\n\ttestMountId3 := \"TestMainImplForSyncMultipleMounts3\"\n\tnoOfFilesInMount3 := 0 // Test mount containing no files (simulating empty folder in S3)\n\ttestMounts[2] = *putReadOnlyTestMountFiles(t, testBucketName, testMountId3, noOfFilesInMount3)\n\n\ttestMountsJsonBytes, err := json.Marshal(testMounts)\n\ttestMountsJson := string(testMountsJsonBytes)\n\n\tif err != nil {\n\t\t// Fail test in case of any errors\n\t\tt.Logf(\"Error creating test mount setup data %s\", err)\n\t}\n\n\t// ---- Inputs ----\n\tconcurrency := 5\n\trecurringDownloads := true\n\tstopRecurringDownloadsAfter := 5\n\tdownloadInterval := 1\n\n\tfmt.Printf(\"Input: \\n\\n%s\\n\\n\", testMountsJson)\n\n\tvar wg sync.WaitGroup\n\t// Trigger recurring download in a separate thread and increment the wait 
group counter\n\twg.Add(1)\n\tgo func() {\n\t\t// ---- Run code under test ----\n\t\terr = mainImpl(testAwsSession, debug, recurringDownloads, stopRecurringDownloadsAfter, downloadInterval, -1, concurrency, testMountsJson, destinationBase, testRegion)\n\t\tif err != nil {\n\t\t\t// Fail test in case of any errors\n\t\t\tt.Logf(\"Error running the main s3-synchronizer with testMountsJson %s\", testMountsJson)\n\t\t\tt.Errorf(\"Error: %v\", err)\n\t\t}\n\n\t\t// Decrement wait group counter to allow this test case to exit\n\t\twg.Done()\n\t}()\n\n\t// In a separate thread add few more files to the mount point and verify that they get downloaded\n\t// by the recurring downloader thread after the dow\n\twg.Add(1)\n\tgo func() {\n\t\t// TEST FOR ADD -- NEW UPLOAD TO S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// ------------------------------------------------------------\n\n\t\t// Upload same number of files in the mount again (i.e., double the noOfFilesInMount)\n\t\ttestMounts[0] = *putReadOnlyTestMountFiles(t, testBucketName, testMountId1, 2*noOfFilesInMount1)\n\t\ttestMounts[1] = *putReadOnlyTestMountFiles(t, testBucketName, testMountId2, 2*noOfFilesInMount2)\n\t\ttestMounts[2] = *putReadOnlyTestMountFiles(t, testBucketName, testMountId3, 2*noOfFilesInMount3)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow for\n\t\t// uploaded files to get downloaded\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the newly uploaded files are automatically downloaded after the download interval\n\t\tassertFilesDownloaded(t, testMountId1, 2*noOfFilesInMount1)\n\t\tassertFilesDownloaded(t, testMountId2, 2*noOfFilesInMount2)\n\t\tassertFilesDownloaded(t, testMountId3, 2*noOfFilesInMount3)\n\n\t\t// TEST FOR UPDATE -- UPLOAD TO EXISTING FILES IN S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// -----------------------------------------------------------------------------\n\n\t\t// Update the files in 
S3\n\t\tupdateTestMountFiles(t, testFakeBucketName, testMountId1, noOfFilesInMount1)\n\t\tupdateTestMountFiles(t, testFakeBucketName, testMountId2, noOfFilesInMount2)\n\t\tupdateTestMountFiles(t, testFakeBucketName, testMountId3, noOfFilesInMount3)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow for\n\t\t// uploaded files to get downloaded\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the updated files are automatically downloaded after the download interval\n\t\tassertUpdatedFilesDownloaded(t, testMountId1, noOfFilesInMount1)\n\t\tassertUpdatedFilesDownloaded(t, testMountId2, noOfFilesInMount2)\n\t\tassertUpdatedFilesDownloaded(t, testMountId3, noOfFilesInMount3)\n\n\t\t// TEST FOR DELETE -- DELETE FROM S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// ------------------------------------------------------------\n\n\t\tfileIdxToDelete1 := noOfFilesInMount1 + 1\n\t\tfileIdxToDelete2 := noOfFilesInMount2 + 1\n\t\t// Delete some files from S3 and make sure they automatically get deleted from local file system\n\t\tdeleteTestMountFile(t, testFakeBucketName, testMountId1, fileIdxToDelete1)\n\t\tdeleteTestMountFile(t, testFakeBucketName, testMountId2, fileIdxToDelete2)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow sync to happen\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the file deleted from S3 are automatically deleted after the download interval\n\t\tassertFileDeleted(t, testMountId1, fileIdxToDelete1)\n\t\tassertFileDeleted(t, testMountId2, fileIdxToDelete2)\n\n\t\t// Decrement wait group counter to allow this test case to exit\n\t\twg.Done()\n\t}()\n\n\twg.Wait() // Wait until all spawned go routines complete before existing the test case\n}", "func TestProcessor_StartWithErrorAfterRebalance(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer 
ctrl.Finish()\n\n\tvar (\n\t\terr error\n\t\tconsumer = mock.NewMockConsumer(ctrl)\n\t\tst = mock.NewMockStorage(ctrl)\n\t\tsb = func(topic string, par int32) (storage.Storage, error) {\n\t\t\treturn st, nil\n\t\t}\n\t\tfinal = make(chan bool)\n\t\tch = make(chan kafka.Event)\n\t\tp = createProcessor(t, ctrl, consumer, 3, sb)\n\t\tvalue = []byte(\"value\")\n\t)\n\t// -- expectations --\n\t// 1. start\n\tconsumer.EXPECT().Subscribe(topOff).Return(nil)\n\tconsumer.EXPECT().Events().Return(ch).AnyTimes()\n\t// 2. rebalance\n\tst.EXPECT().Open().Times(3)\n\tst.EXPECT().GetOffset(int64(-2)).Return(int64(123), nil).Times(3)\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(0), int64(123))\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(1), int64(123))\n\tconsumer.EXPECT().AddPartition(tableName(group), int32(2), int64(123))\n\t// 3. message\n\tgomock.InOrder(\n\t\tst.EXPECT().Set(\"key\", value).Return(nil),\n\t\tst.EXPECT().SetOffset(int64(1)),\n\t\tst.EXPECT().MarkRecovered(),\n\t)\n\t// 4. error\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(0))\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(1))\n\tconsumer.EXPECT().RemovePartition(tableName(group), int32(2))\n\tst.EXPECT().Close().Times(3)\n\tconsumer.EXPECT().Close().Do(func() { close(ch) })\n\n\t// -- test --\n\t// 1. start\n\tgo func() {\n\t\terr = p.Run(context.Background())\n\t\tensure.NotNil(t, err)\n\t\tclose(final)\n\t}()\n\n\t// 2. rebalance\n\tensure.True(t, len(p.partitions) == 0)\n\tch <- (*kafka.Assignment)(&map[int32]int64{0: -1, 1: -1, 2: -1})\n\terr = syncWith(t, ch, -1) // with processor\n\tensure.Nil(t, err)\n\tensure.True(t, len(p.partitions) == 3)\n\n\t// 3. message\n\tch <- &kafka.Message{\n\t\tTopic: tableName(group),\n\t\tPartition: 1,\n\t\tOffset: 1,\n\t\tKey: \"key\",\n\t\tValue: value,\n\t}\n\terr = syncWith(t, ch, 1) // with partition\n\tensure.Nil(t, err)\n\n\t// 4. receive error\n\tch <- new(kafka.Error)\n\n\t// 5. 
stop\n\terr = doTimed(t, func() { <-final })\n\tensure.Nil(t, err)\n}", "func TestAddAllEventHandlers(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tgvkMap map[framework.GVK]framework.ActionType\n\t\texpectStaticInformers map[reflect.Type]bool\n\t\texpectDynamicInformers map[schema.GroupVersionResource]bool\n\t}{\n\t\t{\n\t\t\tname: \"default handlers in framework\",\n\t\t\tgvkMap: map[framework.GVK]framework.ActionType{},\n\t\t\texpectStaticInformers: map[reflect.Type]bool{\n\t\t\t\treflect.TypeOf(&v1.Pod{}): true,\n\t\t\t\treflect.TypeOf(&v1.Node{}): true,\n\t\t\t\treflect.TypeOf(&v1.Namespace{}): true,\n\t\t\t},\n\t\t\texpectDynamicInformers: map[schema.GroupVersionResource]bool{},\n\t\t},\n\t\t{\n\t\t\tname: \"add GVKs handlers defined in framework dynamically\",\n\t\t\tgvkMap: map[framework.GVK]framework.ActionType{\n\t\t\t\t\"Pod\": framework.Add | framework.Delete,\n\t\t\t\t\"PersistentVolume\": framework.Delete,\n\t\t\t\t\"storage.k8s.io/CSIStorageCapacity\": framework.Update,\n\t\t\t},\n\t\t\texpectStaticInformers: map[reflect.Type]bool{\n\t\t\t\treflect.TypeOf(&v1.Pod{}): true,\n\t\t\t\treflect.TypeOf(&v1.Node{}): true,\n\t\t\t\treflect.TypeOf(&v1.Namespace{}): true,\n\t\t\t\treflect.TypeOf(&v1.PersistentVolume{}): true,\n\t\t\t\treflect.TypeOf(&storagev1.CSIStorageCapacity{}): true,\n\t\t\t},\n\t\t\texpectDynamicInformers: map[schema.GroupVersionResource]bool{},\n\t\t},\n\t\t{\n\t\t\tname: \"add GVKs handlers defined in plugins dynamically\",\n\t\t\tgvkMap: map[framework.GVK]framework.ActionType{\n\t\t\t\t\"daemonsets.v1.apps\": framework.Add | framework.Delete,\n\t\t\t\t\"cronjobs.v1.batch\": framework.Delete,\n\t\t\t},\n\t\t\texpectStaticInformers: map[reflect.Type]bool{\n\t\t\t\treflect.TypeOf(&v1.Pod{}): true,\n\t\t\t\treflect.TypeOf(&v1.Node{}): true,\n\t\t\t\treflect.TypeOf(&v1.Namespace{}): true,\n\t\t\t},\n\t\t\texpectDynamicInformers: map[schema.GroupVersionResource]bool{\n\t\t\t\t{Group: \"apps\", Version: \"v1\", Resource: 
\"daemonsets\"}: true,\n\t\t\t\t{Group: \"batch\", Version: \"v1\", Resource: \"cronjobs\"}: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"add GVKs handlers defined in plugins dynamically, with one illegal GVK form\",\n\t\t\tgvkMap: map[framework.GVK]framework.ActionType{\n\t\t\t\t\"daemonsets.v1.apps\": framework.Add | framework.Delete,\n\t\t\t\t\"custommetrics.v1beta1\": framework.Update,\n\t\t\t},\n\t\t\texpectStaticInformers: map[reflect.Type]bool{\n\t\t\t\treflect.TypeOf(&v1.Pod{}): true,\n\t\t\t\treflect.TypeOf(&v1.Node{}): true,\n\t\t\t\treflect.TypeOf(&v1.Namespace{}): true,\n\t\t\t},\n\t\t\texpectDynamicInformers: map[schema.GroupVersionResource]bool{\n\t\t\t\t{Group: \"apps\", Version: \"v1\", Resource: \"daemonsets\"}: true,\n\t\t\t},\n\t\t},\n\t}\n\n\tscheme := runtime.NewScheme()\n\tvar localSchemeBuilder = runtime.SchemeBuilder{\n\t\tappsv1.AddToScheme,\n\t\tbatchv1.AddToScheme,\n\t}\n\tlocalSchemeBuilder.AddToScheme(scheme)\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tlogger, ctx := ktesting.NewTestContext(t)\n\t\t\tctx, cancel := context.WithCancel(ctx)\n\t\t\tdefer cancel()\n\n\t\t\tinformerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)\n\t\t\tschedulingQueue := queue.NewTestQueueWithInformerFactory(ctx, nil, informerFactory)\n\t\t\ttestSched := Scheduler{\n\t\t\t\tStopEverything: ctx.Done(),\n\t\t\t\tSchedulingQueue: schedulingQueue,\n\t\t\t\tlogger: logger,\n\t\t\t}\n\n\t\t\tdynclient := dyfake.NewSimpleDynamicClient(scheme)\n\t\t\tdynInformerFactory := dynamicinformer.NewDynamicSharedInformerFactory(dynclient, 0)\n\n\t\t\tif err := addAllEventHandlers(&testSched, informerFactory, dynInformerFactory, tt.gvkMap); err != nil {\n\t\t\t\tt.Fatalf(\"Add event handlers failed, error = %v\", err)\n\t\t\t}\n\n\t\t\tinformerFactory.Start(testSched.StopEverything)\n\t\t\tdynInformerFactory.Start(testSched.StopEverything)\n\t\t\tstaticInformers := 
informerFactory.WaitForCacheSync(testSched.StopEverything)\n\t\t\tdynamicInformers := dynInformerFactory.WaitForCacheSync(testSched.StopEverything)\n\n\t\t\tif diff := cmp.Diff(tt.expectStaticInformers, staticInformers); diff != \"\" {\n\t\t\t\tt.Errorf(\"Unexpected diff (-want, +got):\\n%s\", diff)\n\t\t\t}\n\t\t\tif diff := cmp.Diff(tt.expectDynamicInformers, dynamicInformers); diff != \"\" {\n\t\t\t\tt.Errorf(\"Unexpected diff (-want, +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}", "func (m *StorageMock) MinimockSplitInspect() {\n\tfor _, e := range m.SplitMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to StorageMock.Split with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.SplitMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterSplitCounter) < 1 {\n\t\tif m.SplitMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to StorageMock.Split\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to StorageMock.Split with params: %#v\", *m.SplitMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcSplit != nil && mm_atomic.LoadUint64(&m.afterSplitCounter) < 1 {\n\t\tm.t.Error(\"Expected call to StorageMock.Split\")\n\t}\n}", "func TestSplit(t *testing.T){\r\n\tif !TESTSPLIT{\r\n\t\treturn\r\n\t}\r\n\tcontents := make([]string, 2)\r\n\tcontents[0] = \"duckduck\"\r\n\tcontents[1] = \"go\"\r\n\tmkcl, err := mock.NewCluster(\"input_spec.json\")\r\n\trafts,err := makeMockRafts(mkcl,\"log\", 250, 350) \r\n\tcheckError(t,err, \"While creating mock clusters\")\r\n\ttime.Sleep(5*time.Second)\r\n\trafts[0].Append([]byte(contents[0]))\r\n\ttime.Sleep(5*time.Second)\r\n\tmkcl.Lock()\r\n\tpart1 := []int{1,3}\r\n\tpart2 := []int{2,4}\r\n\trafts[1].smLock.RLock()\r\n\tldrId := 
rafts[4].LeaderId()\r\n\trafts[1].smLock.RUnlock()\r\n\tfmt.Printf(\"ldrId:%v\\n\", ldrId)\r\n\tif ldrId % 2 == 0{\r\n\t\tpart2 = append(part2, 5)\r\n\t}else{\r\n\t\tpart1 = append(part1, 5)\r\n\t}\r\n\tmkcl.Unlock()\r\n\tmkcl.Partition(part1, part2)\r\n\tdebugRaftTest(fmt.Sprintf(\"Partitions: %v %v\\n\", part1, part2))\r\n\ttime.Sleep(4*time.Second)\r\n\tmkcl.Lock()\r\n\trafts[ldrId-1].Append([]byte(contents[1]))\r\n\tmkcl.Unlock()\r\n\ttime.Sleep(8*time.Second)\r\n\tmkcl.Heal()\r\n\tdebugRaftTest(fmt.Sprintf(\"Healed\\n\"))\r\n\ttime.Sleep(8*time.Second)\r\n\tciarr := []int{0,0,0,0,0}\r\n\tfor cnt:=0;cnt<5;{\r\n\t\tfor idx, node := range rafts {\r\n\t\t\tselect {\r\n\t\t\tcase ci := <-node.CommitChannel():\r\n\t\t\t\tif ci.Err != nil {\r\n\t\t\t\t\tfmt.Fprintln(os.Stderr,ci.Err)\r\n\t\t\t\t}\r\n\t\t\t\t//Testing CommitChannel \r\n\t\t\t\texpect(t,contents[ciarr[idx]],string(ci.Data))\r\n\t\t\t\tciarr[idx] += 1\r\n\t\t\t\tif ciarr[idx] == 2{\r\n\t\t\t\t\tcnt +=1 \r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tfor _, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tnode.Shutdown()\r\n\t}\r\n}", "func (s *HealthCheckSuite) TestMultipleRoutersOnSameService(c *check.C) {\n\tfile := s.adaptFile(c, \"fixtures/healthcheck/multiple-routers-one-same-service.toml\", struct {\n\t\tServer1 string\n\t}{s.whoami1IP})\n\tdefer os.Remove(file)\n\n\tcmd, display := s.traefikCmd(withConfigFile(file))\n\tdefer display(c)\n\terr := cmd.Start()\n\tc.Assert(err, checker.IsNil)\n\tdefer s.killCmd(cmd)\n\n\t// wait for traefik\n\terr = try.GetRequest(\"http://127.0.0.1:8080/api/rawdata\", 60*time.Second, try.BodyContains(\"Host(`test.localhost`)\"))\n\tc.Assert(err, checker.IsNil)\n\n\t// Set whoami health to 200 to be sure to start with the wanted status\n\tclient := &http.Client{}\n\tstatusOkReq, err := http.NewRequest(http.MethodPost, \"http://\"+s.whoami1IP+\"/health\", 
bytes.NewBuffer([]byte(\"200\")))\n\tc.Assert(err, checker.IsNil)\n\t_, err = client.Do(statusOkReq)\n\tc.Assert(err, checker.IsNil)\n\n\t// check healthcheck on web1 entrypoint\n\thealthReqWeb1, err := http.NewRequest(http.MethodGet, \"http://127.0.0.1:8000/health\", nil)\n\tc.Assert(err, checker.IsNil)\n\thealthReqWeb1.Host = \"test.localhost\"\n\terr = try.Request(healthReqWeb1, 1*time.Second, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n\n\t// check healthcheck on web2 entrypoint\n\thealthReqWeb2, err := http.NewRequest(http.MethodGet, \"http://127.0.0.1:9000/health\", nil)\n\tc.Assert(err, checker.IsNil)\n\thealthReqWeb2.Host = \"test.localhost\"\n\n\terr = try.Request(healthReqWeb2, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n\n\t// Set whoami health to 500\n\tstatusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, \"http://\"+s.whoami1IP+\"/health\", bytes.NewBuffer([]byte(\"500\")))\n\tc.Assert(err, checker.IsNil)\n\t_, err = client.Do(statusInternalServerErrorReq)\n\tc.Assert(err, checker.IsNil)\n\n\t// Verify no backend service is available due to failing health checks\n\terr = try.Request(healthReqWeb1, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))\n\tc.Assert(err, checker.IsNil)\n\n\terr = try.Request(healthReqWeb2, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))\n\tc.Assert(err, checker.IsNil)\n\n\t// Change one whoami health to 200\n\tstatusOKReq1, err := http.NewRequest(http.MethodPost, \"http://\"+s.whoami1IP+\"/health\", bytes.NewBuffer([]byte(\"200\")))\n\tc.Assert(err, checker.IsNil)\n\t_, err = client.Do(statusOKReq1)\n\tc.Assert(err, checker.IsNil)\n\n\t// Verify health check\n\terr = try.Request(healthReqWeb1, 3*time.Second, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n\n\terr = try.Request(healthReqWeb2, 3*time.Second, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n}", "func 
TestPutBulkBlobSpanningChunksStreamAccessDoesNotExist(t *testing.T) {\n defer testutils.DeleteBucketContents(client, testBucket)\n\n helper := helpers.NewHelpers(client)\n\n errorCallbackCalled := false\n var mutex sync.Mutex\n errorCallback := func(objectName string, err error) {\n mutex.Lock()\n errorCallbackCalled = true\n mutex.Unlock()\n\n ds3Testing.AssertString(t, \"object name\", LargeBookTitle, objectName)\n }\n\n strategy := helpers.WriteTransferStrategy{\n BlobStrategy: newTestBlobStrategy(),\n Options: helpers.WriteBulkJobOptions{MaxUploadSize: &helpers.MinUploadSize},\n Listeners: helpers.ListenerStrategy{ErrorCallback:errorCallback},\n }\n\n // open a file but lie that its bigger than it is\n f, err := os.Open(testutils.BookPath + testutils.BookTitles[0])\n writeObj := helperModels.PutObject{\n PutObject: ds3Models.Ds3PutObject{Name: LargeBookTitle, Size: 20*1024*1024},\n ChannelBuilder: &testStreamAccessReadChannelBuilder{f: f},\n }\n\n var writeObjects []helperModels.PutObject\n writeObjects = append(writeObjects, writeObj)\n\n ds3Testing.AssertNilError(t, err)\n\n _, err = helper.PutObjects(testBucket, writeObjects, strategy)\n ds3Testing.AssertNilError(t, err)\n ds3Testing.AssertBool(t, \"error callback called\", true, errorCallbackCalled)\n}", "func TestLifecycleManyAddons(t *testing.T) {\n\trunLifecycleTestAWS(&LifecycleTestOptions{\n\t\tt: t,\n\t\tSrcDir: \"many-addons\",\n\t\tClusterName: \"minimal.example.com\",\n\t})\n}", "func TestProcessor_StartWithErrorBeforeRebalance(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\ttype TestCase struct {\n\t\tname string\n\t\tevent kafka.Event\n\t}\n\ttests := []TestCase{\n\t\t{\"error\", &kafka.Error{Err: errors.New(\"something\")}},\n\t\t{\"message\", new(kafka.Message)},\n\t\t{\"EOF\", new(kafka.EOF)},\n\t\t{\"BOF\", new(kafka.BOF)},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tvar (\n\t\t\t\terr error\n\t\t\t\tconsumer = 
mock.NewMockConsumer(ctrl)\n\t\t\t\tst = mock.NewMockStorage(ctrl)\n\t\t\t\tsb = func(topic string, par int32) (storage.Storage, error) {\n\t\t\t\t\treturn st, nil\n\t\t\t\t}\n\t\t\t\tfinal = make(chan bool)\n\t\t\t\tch = make(chan kafka.Event)\n\t\t\t\tp = createProcessor(t, ctrl, consumer, 3, sb)\n\t\t\t)\n\n\t\t\tgomock.InOrder(\n\t\t\t\tconsumer.EXPECT().Subscribe(topOff).Return(nil),\n\t\t\t\tconsumer.EXPECT().Events().Return(ch),\n\t\t\t\tconsumer.EXPECT().Close().Do(func() { close(ch) }),\n\t\t\t)\n\t\t\tgo func() {\n\t\t\t\terr = p.Run(context.Background())\n\t\t\t\tensure.NotNil(t, err)\n\t\t\t\tclose(final)\n\t\t\t}()\n\n\t\t\tch <- tc.event\n\n\t\t\terr = doTimed(t, func() {\n\t\t\t\t<-final\n\t\t\t})\n\t\t\tensure.Nil(t, err)\n\t\t})\n\t}\n}", "func (c *TestCase) ThenInspectEvents(f func(t *testing.T, events []rangedb.Event)) func(t *testing.T) {\n\treturn func(t *testing.T) {\n\t\tt.Helper()\n\n\t\tctx := rangedbtest.TimeoutContext(t)\n\t\tstreamPreviousEventCounts := make(map[string]uint64)\n\t\tfor _, event := range c.previousEvents {\n\t\t\tstreamPreviousEventCounts[rangedb.GetEventStream(event)]++\n\t\t\tstreamName := rangedb.GetEventStream(event)\n\t\t\trangedbtest.BlockingSaveEvents(t, c.store, streamName, &rangedb.EventRecord{Event: event})\n\t\t}\n\n\t\tc.dispatch(c.command)\n\n\t\tvar events []rangedb.Event\n\t\tfor _, stream := range getStreamsFromStore(c.store) {\n\t\t\tstreamSequenceNumber := streamPreviousEventCounts[stream] + 1\n\t\t\tactualEvents, err := recordIteratorToSlice(c.store.EventsByStream(ctx, streamSequenceNumber, stream))\n\t\t\trequire.NoError(t, err)\n\n\t\t\tevents = append(events, actualEvents...)\n\t\t}\n\n\t\tf(t, events)\n\t}\n}", "func (_m *MockSegmentManager) Put(segmentType commonpb.SegmentState, segments ...Segment) {\n\t_va := make([]interface{}, len(segments))\n\tfor _i := range segments {\n\t\t_va[_i] = segments[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, segmentType)\n\t_ca = append(_ca, 
_va...)\n\t_m.Called(_ca...)\n}", "func (m *MockCallback) OnRemove(arg0 int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemove\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestMainImplForBiDirectionalSyncMultipleMounts(t *testing.T) {\n\t// ---- Data setup ----\n\tnoOfMounts := 3\n\ttestMounts := make([]s3Mount, noOfMounts)\n\n\ttestMountId1 := \"TestMainImplForBiDirectionalSyncMultipleMounts1\"\n\tnoOfFilesInMount1 := 5\n\ttestMounts[0] = *putWriteableTestMountFiles(t, testFakeBucketName, testMountId1, noOfFilesInMount1)\n\n\ttestMountId2 := \"TestMainImplForBiDirectionalSyncMultipleMounts2\"\n\tnoOfFilesInMount2 := 5\n\ttestMounts[1] = *putWriteableTestMountFiles(t, testFakeBucketName, testMountId2, noOfFilesInMount2)\n\n\ttestMountId3 := \"TestMainImplForBiDirectionalSyncMultipleMounts3\"\n\tnoOfFilesInMount3 := 5\n\t// Make the third mount read-only to mix read-write and read-only mounts\n\ttestMounts[2] = *putReadOnlyTestMountFiles(t, testFakeBucketName, testMountId3, noOfFilesInMount3)\n\n\ttestMountsJsonBytes, err := json.Marshal(testMounts)\n\ttestMountsJson := string(testMountsJsonBytes)\n\n\tif err != nil {\n\t\t// Fail test in case of any errors\n\t\tt.Logf(\"Error creating test mount setup data %s\", err)\n\t}\n\n\t// ---- Inputs ----\n\tconcurrency := 5\n\trecurringDownloads := true\n\tstopRecurringDownloadsAfter := 45\n\tdownloadInterval := 1\n\tstopUploadWatchersAfter := 45\n\n\tfmt.Printf(\"Input: \\n\\n%s\\n\\n\", testMountsJson)\n\n\tvar wg sync.WaitGroup\n\n\t// Trigger recurring download in a separate thread and increment the wait group counter\n\twg.Add(1)\n\tgo func() {\n\t\t// ---- Run code under test ----\n\t\terr = mainImpl(testAwsSession, debug, recurringDownloads, stopRecurringDownloadsAfter, downloadInterval, stopUploadWatchersAfter, concurrency, testMountsJson, destinationBase, testRegion)\n\t\tif err != nil {\n\t\t\t// Fail test in case of any errors\n\t\t\tt.Logf(\"Error running the main 
s3-synchronizer with testMountsJson %s\", testMountsJson)\n\t\t\tt.Errorf(\"Error: %v\", err)\n\t\t}\n\n\t\t// Decrement wait group counter to allow this test case to exit\n\t\twg.Done()\n\t}()\n\n\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t// Running S3 --> Local and Local --> S3 sync in separate threads to make sure they can happen in parallel\n\t// and work well with each other\n\n\t// In a separate thread add/update/delete few files to the S3 location and verify that they get downloaded\n\t// by the recurring downloader thread after the downloadInterval\n\twg.Add(1)\n\tgo func() {\n\t\t// TEST FOR ADD -- NEW UPLOAD TO S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// ------------------------------------------------------------\n\n\t\t// Upload same number of files in the mount again (i.e., double the noOfFilesInMount)\n\t\ttestMounts[0] = *putWriteableTestMountFiles(t, testFakeBucketName, testMountId1, 2*noOfFilesInMount1)\n\t\ttestMounts[1] = *putWriteableTestMountFiles(t, testFakeBucketName, testMountId2, 2*noOfFilesInMount2)\n\t\t// Mix read-only and read-write\n\t\ttestMounts[2] = *putReadOnlyTestMountFiles(t, testFakeBucketName, testMountId3, 2*noOfFilesInMount3)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow for\n\t\t// uploaded files to get downloaded\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the newly uploaded files are automatically downloaded after the download interval\n\t\tassertFilesDownloaded(t, testMountId1, 2*noOfFilesInMount1)\n\t\tassertFilesDownloaded(t, testMountId2, 2*noOfFilesInMount2)\n\t\tassertFilesDownloaded(t, testMountId3, 2*noOfFilesInMount3)\n\n\t\t// TEST FOR UPDATE -- UPLOAD TO EXISTING FILES IN S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// -----------------------------------------------------------------------------\n\n\t\t// Update the files in S3\n\t\tupdateTestMountFiles(t, testFakeBucketName, testMountId1, 
noOfFilesInMount1)\n\t\tupdateTestMountFiles(t, testFakeBucketName, testMountId2, noOfFilesInMount2)\n\t\tupdateTestMountFiles(t, testFakeBucketName, testMountId3, noOfFilesInMount3)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow for\n\t\t// uploaded files to get downloaded\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the updated files are automatically downloaded after the download interval\n\t\tassertUpdatedFilesDownloaded(t, testMountId1, noOfFilesInMount1)\n\t\tassertUpdatedFilesDownloaded(t, testMountId2, noOfFilesInMount2)\n\t\tassertUpdatedFilesDownloaded(t, testMountId3, noOfFilesInMount3)\n\n\t\t// TEST FOR DELETE -- DELETE FROM S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// ------------------------------------------------------------\n\n\t\tfileIdxToDelete1 := noOfFilesInMount1 + 1\n\t\tfileIdxToDelete2 := noOfFilesInMount2 + 1\n\t\tfileIdxToDelete3 := noOfFilesInMount3 + 1\n\t\t// Delete some files from S3 and make sure they automatically get deleted from local file system\n\t\tdeleteTestMountFile(t, testFakeBucketName, testMountId1, fileIdxToDelete1)\n\t\tdeleteTestMountFile(t, testFakeBucketName, testMountId2, fileIdxToDelete2)\n\t\tdeleteTestMountFile(t, testFakeBucketName, testMountId3, fileIdxToDelete3)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow sync to happen\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the file deleted from S3 are automatically deleted after the download interval\n\t\tassertFileDeleted(t, testMountId1, fileIdxToDelete1)\n\t\tassertFileDeleted(t, testMountId2, fileIdxToDelete2)\n\t\tassertFileDeleted(t, testMountId3, fileIdxToDelete3)\n\n\t\t// Decrement wait group counter to allow this test case to exit\n\t\twg.Done()\n\t}()\n\n\t// In a yet another thread add/update/delete few files to the local file system and 
verify that they get synced up to S3 correctly\n\twg.Add(1)\n\tgo func() {\n\t\t// TEST FOR ADD -- NEW FILE TO LOCAL FILE SYSTEM --> S3 SYNC\n\t\t// ------------------------------------------------------------\n\n\t\t// Upload all files in the file system\n\t\tcreateTestFilesLocally(t, testMountId1, noOfFilesInMount1)\n\t\tcreateTestFilesLocally(t, testMountId2, noOfFilesInMount2)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system creation event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the newly created files are automatically uploaded\n\t\tassertFilesUploaded(t, testFakeBucketName, testMountId1, noOfFilesInMount1)\n\t\tassertFilesUploaded(t, testFakeBucketName, testMountId2, noOfFilesInMount2)\n\n\t\t// TEST FOR UPDATE -- UPLOAD TO EXISTING FILES IN LOCAL FILE SYSTEM --> S3 SYNC\n\t\t// -----------------------------------------------------------------------------\n\n\t\t// Update the files in local file system\n\t\tupdateTestFilesLocally(t, testMountId1, noOfFilesInMount1)\n\t\tupdateTestFilesLocally(t, testMountId2, noOfFilesInMount2)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the updated files are automatically uploaded\n\t\tassertUpdatedFilesUploaded(t, testFakeBucketName, testMountId1, noOfFilesInMount1)\n\t\tassertUpdatedFilesUploaded(t, testFakeBucketName, testMountId2, noOfFilesInMount2)\n\n\t\t// TEST FOR DELETE -- DELETE FROM LOCAL FILE SYSTEM --> S3 SYNC\n\t\t// ------------------------------------------------------------\n\t\tfileIdxToDelete1 := 1\n\t\tfileIdxToDelete2 := 1\n\t\t// Delete some files from local file system and make sure they automatically get uploaded to 
S3\n\t\tdeleteTestFilesLocally(t, testMountId1, fileIdxToDelete1)\n\t\tdeleteTestFilesLocally(t, testMountId2, fileIdxToDelete2)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the deleted files are automatically deleted from S3\n\t\tassertFileDeletedFromS3(t, testFakeBucketName, testMountId1, fileIdxToDelete1)\n\t\tassertFileDeletedFromS3(t, testFakeBucketName, testMountId2, fileIdxToDelete2)\n\n\t\t// TEST FOR RENAME (MOVE) -- RENAME IN LOCAL FILE SYSTEM --> S3 SYNC\n\t\t// --------------------------------------------------------------------\n\t\tfileIdxToMove1 := 0\n\t\tfileIdxToMove2 := 0\n\t\t// Rename some files from local file system and make sure they automatically get renamed in S3\n\t\tmoveTestFileLocally(t, testMountId1, fileIdxToMove1, \"\")\n\t\tmoveTestFileLocally(t, testMountId2, fileIdxToMove2, \"\")\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the renamed files are automatically renamed in S3\n\t\tassertFileMovedInS3(t, testFakeBucketName, testMountId1, fileIdxToMove1, \"\", testFileUpdatedContentTemplate)\n\t\tassertFileMovedInS3(t, testFakeBucketName, testMountId2, fileIdxToMove2, \"\", testFileUpdatedContentTemplate)\n\n\t\t// TEST FOR MOVE to NESTED DIR -- MOVE IN LOCAL FILE SYSTEM TO NESTED DIRECTORY --> S3 SYNC\n\t\t// --------------------------------------------------------------------------------------------\n\t\tfileIdxToMove1 = 2\n\t\tfileIdxToMove2 = 2\n\t\tmoveToSubDir1 := \"nested-level1/nested-level2/nested-level3/\"\n\t\tmoveToSubDir2 := \"nested-level1/nested-level2/nested-level3/nested-level4/\"\n\t\t// 
Move some files in local file system to some nested directory that is part of the mount location\n\t\t// and make sure they automatically get moved in S3\n\t\tmoveTestFileLocally(t, testMountId1, fileIdxToMove1, moveToSubDir1)\n\t\tmoveTestFileLocally(t, testMountId2, fileIdxToMove2, moveToSubDir2)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the moved files are automatically moved in S3\n\t\tassertFileMovedInS3(t, testFakeBucketName, testMountId1, fileIdxToMove1, moveToSubDir1, testFileUpdatedContentTemplate)\n\t\tassertFileMovedInS3(t, testFakeBucketName, testMountId2, fileIdxToMove2, moveToSubDir2, testFileUpdatedContentTemplate)\n\n\t\t// TEST FOR RENAMING A NESTED DIR -- MOVE DIR IN LOCAL FILE SYSTEM --> S3 SYNC\n\t\t// --------------------------------------------------------------------------------------------\n\t\toldDirPath1 := \"nested-level1/nested-level2/nested-level3\"\n\t\tnewDirPath1 := \"nested-level1/nested-level2/nested-level3-renamed\"\n\n\t\t// Move a nested directory local file system to some nested directory that is part of the mount location\n\t\t// and make sure they automatically get moved in S3\n\t\tmoveDirLocally(t, testMountId1, oldDirPath1, newDirPath1)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the moved dir and its files are automatically moved in S3\n\t\tassertDirMovedInS3(t, testFakeBucketName, testMountId1, oldDirPath1, newDirPath1, fileIdxToMove1, testFileUpdatedContentTemplate)\n\n\t\t// TEST FOR MOVE OUT OF THE MOUNT DIRECTORY -- MOVE IN LOCAL FILE SYSTEM TO AN OUTSIDE DIRECTORY --> S3 
SYNC\n\t\t// ------------------------------------------------------------------------------------------------------------\n\t\tfileIdxToMove1 = 3\n\t\tfileIdxToMove2 = 3\n\t\tmoveToSubDir1 = buildDir + \"/\"\n\t\tmoveToSubDir2 = buildDir + \"/\"\n\t\t// Move some files in local file system to an outside directory i.e., directory outside of the mount directory that is monitored\n\t\t// and make sure they automatically get deleted from S3\n\t\tmoveTestFileLocally(t, testMountId1, fileIdxToMove1, moveToSubDir1)\n\t\tmoveTestFileLocally(t, testMountId2, fileIdxToMove2, moveToSubDir2)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the files are automatically deleted from S3\n\t\tassertFileDeletedFromS3(t, testFakeBucketName, testMountId1, fileIdxToMove1)\n\t\tassertFileDeletedFromS3(t, testFakeBucketName, testMountId2, fileIdxToMove2)\n\n\t\t// Decrement wait group counter to allow this test case to exit\n\t\twg.Done()\n\t}()\n\n\twg.Wait() // Wait until all spawned go routines complete before existing the test case\n}", "func (m *MockIByIdPresenter) OnReceived(arg0 aggregates.Topic) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnReceived\", arg0)\n}", "func TestAttestSigner_SigUtils(t *testing.T) {\n\tsig1 := []byte{71, 48, 68, 2, 32, 100, 88, 73, 1, 86, 42, 210, 239, 196, 136, 107, 0, 178, 223, 59, 32, 235, 58, 231, 207, 168, 87, 95, 227, 83, 207, 67, 150, 254, 26, 99, 13, 2, 32, 0, 169, 167, 160, 35, 235, 221, 136, 214, 217, 143, 64, 105, 250, 180, 188, 109, 236, 175, 117, 198, 53, 180, 24, 223, 217, 44, 199, 54, 158, 230, 227, 1}\n\tsig2 := []byte{71, 48, 68, 2, 32, 17, 175, 6, 205, 216, 180, 188, 216, 38, 178, 109, 17, 145, 237, 148, 1, 30, 73, 161, 54, 176, 122, 66, 6, 211, 219, 90, 216, 219, 38, 162, 137, 2, 32, 14, 61, 139, 90, 233, 169, 9, 57, 249, 
101, 38, 109, 147, 244, 151, 182, 93, 136, 64, 221, 158, 172, 238, 208, 71, 106, 39, 50, 194, 185, 230, 102, 1}\n\tsig3 := []byte{71, 48, 68, 2, 32, 17, 175, 6, 205, 216, 180, 188, 216, 38, 178, 109, 17, 145, 237, 148, 1, 30, 73, 161, 54, 176, 122, 66, 6, 211, 219, 90, 216, 219, 38, 162, 137, 2, 32, 14, 61, 139, 90, 233, 169, 9, 57, 249, 101, 38, 109, 145, 244, 151, 182, 93, 136, 64, 221, 158, 172, 238, 208, 71, 106, 39, 50, 194, 185, 230, 102, 1}\n\n\tvar msgA []byte\n\tvar msgB []byte\n\tvar splitMsgA [][]byte\n\tvar splitMsgB [][]byte\n\tvar msgs [][][]byte\n\tvar sigs [][]crypto.Sig\n\n\tnumOfTxInputs := 0\n\n\t// test 1 message 0 signature\n\tsplitMsgA = UnserializeBytes(msgA)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgA, numOfTxInputs)\n\tassert.Equal(t, 0, numOfTxInputs)\n\tassert.Equal(t, [][]byte{}, splitMsgA)\n\tassert.Equal(t, 0, len(splitMsgA))\n\n\tmsgs = [][][]byte{[][]byte{}}\n\tsigs = getSigsFromMsgs(msgs, numOfTxInputs)\n\tassert.Equal(t, [][]crypto.Sig{}, sigs)\n\n\t// test 2 messages 0 signature\n\tsplitMsgA = UnserializeBytes(msgA)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgA, numOfTxInputs)\n\tassert.Equal(t, 0, numOfTxInputs)\n\tsplitMsgB = UnserializeBytes(msgB)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgB, numOfTxInputs)\n\tassert.Equal(t, 0, numOfTxInputs)\n\tassert.Equal(t, [][]byte{}, splitMsgA)\n\tassert.Equal(t, [][]byte{}, splitMsgB)\n\tassert.Equal(t, 0, len(splitMsgA))\n\tassert.Equal(t, 0, len(splitMsgB))\n\n\tmsgs = [][][]byte{[][]byte{}, [][]byte{}}\n\tsigs = getSigsFromMsgs(msgs, numOfTxInputs)\n\tassert.Equal(t, [][]crypto.Sig{}, sigs)\n\n\t// test 1 message 1 signature\n\tnumOfTxInputs = 0\n\tmsgA = sig1\n\n\tsplitMsgA = UnserializeBytes(msgA)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgA, numOfTxInputs)\n\tassert.Equal(t, 1, numOfTxInputs)\n\tassert.Equal(t, [][]byte{sig1[1:]}, splitMsgA)\n\tassert.Equal(t, 1, len(splitMsgA))\n\n\tmsgs = [][][]byte{splitMsgA}\n\tsigs = getSigsFromMsgs(msgs, 
numOfTxInputs)\n\tassert.Equal(t, [][]crypto.Sig{\n\t\t[]crypto.Sig{crypto.Sig(sig1[1:])}}, sigs)\n\n\t// test 1 message 2 signature\n\tnumOfTxInputs = 0\n\tmsgA = sig1\n\tmsgA = append(msgA, sig2...)\n\n\tsplitMsgA = UnserializeBytes(msgA)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgA, numOfTxInputs)\n\tassert.Equal(t, 2, numOfTxInputs)\n\tassert.Equal(t, [][]byte{sig1[1:], sig2[1:]}, splitMsgA)\n\tassert.Equal(t, 2, len(splitMsgA))\n\n\tmsgs = [][][]byte{splitMsgA}\n\tsigs = getSigsFromMsgs(msgs, numOfTxInputs)\n\tassert.Equal(t, [][]crypto.Sig{\n\t\t[]crypto.Sig{crypto.Sig(sig1[1:])},\n\t\t[]crypto.Sig{crypto.Sig(sig2[1:])}}, sigs)\n\n\t// test 2 messages 1 signature\n\tnumOfTxInputs = 0\n\tmsgA = sig1\n\tmsgB = sig3\n\n\tsplitMsgA = UnserializeBytes(msgA)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgA, numOfTxInputs)\n\tassert.Equal(t, 1, numOfTxInputs)\n\tsplitMsgB = UnserializeBytes(msgB)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgB, numOfTxInputs)\n\tassert.Equal(t, 1, numOfTxInputs)\n\tassert.Equal(t, [][]byte{sig1[1:]}, splitMsgA)\n\tassert.Equal(t, [][]byte{sig3[1:]}, splitMsgB)\n\tassert.Equal(t, 1, len(splitMsgA))\n\tassert.Equal(t, 1, len(splitMsgB))\n\n\tmsgs = [][][]byte{splitMsgA, splitMsgB}\n\tsigs = getSigsFromMsgs(msgs, numOfTxInputs)\n\tassert.Equal(t, [][]crypto.Sig{\n\t\t[]crypto.Sig{crypto.Sig(sig1[1:]), crypto.Sig(sig3[1:])}}, sigs)\n\n\t// test 2 messages 2 signatures\n\tnumOfTxInputs = 0\n\tmsgA = sig1\n\tmsgA = append(msgA, sig2...)\n\tmsgB = sig3\n\tmsgB = append(msgB, sig3...)\n\n\tsplitMsgA = UnserializeBytes(msgA)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgA, numOfTxInputs)\n\tassert.Equal(t, 2, numOfTxInputs)\n\tsplitMsgB = UnserializeBytes(msgB)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgB, numOfTxInputs)\n\tassert.Equal(t, 2, numOfTxInputs)\n\tassert.Equal(t, [][]byte{sig1[1:], sig2[1:]}, splitMsgA)\n\tassert.Equal(t, [][]byte{sig3[1:], sig3[1:]}, splitMsgB)\n\tassert.Equal(t, 2, len(splitMsgA))\n\tassert.Equal(t, 
2, len(splitMsgB))\n\n\tmsgs = [][][]byte{splitMsgA, splitMsgB}\n\tsigs = getSigsFromMsgs(msgs, numOfTxInputs)\n\tassert.Equal(t, [][]crypto.Sig{\n\t\t[]crypto.Sig{crypto.Sig(sig1[1:]), crypto.Sig(sig3[1:])},\n\t\t[]crypto.Sig{crypto.Sig(sig2[1:]), crypto.Sig(sig3[1:])}}, sigs)\n\n\t// test 2 messages 0,2 signatures\n\tnumOfTxInputs = 0\n\tmsgA = []byte{}\n\tmsgB = sig3\n\tmsgB = append(msgB, sig3...)\n\n\tsplitMsgA = UnserializeBytes(msgA)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgA, numOfTxInputs)\n\tassert.Equal(t, 0, numOfTxInputs)\n\tsplitMsgB = UnserializeBytes(msgB)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgB, numOfTxInputs)\n\tassert.Equal(t, 2, numOfTxInputs)\n\tassert.Equal(t, [][]byte{}, splitMsgA)\n\tassert.Equal(t, [][]byte{sig3[1:], sig3[1:]}, splitMsgB)\n\tassert.Equal(t, 0, len(splitMsgA))\n\tassert.Equal(t, 2, len(splitMsgB))\n\n\tmsgs = [][][]byte{splitMsgA, splitMsgB}\n\tsigs = getSigsFromMsgs(msgs, numOfTxInputs)\n\tassert.Equal(t, [][]crypto.Sig{\n\t\t[]crypto.Sig{crypto.Sig(sig3[1:])},\n\t\t[]crypto.Sig{crypto.Sig(sig3[1:])}}, sigs)\n\n\t// test 2 messages 2,0 signatures\n\tnumOfTxInputs = 0\n\tmsgA = sig1\n\tmsgA = append(msgA, sig2...)\n\tmsgB = []byte{}\n\n\tsplitMsgA = UnserializeBytes(msgA)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgA, numOfTxInputs)\n\tassert.Equal(t, 2, numOfTxInputs)\n\tsplitMsgB = UnserializeBytes(msgB)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgB, numOfTxInputs)\n\tassert.Equal(t, 2, numOfTxInputs)\n\tassert.Equal(t, [][]byte{sig1[1:], sig2[1:]}, splitMsgA)\n\tassert.Equal(t, [][]byte{}, splitMsgB)\n\tassert.Equal(t, 2, len(splitMsgA))\n\tassert.Equal(t, 0, len(splitMsgB))\n\n\tmsgs = [][][]byte{splitMsgA, splitMsgB}\n\tsigs = getSigsFromMsgs(msgs, numOfTxInputs)\n\tassert.Equal(t, [][]crypto.Sig{\n\t\t[]crypto.Sig{crypto.Sig(sig1[1:])},\n\t\t[]crypto.Sig{crypto.Sig(sig2[1:])}}, sigs)\n\n\t// test 2 messages 1,2 signatures\n\tnumOfTxInputs = 0\n\tmsgA = sig1\n\tmsgB = sig3\n\tmsgB = append(msgB, 
sig3...)\n\n\tsplitMsgA = UnserializeBytes(msgA)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgA, numOfTxInputs)\n\tassert.Equal(t, 1, numOfTxInputs)\n\tsplitMsgB = UnserializeBytes(msgB)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgB, numOfTxInputs)\n\tassert.Equal(t, 2, numOfTxInputs)\n\tassert.Equal(t, [][]byte{sig1[1:]}, splitMsgA)\n\tassert.Equal(t, [][]byte{sig3[1:], sig3[1:]}, splitMsgB)\n\tassert.Equal(t, 1, len(splitMsgA))\n\tassert.Equal(t, 2, len(splitMsgB))\n\n\tmsgs = [][][]byte{splitMsgA, splitMsgB}\n\tsigs = getSigsFromMsgs(msgs, numOfTxInputs)\n\tassert.Equal(t, [][]crypto.Sig{\n\t\t[]crypto.Sig{crypto.Sig(sig1[1:]), crypto.Sig(sig3[1:])},\n\t\t[]crypto.Sig{crypto.Sig(sig3[1:])}}, sigs)\n\n\t// test 2 messages 2,1 signatures\n\tnumOfTxInputs = 0\n\tmsgA = sig1\n\tmsgA = append(msgA, sig2...)\n\tmsgB = sig3\n\n\tsplitMsgA = UnserializeBytes(msgA)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgA, numOfTxInputs)\n\tassert.Equal(t, 2, numOfTxInputs)\n\tsplitMsgB = UnserializeBytes(msgB)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgB, numOfTxInputs)\n\tassert.Equal(t, 2, numOfTxInputs)\n\tassert.Equal(t, [][]byte{sig1[1:], sig2[1:]}, splitMsgA)\n\tassert.Equal(t, [][]byte{sig3[1:]}, splitMsgB)\n\tassert.Equal(t, 2, len(splitMsgA))\n\tassert.Equal(t, 1, len(splitMsgB))\n\n\tmsgs = [][][]byte{splitMsgA, splitMsgB}\n\tsigs = getSigsFromMsgs(msgs, numOfTxInputs)\n\tassert.Equal(t, [][]crypto.Sig{\n\t\t[]crypto.Sig{crypto.Sig(sig1[1:]), crypto.Sig(sig3[1:])},\n\t\t[]crypto.Sig{crypto.Sig(sig2[1:])}}, sigs)\n\n\t// test 2 messages 1,0 signatures\n\tnumOfTxInputs = 0\n\tmsgA = sig1\n\tmsgB = []byte{}\n\n\tsplitMsgA = UnserializeBytes(msgA)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgA, numOfTxInputs)\n\tassert.Equal(t, 1, numOfTxInputs)\n\tsplitMsgB = UnserializeBytes(msgB)\n\tnumOfTxInputs = updateNumOfTxInputs(splitMsgB, numOfTxInputs)\n\tassert.Equal(t, 1, numOfTxInputs)\n\tassert.Equal(t, [][]byte{sig1[1:]}, splitMsgA)\n\tassert.Equal(t, [][]byte{}, 
splitMsgB)\n\tassert.Equal(t, 1, len(splitMsgA))\n\tassert.Equal(t, 0, len(splitMsgB))\n\n\tmsgs = [][][]byte{splitMsgA, splitMsgB}\n\tsigs = getSigsFromMsgs(msgs, numOfTxInputs)\n\tassert.Equal(t, [][]crypto.Sig{\n\t\t[]crypto.Sig{crypto.Sig(sig1[1:])}}, sigs)\n}", "func MockEvents() []optic.Event {\n\tevents := make([]optic.Event, 0)\n\tevents = append(events, TestRaw([]byte(\"raw\")))\n\tevents = append(events, TestMetric(1.0))\n\tevents = append(events, TestLogLine(\"logline\"))\n\treturn events\n}", "func (h *halCtlSuite) TestL2SegmentGet(c *C) {\n\tvar err error\n\tvar resp string\n\treq := &halproto.L2SegmentGetRequest{\n\t\tKeyOrHandle: &halproto.L2SegmentKeyHandle{\n\t\t\tKeyOrHandle: &halproto.L2SegmentKeyHandle_SegmentId{\n\t\t\t\tSegmentId: uint64(1),\n\t\t\t},\n\t\t},\n\t}\n\tl2SegGetReqMsg := &halproto.L2SegmentGetRequestMsg{\n\t\tRequest: []*halproto.L2SegmentGetRequest{req},\n\t}\n\n\tAssertEventually(c, func() (bool, interface{}) {\n\t\tresp, err = h.getL2Segments(l2SegGetReqMsg)\n\t\treturn err == nil, nil\n\t}, \"Failed to get L2Segments\")\n\tAssertEquals(c, true, strings.Contains(resp, \"1 Mgmt\"), fmt.Sprintf(\"halctl returned: %v\", resp))\n\t//AssertEquals(c, true, strings.Contains(resp, \"segmentid: 1\"), fmt.Sprintf(\"halctl returned: %v\", resp))\n}", "func (s) TestLoggingLinkedWithTraceServerSide(t *testing.T) {\n\tfle := &fakeLoggingExporter{\n\t\tt: t,\n\t}\n\toldNewLoggingExporter := newLoggingExporter\n\tdefer func() {\n\t\tnewLoggingExporter = oldNewLoggingExporter\n\t}()\n\n\tnewLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) {\n\t\treturn fle, nil\n\t}\n\n\tidCh := testutils.NewChannel()\n\n\tfe := &fakeOpenCensusExporter{\n\t\tt: t,\n\t\tidCh: idCh,\n\t}\n\toldNewExporter := newExporter\n\tdefer func() {\n\t\tnewExporter = oldNewExporter\n\t}()\n\n\tnewExporter = func(config *config) (tracingMetricsExporter, error) {\n\t\treturn fe, nil\n\t}\n\n\tconst projectID = 
\"project-id\"\n\ttracesAndLogsConfig := &config{\n\t\tProjectID: projectID,\n\t\tCloudLogging: &cloudLogging{\n\t\t\tServerRPCEvents: []serverRPCEvents{\n\t\t\t\t{\n\t\t\t\t\tMethods: []string{\"*\"},\n\t\t\t\t\tMaxMetadataBytes: 30,\n\t\t\t\t\tMaxMessageBytes: 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tCloudTrace: &cloudTrace{\n\t\t\tSamplingRate: 1.0,\n\t\t},\n\t}\n\tcleanup, err := setupObservabilitySystemWithConfig(tracesAndLogsConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"error setting up observability %v\", err)\n\t}\n\tdefer cleanup()\n\tss := &stubserver.StubServer{\n\t\tUnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {\n\t\t\treturn &testpb.SimpleResponse{}, nil\n\t\t},\n\t\tFullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {\n\t\t\t_, err := stream.Recv()\n\t\t\tif err != io.EOF {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\tif err := ss.Start(nil); err != nil {\n\t\tt.Fatalf(\"Error starting endpoint server: %v\", err)\n\t}\n\tdefer ss.Stop()\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\t// Spawn a goroutine to receive the trace and span ids received by the\n\t// exporter corresponding to a Unary RPC.\n\treaderErrCh := testutils.NewChannel()\n\tunaryDone := grpcsync.NewEvent()\n\tgo func() {\n\t\tvar traceAndSpanIDs []traceAndSpanID\n\t\tval, err := idCh.Receive(ctx)\n\t\tif err != nil {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"error while waiting for IDs: %v\", err))\n\t\t}\n\n\t\ttasi, ok := val.(traceAndSpanID)\n\t\tif !ok {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"received wrong type from channel: %T\", val))\n\t\t}\n\t\ttraceAndSpanIDs = append(traceAndSpanIDs, tasi)\n\n\t\tval, err = idCh.Receive(ctx)\n\t\tif err != nil {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"error while waiting for IDs: %v\", err))\n\t\t}\n\n\t\ttasi, ok = val.(traceAndSpanID)\n\t\tif !ok {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"received wrong 
type from channel: %T\", val))\n\t\t}\n\t\ttraceAndSpanIDs = append(traceAndSpanIDs, tasi)\n\n\t\tval, err = idCh.Receive(ctx)\n\t\tif err != nil {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"error while waiting for IDs: %v\", err))\n\t\t}\n\t\ttasi, ok = val.(traceAndSpanID)\n\t\tif !ok {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"received wrong type from channel: %T\", val))\n\t\t}\n\t\ttraceAndSpanIDs = append(traceAndSpanIDs, tasi)\n\t\t<-unaryDone.Done()\n\t\tvar tasiServer traceAndSpanIDString\n\t\tfor _, tasi := range traceAndSpanIDs {\n\t\t\tif strings.HasPrefix(tasi.spanName, \"grpc.\") && tasi.spanKind == trace.SpanKindServer {\n\t\t\t\ttasiServer = tasi.idsToString(projectID)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfle.mu.Lock()\n\t\tfor _, tasiSeen := range fle.idsSeen {\n\t\t\tif diff := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, \"SpanKind\")); diff != \"\" {\n\t\t\t\treaderErrCh.Send(fmt.Errorf(\"got unexpected id, should be a server span (-got, +want): %v\", diff))\n\t\t\t}\n\t\t}\n\n\t\tfle.entries = nil\n\t\tfle.mu.Unlock()\n\t\treaderErrCh.Send(nil)\n\t}()\n\tif _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}}); err != nil {\n\t\tt.Fatalf(\"Unexpected error from UnaryCall: %v\", err)\n\t}\n\tunaryDone.Fire()\n\tif chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil {\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Should have received something from error channel: %v\", err)\n\t\t}\n\t\tif chErr != nil {\n\t\t\tt.Fatalf(\"Should have received a nil error from channel, instead received: %v\", chErr)\n\t\t}\n\t}\n}", "func TestVisitPrefixes(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tpcc := buildTestPrefixConfigMap()\n\ttestData := []struct {\n\t\tstart, end proto.Key\n\t\texpRanges [][2]proto.Key\n\t\texpConfigs []ConfigUnion\n\t}{\n\t\t{proto.KeyMin, proto.KeyMax,\n\t\t\t[][2]proto.Key{\n\t\t\t\t{proto.KeyMin, 
proto.Key(\"/db1\")},\n\t\t\t\t{proto.Key(\"/db1\"), proto.Key(\"/db1/table\")},\n\t\t\t\t{proto.Key(\"/db1/table\"), proto.Key(\"/db1/tablf\")},\n\t\t\t\t{proto.Key(\"/db1/tablf\"), proto.Key(\"/db2\")},\n\t\t\t\t{proto.Key(\"/db2\"), proto.Key(\"/db3\")},\n\t\t\t\t{proto.Key(\"/db3\"), proto.Key(\"/db4\")},\n\t\t\t\t{proto.Key(\"/db4\"), proto.KeyMax},\n\t\t\t}, []ConfigUnion{config1, config2, config3, config2, config1, config4, config1}},\n\t\t{proto.Key(\"/db0\"), proto.Key(\"/db1/table/foo\"),\n\t\t\t[][2]proto.Key{\n\t\t\t\t{proto.Key(\"/db0\"), proto.Key(\"/db1\")},\n\t\t\t\t{proto.Key(\"/db1\"), proto.Key(\"/db1/table\")},\n\t\t\t\t{proto.Key(\"/db1/table\"), proto.Key(\"/db1/table/foo\")},\n\t\t\t}, []ConfigUnion{config1, config2, config3}},\n\t}\n\tfor i, test := range testData {\n\t\tranges := [][2]proto.Key{}\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := pcc.VisitPrefixes(test.start, test.end, func(start, end proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tranges = append(ranges, [2]proto.Key{start, end})\n\t\t\tconfigs = append(configs, config)\n\t\t\treturn false, nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(test.expRanges, ranges) {\n\t\t\tt.Errorf(\"%d: expected ranges %+v; got %+v\", i, test.expRanges, ranges)\n\t\t}\n\t\tif !reflect.DeepEqual(test.expConfigs, configs) {\n\t\t\tt.Errorf(\"%d: expected configs %+v; got %+v\", i, test.expConfigs, configs)\n\t\t}\n\t}\n\n\t// Now, stop partway through by returning done=true.\n\t{\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := pcc.VisitPrefixes(proto.Key(\"/db2\"), proto.Key(\"/db4\"), func(start, end proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tconfigs = append(configs, config)\n\t\t\tif len(configs) == 2 {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\texpConfigs := []ConfigUnion{config1, config4}\n\t\tif !reflect.DeepEqual(expConfigs, configs) {\n\t\t\tt.Errorf(\"expected configs 
%+v; got %+v\", expConfigs, configs)\n\t\t}\n\t}\n\n\t// Now, stop partway through by returning an error.\n\t{\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := pcc.VisitPrefixes(proto.Key(\"/db2\"), proto.Key(\"/db4\"), func(start, end proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tconfigs = append(configs, config)\n\t\t\tif len(configs) == 2 {\n\t\t\t\treturn false, util.Errorf(\"foo\")\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}); err == nil {\n\t\t\tt.Fatalf(\"expected an error, but didn't get one\")\n\t\t}\n\t\texpConfigs := []ConfigUnion{config1, config4}\n\t\tif !reflect.DeepEqual(expConfigs, configs) {\n\t\t\tt.Errorf(\"expected configs %+v; got %+v\", expConfigs, configs)\n\t\t}\n\t}\n}", "func TestProcessEventFiltering(t *testing.T) {\n\trawEvents := make([]*model.ProcessEvent, 0)\n\thandlers := make([]EventHandler, 0)\n\n\t// The listener should drop unexpected events and not call the EventHandler for it\n\trawEvents = append(rawEvents, model.NewMockedForkEvent(time.Now(), 23, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}))\n\n\t// Verify that expected events are correctly consumed\n\trawEvents = append(rawEvents, model.NewMockedExecEvent(time.Now(), 23, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}))\n\thandlers = append(handlers, func(e *model.ProcessEvent) {\n\t\trequire.Equal(t, model.Exec, e.EventType)\n\t\trequire.Equal(t, uint32(23), e.Pid)\n\t})\n\n\trawEvents = append(rawEvents, model.NewMockedExitEvent(time.Now(), 23, \"/usr/bin/ls\", []string{\"ls\", \"-lah\"}, 0))\n\thandlers = append(handlers, func(e *model.ProcessEvent) {\n\t\trequire.Equal(t, model.Exit, e.EventType)\n\t\trequire.Equal(t, uint32(23), e.Pid)\n\t})\n\n\t// To avoid race conditions, all handlers should be assigned during the creation of SysProbeListener\n\tcalledHandlers := 0\n\thandler := func(e *model.ProcessEvent) {\n\t\thandlers[calledHandlers](e)\n\t\tcalledHandlers++\n\t}\n\n\tl, err := NewSysProbeListener(nil, nil, handler)\n\trequire.NoError(t, err)\n\n\tfor _, e := 
range rawEvents {\n\t\tdata, err := e.MarshalMsg(nil)\n\t\trequire.NoError(t, err)\n\t\tl.consumeData(data)\n\t}\n\tassert.Equal(t, len(handlers), calledHandlers)\n}", "func TestMainImplForBiDirectionalSyncSingleMount(t *testing.T) {\n\t// ---- Data setup ----\n\tnoOfMounts := 1\n\ttestMounts := make([]s3Mount, noOfMounts)\n\ttestMountId := \"TestMainImplForBiDirectionalSyncSingleMount\"\n\tnoOfFilesInMount := 5\n\ttestMounts[0] = *putWriteableTestMountFiles(t, testFakeBucketName, testMountId, noOfFilesInMount)\n\ttestMountsJsonBytes, err := json.Marshal(testMounts)\n\ttestMountsJson := string(testMountsJsonBytes)\n\n\tif err != nil {\n\t\t// Fail test in case of any errors\n\t\tt.Logf(\"Error creating test mount setup data %s\", err)\n\t}\n\n\t// ---- Inputs ----\n\tconcurrency := 5\n\trecurringDownloads := true\n\tstopRecurringDownloadsAfter := 50\n\tdownloadInterval := 1\n\tstopUploadWatchersAfter := 50\n\n\tfmt.Printf(\"Input: \\n\\n%s\\n\\n\", testMountsJson)\n\n\tvar wg sync.WaitGroup\n\n\t// Trigger recurring download in a separate thread and increment the wait group counter\n\twg.Add(1)\n\tgo func() {\n\n\t\t// ---- Run code under test ----\n\t\terr = mainImpl(testAwsSession, debug, recurringDownloads, stopRecurringDownloadsAfter, downloadInterval, stopUploadWatchersAfter, concurrency, testMountsJson, destinationBase, testRegion)\n\t\tif err != nil {\n\t\t\t// Fail test in case of any errors\n\t\t\tt.Logf(\"Error running the main s3-synchronizer with testMountsJson %s\", testMountsJson)\n\t\t\tt.Errorf(\"Error: %v\", err)\n\t\t}\n\n\t\t// Decrement wait group counter to allow this test case to exit\n\t\twg.Done()\n\t}()\n\n\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t// Running S3 --> Local and Local --> S3 sync in separate threads to make sure they can happen in parallel\n\t// and work well with each other\n\n\t// In a separate thread add/update/delete few files to the S3 location and verify that they get downloaded\n\t// by the 
recurring downloader thread after the downloadInterval\n\twg.Add(1)\n\tgo func() {\n\t\t// TEST FOR ADD -- NEW UPLOAD TO S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// ------------------------------------------------------------\n\n\t\t// Upload same number of files in the mount again (i.e., double the noOfFilesInMount)\n\t\ttestMounts[0] = *putWriteableTestMountFiles(t, testFakeBucketName, testMountId, 2*noOfFilesInMount)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow for\n\t\t// uploaded files to get downloaded\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the newly uploaded files are automatically downloaded after the download interval\n\t\tassertFilesDownloaded(t, testMountId, 2*noOfFilesInMount)\n\n\t\t// TEST FOR UPDATE -- UPLOAD TO EXISTING FILES IN S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// -----------------------------------------------------------------------------\n\n\t\t// Update the files in S3\n\t\tupdateTestMountFiles(t, testFakeBucketName, testMountId, noOfFilesInMount)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow for\n\t\t// uploaded files to get downloaded\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the updated files are automatically downloaded after the download interval\n\t\tassertUpdatedFilesDownloaded(t, testMountId, noOfFilesInMount)\n\n\t\t// TEST FOR DELETE -- DELETE FROM S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// ------------------------------------------------------------\n\n\t\tfileIdxToDelete := noOfFilesInMount + 1\n\t\t// Delete some files from S3 and make sure they automatically get deleted from local file system\n\t\tdeleteTestMountFile(t, testFakeBucketName, testMountId, fileIdxToDelete)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow sync to 
happen\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the file deleted from S3 are automatically deleted after the download interval\n\t\tassertFileDeleted(t, testMountId, fileIdxToDelete)\n\n\t\t// Decrement wait group counter to allow this test case to exit\n\t\twg.Done()\n\t}()\n\n\t// In a yet another thread add/update/delete few files to the local file system and verify that they get synced up to S3 correctly\n\twg.Add(1)\n\tgo func() {\n\t\t// TEST FOR ADD -- NEW FILE TO LOCAL FILE SYSTEM --> S3 SYNC\n\t\t// ------------------------------------------------------------\n\n\t\t// Upload all files in the file system\n\t\tcreateTestFilesLocally(t, testMountId, noOfFilesInMount)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system creation event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the newly created files are automatically uploaded\n\t\tassertFilesUploaded(t, testFakeBucketName, testMountId, noOfFilesInMount)\n\n\t\t// TEST FOR UPDATE -- UPLOAD TO EXISTING FILES IN LOCAL FILE SYSTEM --> S3 SYNC\n\t\t// -----------------------------------------------------------------------------\n\n\t\t// Update the files in local file system\n\t\tupdateTestFilesLocally(t, testMountId, noOfFilesInMount)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the updated files are automatically uploaded\n\t\tassertUpdatedFilesUploaded(t, testFakeBucketName, testMountId, noOfFilesInMount)\n\n\t\t// TEST FOR DELETE -- DELETE FROM LOCAL FILE SYSTEM --> S3 SYNC\n\t\t// ------------------------------------------------------------\n\t\tfileIdxToDelete := 
1\n\t\t// Delete some files from local file system and make sure they automatically get uploaded to S3\n\t\tdeleteTestFilesLocally(t, testMountId, fileIdxToDelete)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the deleted files are automatically deleted from S3\n\t\tassertFileDeletedFromS3(t, testFakeBucketName, testMountId, fileIdxToDelete)\n\n\t\t// TEST FOR RENAME (MOVE) -- RENAME IN LOCAL FILE SYSTEM --> S3 SYNC\n\t\t// --------------------------------------------------------------------\n\t\tfileIdxToMove := 0\n\t\t// Rename some files from local file system and make sure they automatically get renamed in S3\n\t\tmoveTestFileLocally(t, testMountId, fileIdxToMove, \"\")\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the renamed files are automatically renamed in S3\n\t\tassertFileMovedInS3(t, testFakeBucketName, testMountId, fileIdxToMove, \"\", testFileUpdatedContentTemplate)\n\n\t\t// TEST FOR MOVE to NESTED DIR -- MOVE IN LOCAL FILE SYSTEM TO NESTED DIRECTORY --> S3 SYNC\n\t\t// --------------------------------------------------------------------------------------------\n\t\tfileIdxToMove = 2\n\t\tmoveToSubDir := \"nested-level1/nested-level2/nested-level3/\"\n\t\t// Move some files in local file system to some nested directory that is part of the mount location\n\t\t// and make sure they automatically get moved in S3\n\t\tmoveTestFileLocally(t, testMountId, fileIdxToMove, moveToSubDir)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to 
complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the moved files are automatically moved in S3\n\t\tassertFileMovedInS3(t, testFakeBucketName, testMountId, fileIdxToMove, moveToSubDir, testFileUpdatedContentTemplate)\n\n\t\t// TEST FOR RENAMING A NESTED DIR -- MOVE DIR IN LOCAL FILE SYSTEM --> S3 SYNC\n\t\t// --------------------------------------------------------------------------------------------\n\t\toldDirPath := \"nested-level1/nested-level2/nested-level3\"\n\t\tnewDirPath := \"nested-level1/nested-level2/nested-level3-renamed\"\n\n\t\t// Move a nested directory local file system to some nested directory that is part of the mount location\n\t\t// and make sure they automatically get moved in S3\n\t\tmoveDirLocally(t, testMountId, oldDirPath, newDirPath)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the moved dir and its files are automatically moved in S3\n\t\tassertDirMovedInS3(t, testFakeBucketName, testMountId, oldDirPath, newDirPath, fileIdxToMove, testFileUpdatedContentTemplate)\n\n\t\t// TEST FOR MOVE OUT OF THE MOUNT DIRECTORY -- MOVE IN LOCAL FILE SYSTEM TO AN OUTSIDE DIRECTORY --> S3 SYNC\n\t\t// ------------------------------------------------------------------------------------------------------------\n\t\tfileIdxToMove = 3\n\t\tmoveToSubDir = buildDir + \"/\"\n\t\t// Move some files in local file system to an outside directory i.e., directory outside of the mount directory that is monitored\n\t\t// and make sure they automatically get deleted from S3\n\t\tmoveTestFileLocally(t, testMountId, fileIdxToMove, moveToSubDir)\n\n\t\t// Sleep for some duration (e.g., download interval duration) to allow for\n\t\t// file system update event to trigger and upload to 
complete\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the files are automatically deleted from S3\n\t\tassertFileDeletedFromS3(t, testFakeBucketName, testMountId, fileIdxToMove)\n\n\t\t// Decrement wait group counter to allow this test case to exit\n\t\twg.Done()\n\t}()\n\n\twg.Wait() // Wait until all spawned go routines complete before existing the test case\n}", "func TestLifecycleSharedSubnet(t *testing.T) {\n\trunLifecycleTestAWS(&LifecycleTestOptions{\n\t\tt: t,\n\t\tSrcDir: \"shared_subnet\",\n\t\tShared: []string{\"subnet-12345678\"},\n\t})\n}", "func TestEventsMgrRestart(t *testing.T) {\n\tti := tInfo{}\n\tAssertOk(t, ti.setup(t), \"failed to setup test\")\n\tdefer ti.teardown()\n\n\tnumRecorders := 3\n\n\tstopEventRecorders := make(chan struct{})\n\twg := new(sync.WaitGroup)\n\twg.Add(numRecorders + 1) // +1 for events manager restart go routine\n\n\t// uuid to make each source unique\n\tcomponentID := uuid.NewV4().String()\n\ttotalEventsSentBySrc := make([]int, numRecorders)\n\n\t// create recorder events directory\n\trecorderEventsDir, err := ioutil.TempDir(\"\", \"\")\n\tAssertOk(t, err, \"failed to create recorder events directory\")\n\tdefer os.RemoveAll(recorderEventsDir)\n\n\tfor i := 0; i < numRecorders; i++ {\n\t\tgo func(i int) {\n\t\t\tevtsRecorder, err := recorder.NewRecorder(&recorder.Config{\n\t\t\t\tComponent: fmt.Sprintf(\"%v-%v\", componentID, i),\n\t\t\t\tEvtsProxyURL: ti.evtProxyServices.EvtsProxy.RPCServer.GetListenURL(),\n\t\t\t\tBackupDir: recorderEventsDir}, ti.logger)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to create recorder for source %v\", i)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tti.recorders.Lock()\n\t\t\tti.recorders.list = append(ti.recorders.list, evtsRecorder)\n\t\t\tti.recorders.Unlock()\n\n\t\t\tticker := time.NewTicker(100 * time.Millisecond)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase 
<-stopEventRecorders:\n\t\t\t\t\twg.Done()\n\t\t\t\t\treturn\n\t\t\t\tcase <-ticker.C:\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test event - 1\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test event - 2\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\n\t\t\t\t\tevtsRecorder.Event(eventtypes.SERVICE_STOPPED, \"test event - 3\", nil)\n\t\t\t\t\ttotalEventsSentBySrc[i]++\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// restart events manager\n\tgo func() {\n\t\tevtsMgrURL := ti.evtsMgr.RPCServer.GetListenURL()\n\n\t\tfor i := 0; i < 3; i++ {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tti.evtsMgr.Stop()\n\n\t\t\t// manager won't be able to accept any events for 1s; all the elastic writes will be denied\n\t\t\t// and all the events will be buffered at the writer for this time\n\t\t\ttime.Sleep(1 * time.Second)\n\n\t\t\t// exporters should be able to release all the holding events from the buffer\n\t\t\tevtsMgr, _, err := testutils.StartEvtsMgr(evtsMgrURL, ti.mockResolver, ti.logger, ti.esClient, nil)\n\t\t\tAssertOk(t, err, \"failed to start events manager, err: %v\", err)\n\t\t\tti.evtsMgr = evtsMgr\n\t\t}\n\n\t\ttime.Sleep(1 * time.Second)\n\n\t\t// stop all the recorders\n\t\tclose(stopEventRecorders)\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\t//total events sent by all the recorders\n\ttotalEventsSent := 0\n\tfor _, val := range totalEventsSentBySrc {\n\t\ttotalEventsSent += val\n\t}\n\n\tlog.Infof(\"total events sent: %v\", totalEventsSent)\n\n\t// total number of events received at elastic should match the total events sent\n\t// query all the events received from this source.component\n\tquery := es.NewRegexpQuery(\"source.component.keyword\", fmt.Sprintf(\"%v-.*\", componentID))\n\tti.assertElasticUniqueEvents(t, query, true, 3*numRecorders, \"60s\")\n\tti.assertElasticTotalEvents(t, query, false, totalEventsSent, \"60s\")\n}", "func TestValidatorSMMultiVoting(t *testing.T) {\n\n\tctx, _, mk 
:= CreateTestInput(t, false, SufficientInitPower)\n\tclearNotBondedPool(t, ctx, mk.SupplyKeeper)\n\n\tparams := DefaultParams()\n\n\toriginVaSet := addrVals[1:]\n\tparams.MaxValidators = uint16(len(originVaSet))\n\tparams.Epoch = 2\n\tparams.UnbondingTime = time.Millisecond * 300\n\n\tstartUpValidator := NewValidator(StartUpValidatorAddr, StartUpValidatorPubkey, Description{}, types.DefaultMinSelfDelegation)\n\n\tstartUpStatus := baseValidatorStatus{startUpValidator}\n\n\tbAction := baseAction{mk}\n\n\torgValsLen := len(originVaSet)\n\tfullVaSet := make([]sdk.ValAddress, orgValsLen+1)\n\tcopy(fullVaSet, originVaSet)\n\tcopy(fullVaSet[orgValsLen:], []sdk.ValAddress{startUpStatus.getValidator().GetOperator()})\n\n\texpZeroDec := sdk.ZeroDec()\n\texpValsBondedToken := DefaultMSD.MulInt64(int64(len(fullVaSet)))\n\texpDlgGrpBondedToken := DelegatedToken1.Add(DelegatedToken2)\n\texpAllBondedToken := expValsBondedToken.Add(expDlgGrpBondedToken)\n\tstartUpCheck := andChecker{[]actResChecker{\n\t\tqueryPoolCheck(&expAllBondedToken, &expZeroDec),\n\t\tnoErrorInHandlerResult(true),\n\t}}\n\n\t// after delegator in group finish adding shares, do following check\n\taddSharesChecker := andChecker{[]actResChecker{\n\t\tvalidatorDelegatorShareIncreased(true),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tqueryDelegatorCheck(ValidDelegator2, true, fullVaSet, nil, &DelegatedToken2, &expZeroDec),\n\t\tqueryAllValidatorCheck([]sdk.BondStatus{sdk.Unbonded, sdk.Bonded, sdk.Unbonding}, []int{1, 4, 0}),\n\t\tquerySharesToCheck(startUpStatus.getValidator().OperatorAddress, 1, []sdk.AccAddress{ValidDelegator2}),\n\t\tqueryPoolCheck(&expAllBondedToken, &expZeroDec),\n\t\tnoErrorInHandlerResult(true),\n\t}}\n\n\t// All Deleagtor Unbond half of the delegation\n\texpDlgBondedTokens1 := DelegatedToken1.QuoInt64(2)\n\texpDlgUnbondedToken1 := expDlgBondedTokens1\n\texpDlgBondedTokens2 := DelegatedToken2.QuoInt64(2)\n\texpDlgUnbondedToken2 := expDlgBondedTokens2\n\texpAllUnBondedToken1 
:= expDlgUnbondedToken1.Add(expDlgUnbondedToken2)\n\texpAllBondedToken1 := DefaultMSD.MulInt64(int64(len(fullVaSet))).Add(expDlgBondedTokens1).Add(expDlgBondedTokens2)\n\twithdrawChecker1 := andChecker{[]actResChecker{\n\t\tvalidatorDelegatorShareIncreased(false),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tqueryDelegatorCheck(ValidDelegator1, true, originVaSet, nil, &expDlgBondedTokens1, &expDlgUnbondedToken1),\n\t\tqueryDelegatorCheck(ValidDelegator2, true, fullVaSet, nil, &expDlgBondedTokens2, &expDlgUnbondedToken2),\n\t\tqueryAllValidatorCheck([]sdk.BondStatus{sdk.Unbonded, sdk.Bonded, sdk.Unbonding}, []int{1, 4, 0}),\n\t\tquerySharesToCheck(startUpStatus.getValidator().OperatorAddress, 1, []sdk.AccAddress{ValidDelegator2}),\n\t\tqueryPoolCheck(&expAllBondedToken1, &expAllUnBondedToken1),\n\t\tqueryValidatorCheck(sdk.Unbonded, false, nil, nil, nil),\n\t}}\n\n\t// All Deleagtor Unbond the delegation left\n\texpDlgGrpUnbonded2 := expZeroDec\n\texpAllBondedToken2 := DefaultMSD.MulInt64(int64(len(fullVaSet)))\n\twithdrawChecker2 := andChecker{[]actResChecker{\n\t\t// cannot find unbonding token in GetUnbonding info\n\t\tqueryDelegatorCheck(ValidDelegator1, false, []sdk.ValAddress{}, nil, &expZeroDec, nil),\n\t\tqueryDelegatorCheck(ValidDelegator2, false, []sdk.ValAddress{}, nil, &expZeroDec, nil),\n\t\tqueryAllValidatorCheck([]sdk.BondStatus{sdk.Unbonded, sdk.Bonded, sdk.Unbonding}, []int{1, 4, 0}),\n\t\tquerySharesToCheck(startUpStatus.getValidator().OperatorAddress, 0, []sdk.AccAddress{}),\n\t\tqueryPoolCheck(&expAllBondedToken2, &expDlgGrpUnbonded2),\n\t\tqueryValidatorCheck(sdk.Unbonded, false, nil, nil, nil),\n\t}}\n\n\tinputActions := []IAction{\n\t\tcreateValidatorAction{bAction, nil},\n\t\tendBlockAction{bAction},\n\t\tdelegatorsAddSharesAction{bAction, true, false, 0, []sdk.AccAddress{ValidDelegator1}},\n\t\tendBlockAction{bAction},\n\t\tdelegatorsAddSharesAction{bAction, true, true, 0, 
[]sdk.AccAddress{ValidDelegator2}},\n\t\tendBlockAction{bAction},\n\t\tendBlockAction{bAction},\n\t\tdelegatorsWithdrawAction{bAction, true, false},\n\t\tendBlockAction{bAction},\n\t\tendBlockAction{bAction},\n\t\tdelegatorsWithdrawAction{bAction, true, true},\n\t\twaitUntilUnbondingTimeExpired{bAction},\n\t\tendBlockAction{bAction},\n\t}\n\n\tactionsAndChecker := []actResChecker{\n\t\tstartUpCheck.GetChecker(),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tvalidatorDelegatorShareIncreased(false),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\taddSharesChecker.GetChecker(),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tnil,\n\t\twithdrawChecker1.GetChecker(),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tnil,\n\t\twithdrawChecker2.GetChecker(),\n\t}\n\n\tsmTestCase := newValidatorSMTestCase(mk, params, startUpStatus, inputActions, actionsAndChecker, t)\n\tsmTestCase.SetupValidatorSetAndDelegatorSet(int(params.MaxValidators))\n\tsmTestCase.printParticipantSnapshot(t)\n\tsmTestCase.Run(t)\n\n}", "func (s) TestLoggingLinkedWithTrace(t *testing.T) {\n\tfle := &fakeLoggingExporter{\n\t\tt: t,\n\t}\n\toldNewLoggingExporter := newLoggingExporter\n\tdefer func() {\n\t\tnewLoggingExporter = oldNewLoggingExporter\n\t}()\n\n\tnewLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) {\n\t\treturn fle, nil\n\t}\n\n\tidCh := testutils.NewChannel()\n\n\tfe := &fakeOpenCensusExporter{\n\t\tt: t,\n\t\tidCh: idCh,\n\t}\n\toldNewExporter := newExporter\n\tdefer func() {\n\t\tnewExporter = oldNewExporter\n\t}()\n\n\tnewExporter = func(config *config) (tracingMetricsExporter, error) {\n\t\treturn fe, nil\n\t}\n\n\tconst projectID = \"project-id\"\n\ttracesAndLogsConfig := &config{\n\t\tProjectID: projectID,\n\t\tCloudLogging: &cloudLogging{\n\t\t\tClientRPCEvents: 
[]clientRPCEvents{\n\t\t\t\t{\n\t\t\t\t\tMethods: []string{\"*\"},\n\t\t\t\t\tMaxMetadataBytes: 30,\n\t\t\t\t\tMaxMessageBytes: 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\tServerRPCEvents: []serverRPCEvents{\n\t\t\t\t{\n\t\t\t\t\tMethods: []string{\"*\"},\n\t\t\t\t\tMaxMetadataBytes: 30,\n\t\t\t\t\tMaxMessageBytes: 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tCloudTrace: &cloudTrace{\n\t\t\tSamplingRate: 1.0,\n\t\t},\n\t}\n\tcleanup, err := setupObservabilitySystemWithConfig(tracesAndLogsConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"error setting up observability %v\", err)\n\t}\n\tdefer cleanup()\n\tss := &stubserver.StubServer{\n\t\tUnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {\n\t\t\treturn &testpb.SimpleResponse{}, nil\n\t\t},\n\t\tFullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {\n\t\t\t_, err := stream.Recv()\n\t\t\tif err != io.EOF {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\tif err := ss.Start(nil); err != nil {\n\t\tt.Fatalf(\"Error starting endpoint server: %v\", err)\n\t}\n\tdefer ss.Stop()\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\t// Spawn a goroutine to receive the trace and span ids received by the\n\t// exporter corresponding to a Unary RPC.\n\treaderErrCh := testutils.NewChannel()\n\tunaryDone := grpcsync.NewEvent()\n\tgo func() {\n\t\tvar traceAndSpanIDs []traceAndSpanID\n\t\tval, err := idCh.Receive(ctx)\n\t\tif err != nil {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"error while waiting for IDs: %v\", err))\n\t\t}\n\n\t\ttasi, ok := val.(traceAndSpanID)\n\t\tif !ok {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"received wrong type from channel: %T\", val))\n\t\t}\n\t\ttraceAndSpanIDs = append(traceAndSpanIDs, tasi)\n\n\t\tval, err = idCh.Receive(ctx)\n\t\tif err != nil {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"error while waiting for IDs: %v\", err))\n\t\t}\n\n\t\ttasi, ok = val.(traceAndSpanID)\n\t\tif !ok 
{\n\t\t\treaderErrCh.Send(fmt.Errorf(\"received wrong type from channel: %T\", val))\n\t\t}\n\t\ttraceAndSpanIDs = append(traceAndSpanIDs, tasi)\n\n\t\tval, err = idCh.Receive(ctx)\n\t\tif err != nil {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"error while waiting for IDs: %v\", err))\n\t\t}\n\t\ttasi, ok = val.(traceAndSpanID)\n\t\tif !ok {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"received wrong type from channel: %T\", val))\n\t\t}\n\t\ttraceAndSpanIDs = append(traceAndSpanIDs, tasi)\n\t\t<-unaryDone.Done()\n\t\tvar tasiSent traceAndSpanIDString\n\t\tvar tasiServer traceAndSpanIDString\n\t\tfor _, tasi := range traceAndSpanIDs {\n\t\t\tif strings.HasPrefix(tasi.spanName, \"grpc.\") && tasi.spanKind == trace.SpanKindClient {\n\t\t\t\ttasiSent = tasi.idsToString(projectID)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(tasi.spanName, \"grpc.\") && tasi.spanKind == trace.SpanKindServer {\n\t\t\t\ttasiServer = tasi.idsToString(projectID)\n\t\t\t}\n\t\t}\n\n\t\tfle.mu.Lock()\n\t\tfor _, tasiSeen := range fle.idsSeen {\n\t\t\tif diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, \"SpanKind\")); diff != \"\" {\n\t\t\t\tif diff2 := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, \"SpanKind\")); diff2 != \"\" {\n\t\t\t\t\treaderErrCh.Send(fmt.Errorf(\"got unexpected id, should be a client or server span (-got, +want): %v, %v\", diff, diff2))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfle.entries = nil\n\t\tfle.mu.Unlock()\n\t\treaderErrCh.Send(nil)\n\t}()\n\tif _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}}); err != nil {\n\t\tt.Fatalf(\"Unexpected error from UnaryCall: %v\", err)\n\t}\n\tunaryDone.Fire()\n\tif chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil {\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Should have received something from error channel: %v\", 
err)\n\t\t}\n\t\tif chErr != nil {\n\t\t\tt.Fatalf(\"Should have received a nil error from channel, instead received: %v\", chErr)\n\t\t}\n\t}\n\n\tfle.mu.Lock()\n\tfle.idsSeen = nil\n\tfle.mu.Unlock()\n\n\t// Test streaming. Spawn a goroutine to receive the trace and span ids\n\t// received by the exporter corresponding to a streaming RPC.\n\treaderErrCh = testutils.NewChannel()\n\tstreamDone := grpcsync.NewEvent()\n\tgo func() {\n\t\tvar traceAndSpanIDs []traceAndSpanID\n\n\t\tval, err := idCh.Receive(ctx)\n\t\tif err != nil {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"error while waiting for IDs: %v\", err))\n\t\t}\n\n\t\ttasi, ok := val.(traceAndSpanID)\n\t\tif !ok {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"received wrong type from channel: %T\", val))\n\t\t}\n\t\ttraceAndSpanIDs = append(traceAndSpanIDs, tasi)\n\t\tval, err = idCh.Receive(ctx)\n\t\tif err != nil {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"error while waiting for IDs: %v\", err))\n\t\t}\n\n\t\ttasi, ok = val.(traceAndSpanID)\n\t\tif !ok {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"received wrong type from channel: %T\", val))\n\t\t}\n\t\ttraceAndSpanIDs = append(traceAndSpanIDs, tasi)\n\n\t\tval, err = idCh.Receive(ctx)\n\t\tif err != nil {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"error while waiting for IDs: %v\", err))\n\t\t}\n\t\ttasi, ok = val.(traceAndSpanID)\n\t\tif !ok {\n\t\t\treaderErrCh.Send(fmt.Errorf(\"received wrong type from channel: %T\", val))\n\t\t}\n\t\ttraceAndSpanIDs = append(traceAndSpanIDs, tasi)\n\t\t<-streamDone.Done()\n\t\tvar tasiSent traceAndSpanIDString\n\t\tvar tasiServer traceAndSpanIDString\n\t\tfor _, tasi := range traceAndSpanIDs {\n\t\t\tif strings.HasPrefix(tasi.spanName, \"grpc.\") && tasi.spanKind == trace.SpanKindClient {\n\t\t\t\ttasiSent = tasi.idsToString(projectID)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(tasi.spanName, \"grpc.\") && tasi.spanKind == trace.SpanKindServer {\n\t\t\t\ttasiServer = 
tasi.idsToString(projectID)\n\t\t\t}\n\t\t}\n\n\t\tfle.mu.Lock()\n\t\tfor _, tasiSeen := range fle.idsSeen {\n\t\t\tif diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, \"SpanKind\")); diff != \"\" {\n\t\t\t\tif diff2 := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, \"SpanKind\")); diff2 != \"\" {\n\t\t\t\t\treaderErrCh.Send(fmt.Errorf(\"got unexpected id, should be a client or server span (-got, +want): %v, %v\", diff, diff2))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfle.entries = nil\n\t\tfle.mu.Unlock()\n\t\treaderErrCh.Send(nil)\n\t}()\n\n\tstream, err := ss.Client.FullDuplexCall(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"ss.Client.FullDuplexCall failed: %f\", err)\n\t}\n\n\tstream.CloseSend()\n\tif _, err = stream.Recv(); err != io.EOF {\n\t\tt.Fatalf(\"unexpected error: %v, expected an EOF error\", err)\n\t}\n\tstreamDone.Fire()\n\n\tif chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil {\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Should have received something from error channel: %v\", err)\n\t\t}\n\t\tif chErr != nil {\n\t\t\tt.Fatalf(\"Should have received a nil error from channel, instead received: %v\", chErr)\n\t\t}\n\t}\n}", "func TestEventsExport(t *testing.T) {\n\t// setup events pipeline to record and distribute events\n\tti := tInfo{}\n\tAssertOk(t, ti.setup(t), \"failed to setup test\")\n\tdefer ti.teardown()\n\n\tvar wg sync.WaitGroup\n\n\t// start UDP server to receive syslog messages\n\tpConn1, receivedMsgsAtUDPServer1, err := serviceutils.StartUDPServer(\":0\")\n\tAssertOk(t, err, \"failed to start UDP server, err: %v\", err)\n\tdefer pConn1.Close()\n\ttmp1 := strings.Split(pConn1.LocalAddr().String(), \":\")\n\n\t// start TCP server - 1 to receive syslog messages\n\tln1, receivedMsgsAtTCPServer1, err := serviceutils.StartTCPServer(\":0\", 100, 0)\n\tAssertOk(t, err, \"failed to start TCP 
server, err: %v\", err)\n\tdefer ln1.Close()\n\ttmp2 := strings.Split(ln1.Addr().String(), \":\")\n\n\t// start TCP server - 2 to receive syslog messages\n\tln2, receivedMsgsAtTCPServer2, err := serviceutils.StartTCPServer(\":0\", 100, 0)\n\tAssertOk(t, err, \"failed to start TCP server, err: %v\", err)\n\tdefer ln2.Close()\n\ttmp3 := strings.Split(ln2.Addr().String(), \":\")\n\tdefTenant := cluster.Tenant{\n\t\tTypeMeta: api.TypeMeta{Kind: \"Tenant\"},\n\t\tObjectMeta: api.ObjectMeta{Name: \"default\"},\n\t}\n\tti.apiClient.ClusterV1().Tenant().Create(context.Background(), &defTenant)\n\n\t// add event policy - 1\n\teventPolicy1 := policygen.CreateEventPolicyObj(globals.DefaultTenant, globals.DefaultNamespace, \"ep-1\",\n\t\tmonitoring.MonitoringExportFormat_SYSLOG_BSD.String(),\n\t\t[]*monitoring.ExportConfig{\n\t\t\t{ // receivedMsgsAtUDPServer1\n\t\t\t\tDestination: \"127.0.0.1\",\n\t\t\t\tTransport: fmt.Sprintf(\"UDP/%s\", tmp1[len(tmp1)-1]), // UDP or udp should work\n\t\t\t},\n\t\t\t{ // receivedMsgsAtTCPServer1\n\t\t\t\tDestination: \"127.0.0.1\",\n\t\t\t\tTransport: fmt.Sprintf(\"TCP/%s\", tmp2[len(tmp2)-1]), // TCP or tcp should work\n\t\t\t},\n\t\t}, nil)\n\teventPolicy1, err = ti.apiClient.MonitoringV1().EventPolicy().Create(context.Background(), eventPolicy1)\n\tAssertOk(t, err, \"failed to create event policy, err: %v\", err)\n\tdefer ti.apiClient.MonitoringV1().EventPolicy().Delete(context.Background(), eventPolicy1.GetObjectMeta())\n\n\t// add event policy - 2\n\teventPolicy2 := policygen.CreateEventPolicyObj(globals.DefaultTenant, globals.DefaultNamespace, \"ep-2\",\n\t\tmonitoring.MonitoringExportFormat_SYSLOG_RFC5424.String(),\n\t\t[]*monitoring.ExportConfig{\n\t\t\t{ // receivedMsgsAtTCPServer2\n\t\t\t\tDestination: \"127.0.0.1\",\n\t\t\t\tTransport: fmt.Sprintf(\"tcp/%s\", tmp3[len(tmp3)-1]),\n\t\t\t},\n\t\t},\n\t\t&monitoring.SyslogExportConfig{\n\t\t\tFacilityOverride: monitoring.SyslogFacility_LOG_SYSLOG.String(),\n\t\t\tPrefix: 
CreateAlphabetString(5),\n\t\t})\n\teventPolicy2, err = ti.apiClient.MonitoringV1().EventPolicy().Create(context.Background(), eventPolicy2)\n\tAssertOk(t, err, \"failed to create event policy, err: %v\", err)\n\tdefer ti.apiClient.MonitoringV1().EventPolicy().Delete(context.Background(), eventPolicy2.GetObjectMeta())\n\n\t// to let the event policies reach the policy manager (api server -> evtsmgr -> policy watcher -> policy manager -> exporter)\n\ttime.Sleep(2 * time.Second)\n\n\t// object reference for events\n\tdummyObjRef := &cluster.Node{\n\t\tTypeMeta: api.TypeMeta{\n\t\t\tKind: \"Node\",\n\t\t},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tTenant: globals.DefaultTenant,\n\t\t\tNamespace: globals.DefaultNamespace,\n\t\t},\n\t}\n\n\tmessages := map[chan string][]struct {\n\t\tSubstrs []string // syslog message should contain all these strings\n\t\tMsgFormat monitoring.MonitoringExportFormat // BSD style message contains the JSON formatted alert; RFC contains <msgID, structured data, msg>\n\t}{\n\t\t// all the messages that are sent should be received at the syslog server\n\t\treceivedMsgsAtUDPServer1: {\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s stopped on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind())},\n\t\t\t\tMsgFormat: 
monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election started %s\", dummyObjRef.Tenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader elected %s\", dummyObjRef.Tenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader changed %s\", dummyObjRef.Tenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election stopped %s\", dummyObjRef.Tenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started\", globals.DefaultTenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running\", globals.DefaultTenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", globals.DefaultTenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s stopped\", globals.DefaultTenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t},\n\t\treceivedMsgsAtTCPServer1: {\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started on %s\", 
dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s stopped on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election started %s\", dummyObjRef.Tenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader elected %s\", dummyObjRef.Tenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader changed %s\", dummyObjRef.Tenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election stopped %s\", dummyObjRef.Tenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started\", globals.DefaultTenant, t.Name())},\n\t\t\t\tMsgFormat: 
monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running\", globals.DefaultTenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", globals.DefaultTenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s stopped\", globals.DefaultTenant, t.Name())},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_BSD,\n\t\t\t},\n\t\t},\n\t\treceivedMsgsAtTCPServer2: { // messages belonging to event policy - 2\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s stopped on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election started %s\", dummyObjRef.Tenant, 
t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader elected %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader changed %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election stopped %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: 
monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s stopped\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t},\n\t}\n\n\ttestSyslogMessageDelivery(t, ti, dummyObjRef, messages)\n\n\t// update event policy - 1; remove the existing target and add a new one\n\t// start UDP server to receive syslog messages\n\tpConn2, receivedMsgsAtUDPServer2, err := serviceutils.StartUDPServer(\":0\")\n\tAssertOk(t, err, \"failed to start UDP server, err: %v\", err)\n\tdefer pConn2.Close()\n\ttmp4 := strings.Split(pConn2.LocalAddr().String(), \":\")\n\n\t// add event policy - 1\n\teventPolicy1 = policygen.CreateEventPolicyObj(globals.DefaultTenant, globals.DefaultNamespace, \"ep-1\",\n\t\tmonitoring.MonitoringExportFormat_SYSLOG_RFC5424.String(),\n\t\t[]*monitoring.ExportConfig{\n\t\t\t{ // receivedMsgsAtUDPServer1\n\t\t\t\tDestination: \"127.0.0.1\",\n\t\t\t\tTransport: fmt.Sprintf(\"udp/%s\", tmp1[len(tmp1)-1]),\n\t\t\t},\n\t\t\t{ // receivedMsgsAtUDPServer2\n\t\t\t\tDestination: \"127.0.0.1\",\n\t\t\t\tTransport: fmt.Sprintf(\"udp/%s\", tmp4[len(tmp4)-1]),\n\t\t\t},\n\t\t}, nil)\n\teventPolicy1, err = ti.apiClient.MonitoringV1().EventPolicy().Update(context.Background(), eventPolicy1)\n\tAssertOk(t, err, \"failed to create event policy, err: %v\", err)\n\tdefer ti.apiClient.MonitoringV1().EventPolicy().Delete(context.Background(), eventPolicy1.GetObjectMeta())\n\n\t// `receivedMsgsAtTCPServer1` should receive no more messages as it is removed from the policy '\"ep-1\"' (refer above);\n\t// final should be the last message on this channel\n\twg.Add(1)\n\tcloseMsgCh := make(chan struct{})\n\tgo func() {\n\t\tdefer wg.Done()\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\treceivedMsgsAtTCPServer1 <- \"final\"\n\n\t\tshouldNotReceiveAnymoreMsgs := false\n\n\t\tfor {\n\t\t\tselect 
{\n\t\t\tcase <-closeMsgCh:\n\t\t\t\treturn\n\t\t\tcase msg, ok := <-receivedMsgsAtTCPServer1:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif msg == \"final\" {\n\t\t\t\t\tshouldNotReceiveAnymoreMsgs = true\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif shouldNotReceiveAnymoreMsgs {\n\t\t\t\t\tt.Fatalf(\"syslog target is removed from the policy. so, should not receive any more messages on this channel \"+\n\t\t\t\t\t\t\"but received: %v\", msg)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Infof(\"receiving pending messages from syslog server: %v\", msg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// we should stop seeing messages on the old channel `receivedMsgsAtTCPServer1`\n\t// and start seeing messages on the new channel `receivedMsgsAtUDPServer2`\n\tmessages = map[chan string][]struct {\n\t\tSubstrs []string // syslog message should contain all these strings\n\t\tMsgFormat monitoring.MonitoringExportFormat // BSD style message contains the JSON formatted alert; RFC contains <msgID, structured data, msg>\n\t}{\n\t\treceivedMsgsAtUDPServer1: { // target - 1 of event policy - 1\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: 
[]string{fmt.Sprintf(\"(tenant:%s) test %s stopped on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election started %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader elected %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader changed %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election stopped %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running\", globals.DefaultTenant, 
t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s stopped\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t},\n\t\treceivedMsgsAtUDPServer2: { // target - 2 of event policy - 1\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s stopped on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election started %s\", dummyObjRef.Tenant, 
t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader elected %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader changed %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election stopped %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: 
monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s stopped\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy1.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t},\n\t\treceivedMsgsAtTCPServer2: { // messages belonging to event policy - 2\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s stopped on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election started %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader elected %s\", dummyObjRef.Tenant, 
t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader changed %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) dummy election: election stopped %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s started\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s running\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t\t{\n\t\t\t\tSubstrs: []string{fmt.Sprintf(\"(tenant:%s) test %s stopped\", globals.DefaultTenant, t.Name()),\n\t\t\t\t\teventPolicy2.Spec.GetSyslogConfig().GetPrefix()},\n\t\t\t\tMsgFormat: 
monitoring.MonitoringExportFormat_SYSLOG_RFC5424,\n\t\t\t},\n\t\t},\n\t}\n\ttestSyslogMessageDelivery(t, ti, dummyObjRef, messages)\n\n\tclose(closeMsgCh)\n\n\t// to avoid - panic: send on closed channel\n\t// this happens because the test completes before the dispatcher or evtsproxy is done sending all events.\n\t// test has completed prior because we check for occurrence of each message but it is possible that the sender is\n\t// sending in intervals which means there're are events coming in every batch interval. So, the TCP/UDP server gets\n\t// closed while the dispatcher is trying to send events to it. Stopping all the evtsproxy related services before\n\t// shutting down the TCP/UDP serer will solve the problem (there won't be anyone trying to send events anymore).\n\tti.evtProxyServices.Stop()\n}", "func (m *MockMeshWorkloadEventWatcher) AddEventHandler(arg0 context.Context, arg1 controller.MeshWorkloadEventHandler, arg2 ...predicate.Predicate) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AddEventHandler\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (t *tInfo) setup(tst *testing.T) error {\n\tvar err error\n\tlogConfig := log.GetDefaultConfig(\"events_test\")\n\tlogConfig.Format = log.JSONFmt\n\tlogConfig.Filter = log.AllowInfoFilter\n\n\tt.logger = log.GetNewLogger(logConfig).WithContext(\"t_name\", tst.Name())\n\tt.logger.Infof(\"Starting test %s\", tst.Name())\n\tt.mockResolver = mockresolver.New()\n\tt.testName = tst.Name()\n\n\t// We need a fairly high limit because all clients are collapsed into a single process\n\t// so they hit the same rate limiter\n\trpckit.SetDefaultListenerConnectionRateLimit(50)\n\n\t// start certificate server\n\terr = testutils.SetupIntegTLSProvider()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error setting up TLS provider: %v\", err)\n\t}\n\n\tt.signer, _, t.trustRoots, err = 
testutils.GetCAKit()\n\tif err != nil {\n\t\tt.logger.Errorf(\"Error getting CA artifacts: %v\", err)\n\t\treturn err\n\t}\n\n\tif t.dedupInterval == 0 {\n\t\tt.dedupInterval = 10 * time.Second\n\t}\n\n\tif t.batchInterval == 0 {\n\t\tt.batchInterval = 100 * time.Millisecond\n\t}\n\n\tif t.storeConfig == nil {\n\t\tt.storeConfig = &events.StoreConfig{}\n\t}\n\n\tt.recorders = &recorders{}\n\n\t// start elasticsearch\n\tif err = t.startElasticsearch(); err != nil {\n\t\tt.logger.Errorf(\"failed to start elasticsearch, err: %v\", err)\n\t\treturn err\n\t}\n\n\t// create elasticsearch client\n\tif err = t.createElasticClient(); err != nil {\n\t\tt.logger.Errorf(\"failed to create elasticsearch client, err: %v\", err)\n\t\treturn err\n\t}\n\n\t// start API server\n\tif err = t.startAPIServer(tst); err != nil {\n\t\tt.logger.Errorf(\"failed to start API server, err: %v\", err)\n\t\treturn err\n\t}\n\n\t// start mock citadel query server\n\tmockCitadelQueryServer, mockCitadelQueryServerURL, err := testutils.StartMockCitadelQueryServer(tst)\n\tif err != nil {\n\t\tt.logger.Errorf(\"failed to start mock citadel query server, err: %v\", err)\n\t\treturn err\n\t}\n\tt.mockCitadelQueryServer = mockCitadelQueryServer\n\tt.updateResolver(globals.Citadel, mockCitadelQueryServerURL)\n\n\t// start events manager\n\tevtsMgr, evtsMgrURL, err := testutils.StartEvtsMgr(testURL, t.mockResolver, t.logger, t.esClient, nil)\n\tif err != nil {\n\t\tt.logger.Errorf(\"failed to start events manager, err: %v\", err)\n\t\treturn err\n\t}\n\tt.evtsMgr = evtsMgr\n\tt.updateResolver(globals.EvtsMgr, evtsMgrURL)\n\n\t// start events proxy\n\tevtProxyServices, evtsProxyURL, storeConfig, err := testutils.StartEvtsProxy(tst.Name(), testURL, t.mockResolver, t.logger, t.dedupInterval, t.batchInterval, t.storeConfig)\n\tif err != nil {\n\t\tt.logger.Errorf(\"failed to start events proxy, err: %v\", err)\n\t\treturn err\n\t}\n\tt.evtProxyServices = evtProxyServices\n\tt.storeConfig = 
storeConfig\n\tt.updateResolver(globals.EvtsProxy, evtsProxyURL)\n\n\treturn nil\n}", "func TestPerfTransformer_Transform(t *testing.T) {\r\n\ttestJSONEvents := []*JSONEvent{{EventName: \"EVENT1\"}, {EventName: \"EVENT2\"}}\r\n\treaderMock := &MockReader{}\r\n\tmatcherMock := &MockMatcher{}\r\n\ttransformer, _, utilsMock, parserMock := newTransformerForTest()\r\n\terrMock := errors.New(\"mock error\")\r\n\r\n\tt.Run(\"Return error if reader or matcher is nil\", func(t *testing.T) {\r\n\t\t_, err := transformer.Transform(nil, matcherMock)\r\n\t\trequire.Error(t, err)\r\n\r\n\t\t_, err = transformer.Transform(readerMock, nil)\r\n\t\trequire.Error(t, err)\r\n\r\n\t\t_, err = transformer.Transform(nil, nil)\r\n\t\trequire.Error(t, err)\r\n\t})\r\n\tt.Run(\"Return error if reader failed to read the events\", func(t *testing.T) {\r\n\t\treaderMock.On(\"Read\").Return(nil, errMock).Once()\r\n\t\t_, err := transformer.Transform(readerMock, matcherMock)\r\n\t\trequire.Error(t, err)\r\n\t\treaderMock.AssertExpectations(t)\r\n\t})\r\n\tt.Run(\"Return error if reader returned no events\", func(t *testing.T) {\r\n\t\treaderMock.On(\"Read\").Return(nil, nil).Once()\r\n\t\t_, err := transformer.Transform(readerMock, matcherMock)\r\n\t\trequire.Error(t, err)\r\n\t\treaderMock.AssertExpectations(t)\r\n\t})\r\n\tt.Run(\"Ignore nil event from reader\", func(t *testing.T) {\r\n\t\tjsonNilEvent := []*JSONEvent{nil}\r\n\t\treaderMock.On(\"Read\").Return(jsonNilEvent, nil).Once()\r\n\t\tevents, err := transformer.Transform(readerMock, matcherMock)\r\n\t\trequire.NoError(t, err)\r\n\t\treaderMock.AssertExpectations(t)\r\n\t\trequire.Equal(t, 0, len(events))\r\n\t})\r\n\tt.Run(\"Return error if matcher failed\", func(t *testing.T) {\r\n\t\treaderMock.On(\"Read\").Return(testJSONEvents, nil).Once()\r\n\t\tmatcherMock.On(\"Match\", testJSONEvents[0]).Return(false, errMock).Once()\r\n\t\t_, err := transformer.Transform(readerMock, matcherMock)\r\n\t\trequire.Error(t, 
err)\r\n\t\treaderMock.AssertExpectations(t)\r\n\t\tmatcherMock.AssertExpectations(t)\r\n\t})\r\n\tt.Run(\"Return error if parse JSON function failed\", func(t *testing.T) {\r\n\t\terrMock1 := errors.New(\"mock error 1\")\r\n\t\terrMock2 := errors.New(\"mock error 2\")\r\n\t\texpectedErrMsg := fmt.Sprintf(\"failed to parse json event `%s`: %s, failed to parse json event `%s`: %s\",\r\n\t\t\ttestJSONEvents[0].EventName, errMock1.Error(), testJSONEvents[1].EventName, errMock2.Error())\r\n\r\n\t\treaderMock.On(\"Read\").Return(testJSONEvents, nil).Once()\r\n\t\tmatcherMock.On(\"Match\", testJSONEvents[0]).Return(true, nil).Once()\r\n\t\tparserMock.On(\"Execute\", testJSONEvents[0]).Return(nil, errMock1).Once()\r\n\t\tmatcherMock.On(\"Match\", testJSONEvents[1]).Return(true, nil).Once()\r\n\t\tparserMock.On(\"Execute\", testJSONEvents[1]).Return(nil, errMock2).Once()\r\n\r\n\t\tperfEvents, err := transformer.Transform(readerMock, matcherMock)\r\n\t\trequire.Error(t, err)\r\n\t\trequire.Nil(t, perfEvents)\r\n\t\trequire.IsType(t, &TransformationError{}, err)\r\n\t\trequire.Equal(t, expectedErrMsg, err.Error())\r\n\t\treaderMock.AssertExpectations(t)\r\n\t\tmatcherMock.AssertExpectations(t)\r\n\t\tparserMock.AssertExpectations(t)\r\n\t})\r\n\tt.Run(\"Return error if failed to resolve event\", func(t *testing.T) {\r\n\t\ttestDefinition1 := &eventDefinition{Name: \"EVENT1\"}\r\n\t\ttestDefinition2 := &eventDefinition{Name: \"EVENT2\"}\r\n\r\n\t\texpectedErrMsg := fmt.Sprintf(\"failed to transform event `%s`: %s, failed to transform event `%s`: %s\",\r\n\t\t\ttestDefinition1.Name, errMock.Error(), testDefinition2.Name, errMock.Error())\r\n\r\n\t\treaderMock.On(\"Read\").Return(testJSONEvents, nil).Once()\r\n\t\tmatcherMock.On(\"Match\", testJSONEvents[0]).Return(true, nil).Once().\r\n\t\t\tOn(\"Match\", testJSONEvents[1]).Return(true, nil).Once()\r\n\r\n\t\tparserMock.On(\"Execute\", testJSONEvents[0]).Return(testDefinition1, nil).Once().\r\n\t\t\tOn(\"Execute\", 
testJSONEvents[1]).Return(testDefinition2, nil).Once()\r\n\r\n\t\tutilsMock.On(\"getPMUType\", mock.Anything).Return(uint32(2), nil).Twice().\r\n\t\t\tOn(\"isPMUUncore\", mock.Anything).Return(false, errMock).Twice()\r\n\r\n\t\tperfEvents, err := transformer.Transform(readerMock, matcherMock)\r\n\t\trequire.Error(t, err)\r\n\t\trequire.Nil(t, perfEvents)\r\n\t\trequire.IsType(t, &TransformationError{}, err)\r\n\t\trequire.Equal(t, expectedErrMsg, err.Error())\r\n\r\n\t\treaderMock.AssertExpectations(t)\r\n\t\tmatcherMock.AssertExpectations(t)\r\n\t\tparserMock.AssertExpectations(t)\r\n\t\tutilsMock.AssertExpectations(t)\r\n\t})\r\n\tt.Run(\"Return resolved perf events and proper error message with those unresolved\", func(t *testing.T) {\r\n\t\terrMock1, errMock2 := errors.New(\"mock error 1\"), errors.New(\"mock error 2\")\r\n\t\ttestJSONEvents := []*JSONEvent{{EventName: \"EVENT1\"}, {EventName: \"EVENT2\"}, {EventName: \"EVENT3\"}}\r\n\t\ttestDefinition1, testDefinition2 := &eventDefinition{Name: \"EVENT2\"}, &eventDefinition{Name: \"EVENT3\"}\r\n\r\n\t\texpectedErrMsg := fmt.Sprintf(\"failed to parse json event `%s`: %s, failed to transform event `%s`: %s\",\r\n\t\t\ttestJSONEvents[0].EventName, errMock1.Error(), testJSONEvents[1].EventName, errMock2.Error())\r\n\r\n\t\treaderMock.On(\"Read\").Return(testJSONEvents, nil).Once()\r\n\r\n\t\t// mock event 1 - parse JSON function failed\r\n\t\tmatcherMock.On(\"Match\", testJSONEvents[0]).Return(true, nil).Once()\r\n\t\tparserMock.On(\"Execute\", testJSONEvents[0]).Return(nil, errMock1).Once()\r\n\r\n\t\t// mock event 2 - failed to resolve event\r\n\t\tmatcherMock.On(\"Match\", testJSONEvents[1]).Return(true, nil).Once()\r\n\t\tparserMock.On(\"Execute\", testJSONEvents[1]).Return(testDefinition1, nil).Once()\r\n\t\tutilsMock.On(\"getPMUType\", mock.Anything).Return(uint32(2), nil).Once().\r\n\t\t\tOn(\"isPMUUncore\", mock.Anything).Return(false, errMock2).Once()\r\n\r\n\t\t// mock event 3 - successful 
transformation\r\n\t\tmatcherMock.On(\"Match\", testJSONEvents[2]).Return(true, nil).Once()\r\n\t\tparserMock.On(\"Execute\", testJSONEvents[2]).Return(testDefinition2, nil).Once()\r\n\t\tutilsMock.On(\"getPMUType\", mock.Anything).Return(uint32(2), nil).Once().\r\n\t\t\tOn(\"isPMUUncore\", mock.Anything).Return(false, nil).Once().\r\n\t\t\tOn(\"parseTerms\", mock.Anything, mock.Anything).Return(nil).Once()\r\n\r\n\t\tperfEvents, err := transformer.Transform(readerMock, matcherMock)\r\n\t\trequire.Error(t, err)\r\n\t\trequire.NotNil(t, perfEvents)\r\n\t\trequire.Equal(t, testJSONEvents[2].EventName, perfEvents[0].Name)\r\n\t\trequire.IsType(t, &TransformationError{}, err)\r\n\t\trequire.Equal(t, expectedErrMsg, err.Error())\r\n\r\n\t\treaderMock.AssertExpectations(t)\r\n\t\tmatcherMock.AssertExpectations(t)\r\n\t\tparserMock.AssertExpectations(t)\r\n\t\tutilsMock.AssertExpectations(t)\r\n\t})\r\n\tt.Run(\"Return resolved perf events for events matched by matcher\", func(t *testing.T) {\r\n\t\ttestDefinition := &eventDefinition{Name: \"EVENT2\"}\r\n\t\treaderMock.On(\"Read\").Return(testJSONEvents, nil).Once()\r\n\t\tmatcherMock.On(\"Match\", testJSONEvents[0]).Return(false, nil).Once().\r\n\t\t\tOn(\"Match\", testJSONEvents[1]).Return(true, nil).Once()\r\n\t\tparserMock.On(\"Execute\", testJSONEvents[1]).Return(testDefinition, nil).Once()\r\n\t\tutilsMock.On(\"getPMUType\", mock.Anything).Return(uint32(2), nil).Once().\r\n\t\t\tOn(\"isPMUUncore\", mock.Anything).Return(false, nil).Once().\r\n\t\t\tOn(\"parseTerms\", mock.Anything, mock.Anything).Return(nil).Once()\r\n\t\tevents, err := transformer.Transform(readerMock, matcherMock)\r\n\t\trequire.NoError(t, err)\r\n\t\treaderMock.AssertExpectations(t)\r\n\t\tmatcherMock.AssertExpectations(t)\r\n\t\tparserMock.AssertExpectations(t)\r\n\t\tutilsMock.AssertExpectations(t)\r\n\t\trequire.Equal(t, 1, len(events))\r\n\t\trequire.Equal(t, testDefinition.Name, events[0].Name)\r\n\t})\r\n}", "func (s) 
TestResolverWatchCallbackAfterClose(t *testing.T) {\n\t// Setup the management server that synchronizes with the test goroutine\n\t// using two channels. The management server signals the test goroutine when\n\t// it receives a discovery request for a route configuration resource. And\n\t// the test goroutine signals the management server when the resolver is\n\t// closed.\n\twaitForRouteConfigDiscoveryReqCh := make(chan struct{}, 1)\n\twaitForResolverCloseCh := make(chan struct{})\n\tmgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{\n\t\tOnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error {\n\t\t\tif req.GetTypeUrl() == version.V3RouteConfigURL {\n\t\t\t\tselect {\n\t\t\t\tcase waitForRouteConfigDiscoveryReqCh <- struct{}{}:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\t<-waitForResolverCloseCh\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to start xDS management server: %v\", err)\n\t}\n\tdefer mgmtServer.Stop()\n\n\t// Create a bootstrap configuration specifying the above management server.\n\tnodeID := uuid.New().String()\n\tcleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{\n\t\tNodeID: nodeID,\n\t\tServerURI: mgmtServer.Address,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cleanup()\n\n\t// Configure listener and route configuration resources on the management\n\t// server.\n\tconst serviceName = \"my-service-client-side-xds\"\n\trdsName := \"route-\" + serviceName\n\tcdsName := \"cluster-\" + serviceName\n\tresources := e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)},\n\t\tRoutes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, serviceName, cdsName)},\n\t\tSkipValidation: true,\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\ttcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL(\"xds:///\" + serviceName)})\n\tdefer rClose()\n\n\t// Wait for a discovery request for a route configuration resource.\n\tselect {\n\tcase <-waitForRouteConfigDiscoveryReqCh:\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Timeout when waiting for a discovery request for a route configuration resource\")\n\t}\n\n\t// Close the resolver and unblock the management server.\n\trClose()\n\tclose(waitForResolverCloseCh)\n\n\t// Verify that the update from the management server is not propagated to\n\t// the ClientConn. The xDS resolver, once closed, is expected to drop\n\t// updates from the xDS client.\n\tsCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)\n\tdefer sCancel()\n\tif _, err := tcc.stateCh.Receive(sCtx); err != context.DeadlineExceeded {\n\t\tt.Fatalf(\"ClientConn received an update from the resolver that was closed: %v\", err)\n\t}\n}", "func TestMainImplForSyncSingleMount(t *testing.T) {\n\t// ---- Data setup ----\n\tnoOfMounts := 1\n\ttestMounts := make([]s3Mount, noOfMounts)\n\ttestMountId := \"TestMainImplForSyncSingleMount\"\n\tnoOfFilesInMount := 5\n\ttestMounts[0] = *putReadOnlyTestMountFiles(t, testFakeBucketName, testMountId, noOfFilesInMount)\n\ttestMountsJsonBytes, err := json.Marshal(testMounts)\n\ttestMountsJson := string(testMountsJsonBytes)\n\n\tif err != nil {\n\t\t// Fail test in case of any errors\n\t\tt.Logf(\"Error creating test mount setup data %s\", err)\n\t}\n\n\t// ---- Inputs ----\n\tconcurrency := 5\n\trecurringDownloads := true\n\tstopRecurringDownloadsAfter := 5\n\tdownloadInterval := 1\n\n\tfmt.Printf(\"Input: \\n\\n%s\\n\\n\", testMountsJson)\n\n\tvar wg sync.WaitGroup\n\n\t// Trigger recurring download in a separate thread and increment the wait group counter\n\twg.Add(1)\n\tgo func() {\n\t\t// ---- Run code under test ----\n\t\terr = mainImpl(testAwsSession, debug, recurringDownloads, 
stopRecurringDownloadsAfter, downloadInterval, -1, concurrency, testMountsJson, destinationBase, testRegion)\n\t\tif err != nil {\n\t\t\t// Fail test in case of any errors\n\t\t\tt.Logf(\"Error running the main s3-synchronizer with testMountsJson %s\", testMountsJson)\n\t\t\tt.Errorf(\"Error: %v\", err)\n\t\t}\n\n\t\t// Decrement wait group counter to allow this test case to exit\n\t\twg.Done()\n\t}()\n\n\t// In a separate thread add few more files to the mount point and verify that they get downloaded\n\t// by the recurring downloader thread after the dow\n\twg.Add(1)\n\tgo func() {\n\t\t// TEST FOR ADD -- NEW UPLOAD TO S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// ------------------------------------------------------------\n\n\t\t// Upload same number of files in the mount again (i.e., double the noOfFilesInMount)\n\t\ttestMounts[0] = *putReadOnlyTestMountFiles(t, testFakeBucketName, testMountId, 2*noOfFilesInMount)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow for\n\t\t// uploaded files to get downloaded\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the newly uploaded files are automatically downloaded after the download interval\n\t\tassertFilesDownloaded(t, testMountId, 2*noOfFilesInMount)\n\n\t\t// TEST FOR UPDATE -- UPLOAD TO EXISTING FILES IN S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// -----------------------------------------------------------------------------\n\n\t\t// Update the files in S3\n\t\tupdateTestMountFiles(t, testFakeBucketName, testMountId, noOfFilesInMount)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow for\n\t\t// uploaded files to get downloaded\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the updated files are automatically downloaded after the download interval\n\t\tassertUpdatedFilesDownloaded(t, testMountId, 
noOfFilesInMount)\n\n\t\t// TEST FOR DELETE -- DELETE FROM S3 --> LOCAL FILE SYSTEM SYNC\n\t\t// ------------------------------------------------------------\n\n\t\tfileIdxToDelete := noOfFilesInMount + 1\n\t\t// Delete some files from S3 and make sure they automatically get deleted from local file system\n\t\tdeleteTestMountFile(t, testFakeBucketName, testMountId, fileIdxToDelete)\n\n\t\t// Sleep for the download interval duration plus some more buffer time to allow sync to happen\n\t\ttime.Sleep(time.Duration(2*downloadInterval) * time.Second)\n\n\t\t// ---- Assertions ----\n\t\t// Verify that the file deleted from S3 are automatically deleted after the download interval\n\t\tassertFileDeleted(t, testMountId, fileIdxToDelete)\n\n\t\t// Decrement wait group counter to allow this test case to exit\n\t\twg.Done()\n\t}()\n\n\twg.Wait() // Wait until all spawned go routines complete before existing the test case\n}", "func (s *BasicTestSuite) TestVolumeLifecycle(c *C) {\n\tmisc.LogTestStart(c.TestName())\n\n\tfor _, host := range s.config.DockerHosts {\n\t\tout, err := dockercli.CreateVolume(host, s.volName1)\n\t\tc.Assert(err, IsNil, Commentf(out))\n\n\t\taccessible := verification.CheckVolumeAvailability(host, s.volName1)\n\t\tc.Assert(accessible, Equals, true, Commentf(\"Volume %s is not available\", s.volName1))\n\n\t\tout, err = dockercli.AttachVolume(host, s.volName1, s.containerName)\n\t\tc.Assert(err, IsNil, Commentf(out))\n\n\t\tstatus := verification.VerifyAttachedStatus(s.volName1, host, s.esx)\n\t\tc.Assert(status, Equals, true, Commentf(\"Volume %s is not attached\", s.volName1))\n\n\t\tout, err = dockercli.DeleteVolume(host, s.volName1)\n\t\tc.Assert(err, Not(IsNil), Commentf(out))\n\n\t\tout, err = dockercli.RemoveContainer(host, s.containerName)\n\t\tc.Assert(err, IsNil, Commentf(out))\n\n\t\tstatus = verification.VerifyDetachedStatus(s.volName1, host, s.esx)\n\t\tc.Assert(status, Equals, true, Commentf(\"Volume %s is still attached\", 
s.volName1))\n\n\t\tout, err = dockercli.DeleteVolume(host, s.volName1)\n\t\tc.Assert(err, IsNil, Commentf(out))\n\n\t\taccessible = verification.CheckVolumeAvailability(host, s.volName1)\n\t\tc.Assert(accessible, Equals, false, Commentf(\"Volume %s is still available\", s.volName1))\n\t}\n\n\tmisc.LogTestEnd(c.TestName())\n}", "func (_m *MockOptions) SegmentReaderPool() xio.SegmentReaderPool {\n\tret := _m.ctrl.Call(_m, \"SegmentReaderPool\")\n\tret0, _ := ret[0].(xio.SegmentReaderPool)\n\treturn ret0\n}", "func TestForkingAttack(t *testing.T) {\n\tconst n = 4\n\tctrl := gomock.NewController(t)\n\ths := New()\n\tkeys := testutil.GenerateKeys(t, n, testutil.GenerateECDSAKey)\n\tbl := testutil.CreateBuilders(t, ctrl, n, keys...)\n\tcfg, replicas := testutil.CreateMockConfigWithReplicas(t, ctrl, n, keys...)\n\texecutor := mocks.NewMockExecutor(ctrl)\n\tsynchronizer := synchronizer.New(testutil.FixedTimeout(1000))\n\tbl[0].Register(hs, cfg, executor, synchronizer, leaderrotation.NewFixed(2))\n\thl := bl.Build()\n\tsigners := hl.Signers()\n\n\t// configure mocks\n\treplicas[1].EXPECT().Vote(gomock.Any()).AnyTimes()\n\treplicas[1].EXPECT().NewView(gomock.Any()).AnyTimes()\n\n\tgenesisQC := hotstuff.NewQuorumCert(nil, 0, hotstuff.GetGenesis().Hash())\n\ta := testutil.NewProposeMsg(hotstuff.GetGenesis().Hash(), genesisQC, \"A\", 1, 2)\n\taQC := testutil.CreateQC(t, a.Block, signers)\n\tb := testutil.NewProposeMsg(a.Block.Hash(), aQC, \"B\", 2, 2)\n\tbQC := testutil.CreateQC(t, b.Block, signers)\n\tc := testutil.NewProposeMsg(b.Block.Hash(), bQC, \"C\", 3, 2)\n\tcQC := testutil.CreateQC(t, c.Block, signers)\n\td := testutil.NewProposeMsg(c.Block.Hash(), cQC, \"D\", 4, 2)\n\te := testutil.NewProposeMsg(b.Block.Hash(), aQC, \"E\", 5, 2)\n\n\t// expected order of 
execution\n\tgomock.InOrder(\n\t\texecutor.EXPECT().Exec(a.Block.Command()),\n\t\texecutor.EXPECT().Exec(b.Block.Command()),\n\t\texecutor.EXPECT().Exec(e.Block.Command()),\n\t)\n\n\ths.OnPropose(a)\n\ths.OnPropose(b)\n\ths.OnPropose(c)\n\ths.OnPropose(d)\n\n\t// sanity check\n\tif hs.bLock != b.Block {\n\t\tt.Fatalf(\"Not locked on B!\")\n\t}\n\n\ths.OnPropose(e)\n\n\t// advance views until E is executed\n\tblock := advanceView(t, hs, e.Block, signers)\n\tblock = advanceView(t, hs, block, signers)\n\t_ = advanceView(t, hs, block, signers)\n}", "func TestMetadataLeases(t *testing.T) {\n\tiface1, iface2, teardown := PrepareTwoClients(t)\n\tdefer teardown()\n\n\tassert.Equal(t, TestingLeaseTimeout, iface1.GetMetadataLeaseTimeout())\n\tassert.Equal(t, TestingLeaseTimeout, iface2.GetMetadataLeaseTimeout())\n\n\tattemptClaims := func(server apis.EtcdInterface, id apis.MetadataID, expected apis.EtcdInterface) {\n\t\towner, err := server.TryClaimingMetadata(id)\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, expected.GetName(), owner)\n\t}\n\n\tattemptClaimsDual := func(first apis.EtcdInterface, id apis.MetadataID, expected apis.EtcdInterface) {\n\t\tattemptClaims(first, id, expected)\n\t\tif first == iface1 {\n\t\t\tattemptClaims(iface2, id, expected)\n\t\t} else if first == iface2 {\n\t\t\tattemptClaims(iface1, id, expected)\n\t\t} else {\n\t\t\tpanic(\"test incorrectly written\")\n\t\t}\n\t}\n\n\tassert.Error(t, iface1.RenewMetadataClaims())\n\tassert.Error(t, iface2.RenewMetadataClaims())\n\n\t_, err := iface1.TryClaimingMetadata(3)\n\tassert.Error(t, err)\n\t_, err = iface2.TryClaimingMetadata(3)\n\tassert.Error(t, err)\n\n\tassert.NoError(t, iface1.BeginMetadataLease())\n\tassert.NoError(t, iface2.BeginMetadataLease())\n\n\tassert.Error(t, iface1.BeginMetadataLease())\n\tassert.Error(t, iface2.BeginMetadataLease())\n\n\tassert.NoError(t, iface1.RenewMetadataClaims())\n\tassert.NoError(t, iface2.RenewMetadataClaims())\n\n\tattemptClaimsDual(iface2, 3, 
iface2)\n\tattemptClaimsDual(iface1, 5, iface1)\n\n\tassert.NoError(t, iface1.RenewMetadataClaims())\n\tassert.NoError(t, iface2.RenewMetadataClaims())\n\n\tassert.Error(t, iface1.DisclaimMetadata(3))\n\tattemptClaimsDual(iface1, 3, iface2)\n\n\tassert.NoError(t, iface2.DisclaimMetadata(3))\n\tattemptClaimsDual(iface1, 3, iface1)\n\n\tassert.NoError(t, iface2.RenewMetadataClaims())\n\ttime.Sleep(TestingLeaseTimeout / 2)\n\tassert.NoError(t, iface2.RenewMetadataClaims())\n\tattemptClaims(iface2, 3, iface1)\n\ttime.Sleep(TestingLeaseTimeout / 2)\n\tassert.NoError(t, iface2.RenewMetadataClaims())\n\ttime.Sleep(TestingLeaseTimeout / 2)\n\tassert.NoError(t, iface2.RenewMetadataClaims())\n\tattemptClaims(iface2, 3, iface2)\n\t_, err = iface1.TryClaimingMetadata(77)\n\tassert.Error(t, err)\n\towner, err := iface2.TryClaimingMetadata(77)\n\tassert.NoError(t, err)\n\tassert.Equal(t, iface2.GetName(), owner)\n\t_, err = iface1.TryClaimingMetadata(3)\n\tassert.Error(t, err)\n\tassert.Error(t, iface1.RenewMetadataClaims())\n\n\t_, err = iface1.TryClaimingMetadata(6)\n\tassert.Error(t, err)\n\tassert.NoError(t, iface1.BeginMetadataLease())\n\tassert.NoError(t, iface1.RenewMetadataClaims())\n\tassert.NoError(t, iface2.RenewMetadataClaims())\n\n\tattemptClaims(iface1, 3, iface2)\n\tattemptClaimsDual(iface2, 6, iface2)\n\tattemptClaimsDual(iface1, 7, iface1)\n}", "func checkEvents(t *testing.T, ctx context.Context, expectedEvents []string, ctrl *PersistentVolumeController) error {\n\tvar err error\n\n\t// Read recorded events - wait up to 1 minute to get all the expected ones\n\t// (just in case some goroutines are slower with writing)\n\ttimer := time.NewTimer(time.Minute)\n\tdefer timer.Stop()\n\tlogger := klog.FromContext(ctx)\n\tfakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder)\n\tgotEvents := []string{}\n\tfinished := false\n\tfor len(gotEvents) < len(expectedEvents) && !finished {\n\t\tselect {\n\t\tcase event, ok := <-fakeRecorder.Events:\n\t\t\tif ok 
{\n\t\t\t\tlogger.V(5).Info(\"Event recorder got event\", \"event\", event)\n\t\t\t\tgotEvents = append(gotEvents, event)\n\t\t\t} else {\n\t\t\t\tlogger.V(5).Info(\"Event recorder finished\")\n\t\t\t\tfinished = true\n\t\t\t}\n\t\tcase _, _ = <-timer.C:\n\t\t\tlogger.V(5).Info(\"Event recorder timeout\")\n\t\t\tfinished = true\n\t\t}\n\t}\n\n\t// Evaluate the events\n\tfor i, expected := range expectedEvents {\n\t\tif len(gotEvents) <= i {\n\t\t\tt.Errorf(\"Event %q not emitted\", expected)\n\t\t\terr = fmt.Errorf(\"events do not match\")\n\t\t\tcontinue\n\t\t}\n\t\treceived := gotEvents[i]\n\t\tif !strings.HasPrefix(received, expected) {\n\t\t\tt.Errorf(\"Unexpected event received, expected %q, got %q\", expected, received)\n\t\t\terr = fmt.Errorf(\"events do not match\")\n\t\t}\n\t}\n\tfor i := len(expectedEvents); i < len(gotEvents); i++ {\n\t\tt.Errorf(\"Unexpected event received: %q\", gotEvents[i])\n\t\terr = fmt.Errorf(\"events do not match\")\n\t}\n\treturn err\n}", "func setupMultipleRanges(t *testing.T, splitAt ...string) (*server.TestServer, *client.DB) {\n\ts := server.StartTestServer(t)\n\tdb := createTestClient(t, s.Stopper(), s.ServingAddr())\n\n\t// Split the keyspace at the given keys.\n\tfor _, key := range splitAt {\n\t\tif err := db.AdminSplit(key); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\treturn s, db\n}", "func TestChainedHotstuff(t *testing.T) {\n\tconst n = 4\n\tctrl := gomock.NewController(t)\n\n\tbaseCfg := config.NewConfig(0, nil, nil)\n\n\tlisteners := make([]net.Listener, n)\n\tkeys := make([]hotstuff.PrivateKey, n)\n\tfor i := 0; i < n; i++ {\n\t\tlisteners[i] = testutil.CreateTCPListener(t)\n\t\tkey := testutil.GenerateECDSAKey(t)\n\t\tkeys[i] = key\n\t\tid := hotstuff.ID(i + 1)\n\t\tbaseCfg.Replicas[id] = &config.ReplicaInfo{\n\t\t\tID: id,\n\t\t\tAddress: listeners[i].Addr().String(),\n\t\t\tPubKey: key.Public(),\n\t\t}\n\t}\n\n\tbuilders := testutil.CreateBuilders(t, ctrl, n, keys...)\n\tconfigs := make([]*gorums.Config, 
n)\n\tservers := make([]*gorums.Server, n)\n\tsynchronizers := make([]hotstuff.ViewSynchronizer, n)\n\tfor i := 0; i < n; i++ {\n\t\tc := *baseCfg\n\t\tc.ID = hotstuff.ID(i + 1)\n\t\tc.PrivateKey = keys[i].(*ecdsa.PrivateKey)\n\t\tconfigs[i] = gorums.NewConfig(c)\n\t\tservers[i] = gorums.NewServer(c)\n\t\tsynchronizers[i] = synchronizer.New(\n\t\t\tsynchronizer.NewViewDuration(1000, 100, 2),\n\t\t)\n\t\tbuilders[i].Register(New(), configs[i], servers[i], synchronizers[i])\n\t}\n\n\texecutors := make([]*mocks.MockExecutor, n)\n\tcounters := make([]uint, n)\n\tc := make(chan struct{}, n)\n\terrChan := make(chan error, n)\n\tfor i := 0; i < n; i++ {\n\t\tcounter := &counters[i]\n\t\texecutors[i] = mocks.NewMockExecutor(ctrl)\n\t\texecutors[i].EXPECT().Exec(gomock.Any()).AnyTimes().Do(func(arg hotstuff.Command) {\n\t\t\tif arg != hotstuff.Command(\"foo\") {\n\t\t\t\terrChan <- fmt.Errorf(\"unknown command executed: got %s, want: %s\", arg, \"foo\")\n\t\t\t}\n\t\t\t*counter++\n\t\t\tif *counter >= 100 {\n\t\t\t\tc <- struct{}{}\n\t\t\t}\n\t\t})\n\t\tbuilders[i].Register(executors[i])\n\t}\n\n\thl := builders.Build()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tfor i, server := range servers {\n\t\tserver.StartOnListener(listeners[i])\n\t\tdefer server.Stop()\n\t}\n\n\tfor _, cfg := range configs {\n\t\terr := cfg.Connect(time.Second)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer cfg.Close()\n\t}\n\n\tfor _, hs := range hl {\n\t\tgo hs.EventLoop().Run(ctx)\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tselect {\n\t\tcase <-c:\n\t\t\tdefer synchronizers[i].Stop()\n\t\tcase err := <-errChan:\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\tcancel()\n}", "func (suite *AddCommandTestSuite) TestExecuteWhenTrackFound() {\n\n}", "func TestMultipleFeed(t *testing.T) {\n\tvar (\n\t\tnumSubs = 10\n\t\tintEvents = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}\n\t\tstringEvents = []interface{}{\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", 
\"nine\"}\n\t\tintSubs = make([]*testSubscriber, 0, numSubs)\n\t\tstringSubs = make([]*testSubscriber, 0, numSubs)\n\t\tstopper = NewStopper()\n\t)\n\n\tintFeed := &Feed{}\n\tstringFeed := &Feed{}\n\n\tfor i := 0; i < numSubs; i++ {\n\t\t// int subscriber\n\t\tts := newSubscriber(intFeed)\n\t\tintSubs = append(intSubs, ts)\n\t\tstopper.RunWorker(ts.readAll)\n\n\t\t// string subscriber\n\t\tts = newSubscriber(stringFeed)\n\t\tstringSubs = append(stringSubs, ts)\n\t\tstopper.RunWorker(ts.readAll)\n\t}\n\n\tfor i := 0; i < len(intEvents); i++ {\n\t\tintFeed.Publish(intEvents[i])\n\t\tstringFeed.Publish(stringEvents[i])\n\t}\n\tintFeed.Close()\n\tstringFeed.Close()\n\tstopper.Stop()\n\n\t// Wait for stopper to finish, meaning all publishers have ceased.\n\tselect {\n\tcase <-stopper.IsStopped():\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatalf(\"stopper failed to complete after 5 seconds.\")\n\t}\n\n\tfor i, ts := range intSubs {\n\t\tif a, e := ts.received, intEvents; !reflect.DeepEqual(a, e) {\n\t\t\tt.Errorf(\"int subscriber %d received incorrect events %v, expected %v\", i, a, e)\n\t\t}\n\t}\n\tfor i, ts := range stringSubs {\n\t\tif a, e := ts.received, stringEvents; !reflect.DeepEqual(a, e) {\n\t\t\tt.Errorf(\"int subscriber %d received incorrect events %v, expected %v\", i, a, e)\n\t\t}\n\t}\n}" ]
[ "0.63405836", "0.61344045", "0.61050683", "0.5736686", "0.53665143", "0.5357817", "0.5325795", "0.5279432", "0.5270551", "0.5219627", "0.51835257", "0.5117441", "0.5113798", "0.50922114", "0.50819683", "0.5076663", "0.5031426", "0.50180393", "0.5013473", "0.5013273", "0.4967515", "0.49560344", "0.49501336", "0.49408898", "0.48686376", "0.48667678", "0.4859813", "0.48572874", "0.48445213", "0.4837237", "0.48276937", "0.48035538", "0.479698", "0.47916567", "0.47908556", "0.4773038", "0.4757444", "0.47523627", "0.47469068", "0.4741646", "0.474103", "0.4730083", "0.47150505", "0.47133356", "0.47068644", "0.47057515", "0.47040704", "0.4702515", "0.4696023", "0.46956", "0.46894532", "0.46883792", "0.4687778", "0.4687542", "0.46873695", "0.46853924", "0.46834028", "0.46796235", "0.46758038", "0.4651906", "0.46506682", "0.46401033", "0.46345726", "0.46332586", "0.46295446", "0.46223116", "0.46163353", "0.46139112", "0.46110803", "0.4602107", "0.45968843", "0.459122", "0.45911115", "0.45797312", "0.45786694", "0.4578243", "0.45753676", "0.4571861", "0.4571473", "0.45709255", "0.4567531", "0.45661482", "0.4564657", "0.45639148", "0.45607662", "0.4560656", "0.45574775", "0.45560718", "0.45542574", "0.45494962", "0.45491567", "0.45304766", "0.45297256", "0.45286244", "0.4528203", "0.45238486", "0.4515816", "0.45119238", "0.4511556", "0.4510373" ]
0.7611378
0
EnsureMultiSegmentListeners indicates an expected call of EnsureMultiSegmentListeners
func (mr *MockLoadBalanceMockRecorder) EnsureMultiSegmentListeners(region, lbID, listeners interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureMultiSegmentListeners", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiSegmentListeners), region, lbID, listeners) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mr *MockLoadBalanceMockRecorder) EnsureMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiListeners), region, lbID, listeners)\n}", "func (m *MockLoadBalance) EnsureMultiSegmentListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiSegmentListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoadBalance) EnsureMultiListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureSegmentListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureSegmentListener\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureSegmentListener), region, listener)\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteMultiListeners), region, lbID, listeners)\n}", "func (m *MockLoadBalance) EnsureSegmentListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureSegmentListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestMsgListenerMulti(t *testing.T) {\n\tml := newMsgListeners()\n\n\tcount := 0\n\tcids := testCids()\t// TODO: The wrong 
Directory type was being used for MapEntries.\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\n\t})\t// TODO: Исправления для OSX\n\tml.onMsgComplete(cids[0], func(err error) {\n\t\tcount++\t// TODO: Test emails 1\n\t})\n\tml.onMsgComplete(cids[1], func(err error) {\n\t\tcount++\n\t})\n\n\tml.fireMsgComplete(cids[0], nil)\n\trequire.Equal(t, 2, count)\n\n\tml.fireMsgComplete(cids[1], nil)\n\trequire.Equal(t, 3, count)\n}", "func (mr *MockEventLoggerMockRecorder) AppendCheckMulti(assumedVersion interface{}, events ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{assumedVersion}, events...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AppendCheckMulti\", reflect.TypeOf((*MockEventLogger)(nil).AppendCheckMulti), varargs...)\n}", "func (m *MockLoadBalance) DeleteMultiListeners(region, lbID string, listeners []*v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureListener\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureListener), region, listener)\n}", "func (mr *MockEventLoggerMockRecorder) AppendMulti(events ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AppendMulti\", reflect.TypeOf((*MockEventLogger)(nil).AppendMulti), events...)\n}", "func TestSplitListenersToDiffProtocol(t *testing.T) {\n\ttestListeners := []*networkextensionv1.Listener{\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8000,\n\t\t\t\tProtocol: \"HTTP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8001,\n\t\t\t\tProtocol: \"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: 
networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8002,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8003,\n\t\t\t\tProtocol: \"HTTPS\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSpec: networkextensionv1.ListenerSpec{\n\t\t\t\tPort: 8004,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t},\n\t\t},\n\t}\n\tliGroup := splitListenersToDiffProtocol(testListeners)\n\tfor _, list := range liGroup {\n\t\tt.Logf(\"%+v\", list)\n\t\ttmpProtocol := make(map[string]struct{})\n\t\tfor _, li := range list {\n\t\t\ttmpProtocol[li.Spec.Protocol] = struct{}{}\n\t\t}\n\t\tif len(tmpProtocol) != 1 {\n\t\t\tt.Errorf(\"list %v contains more than one protocol %v\", list, tmpProtocol)\n\t\t}\n\t}\n}", "func (d *segmentationDescriptor) SetSubSegmentsExpected(value uint8) {\n\td.subSegsExpected = value\n}", "func (t *Convert) verifyIndividualTypeCheck() {\n\tfor _, inf := range t.Interface {\n\t\tif inf.Callback {\n\t\t\tt.verifyCallbackInterface(inf)\n\t\t}\n\t}\n}", "func (jm *JobManager) shouldTriggerListeners(t Task) bool {\n\tif typed, isTyped := t.(EventTriggerListenersProvider); isTyped {\n\t\treturn typed.ShouldTriggerListeners()\n\t}\n\n\treturn true\n}", "func TestRegisteringMultipleAccessHandlersPanics(t *testing.T) {\n\trunTest(t, func(s *Session) {\n\t\tdefer func() {\n\t\t\tv := recover()\n\t\t\tif v == nil {\n\t\t\t\tt.Errorf(`expected test to panic, but nothing happened`)\n\t\t\t}\n\t\t}()\n\t\ts.Handle(\"model\",\n\t\t\tres.Access(func(r res.AccessRequest) {\n\t\t\t\tr.NotFound()\n\t\t\t}),\n\t\t\tres.Access(func(r res.AccessRequest) {\n\t\t\t\tr.NotFound()\n\t\t\t}),\n\t\t)\n\t}, nil)\n}", "func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...any) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func 
AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...interface{}) {\n\tt.Helper()\n\n\tif container == nil {\n\t\tassert.Fail(t, \"Events container must not be nil\", msgAndArgs...)\n\n\t\treturn\n\t}\n\n\tassert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func (c BootstrapVerifyConfiguration) VerifyIndexSegmentsOrDefault() bool {\n\tif c.VerifyIndexSegments == nil {\n\t\treturn false\n\t}\n\n\treturn *c.VerifyIndexSegments\n}", "func TestMultipleRegisterCalls(t *testing.T) {\n\tRegister(\"multiple-register-driver-1\")\n\trequire.PanicsWithError(t, \"Register called twice for driver multiple-register-driver-1\", func() {\n\t\tRegister(\"multiple-register-driver-1\")\n\t})\n\n\t// Should be no error.\n\tRegister(\"multiple-register-driver-2\")\n}", "func TestValidatorSMMultiVoting(t *testing.T) {\n\n\tctx, _, mk := CreateTestInput(t, false, SufficientInitPower)\n\tclearNotBondedPool(t, ctx, mk.SupplyKeeper)\n\n\tparams := DefaultParams()\n\n\toriginVaSet := addrVals[1:]\n\tparams.MaxValidators = uint16(len(originVaSet))\n\tparams.Epoch = 2\n\tparams.UnbondingTime = time.Millisecond * 300\n\n\tstartUpValidator := NewValidator(StartUpValidatorAddr, StartUpValidatorPubkey, Description{}, types.DefaultMinSelfDelegation)\n\n\tstartUpStatus := baseValidatorStatus{startUpValidator}\n\n\tbAction := baseAction{mk}\n\n\torgValsLen := len(originVaSet)\n\tfullVaSet := make([]sdk.ValAddress, orgValsLen+1)\n\tcopy(fullVaSet, originVaSet)\n\tcopy(fullVaSet[orgValsLen:], []sdk.ValAddress{startUpStatus.getValidator().GetOperator()})\n\n\texpZeroDec := sdk.ZeroDec()\n\texpValsBondedToken := DefaultMSD.MulInt64(int64(len(fullVaSet)))\n\texpDlgGrpBondedToken := DelegatedToken1.Add(DelegatedToken2)\n\texpAllBondedToken := expValsBondedToken.Add(expDlgGrpBondedToken)\n\tstartUpCheck := andChecker{[]actResChecker{\n\t\tqueryPoolCheck(&expAllBondedToken, 
&expZeroDec),\n\t\tnoErrorInHandlerResult(true),\n\t}}\n\n\t// after delegator in group finish adding shares, do following check\n\taddSharesChecker := andChecker{[]actResChecker{\n\t\tvalidatorDelegatorShareIncreased(true),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tqueryDelegatorCheck(ValidDelegator2, true, fullVaSet, nil, &DelegatedToken2, &expZeroDec),\n\t\tqueryAllValidatorCheck([]sdk.BondStatus{sdk.Unbonded, sdk.Bonded, sdk.Unbonding}, []int{1, 4, 0}),\n\t\tquerySharesToCheck(startUpStatus.getValidator().OperatorAddress, 1, []sdk.AccAddress{ValidDelegator2}),\n\t\tqueryPoolCheck(&expAllBondedToken, &expZeroDec),\n\t\tnoErrorInHandlerResult(true),\n\t}}\n\n\t// All Deleagtor Unbond half of the delegation\n\texpDlgBondedTokens1 := DelegatedToken1.QuoInt64(2)\n\texpDlgUnbondedToken1 := expDlgBondedTokens1\n\texpDlgBondedTokens2 := DelegatedToken2.QuoInt64(2)\n\texpDlgUnbondedToken2 := expDlgBondedTokens2\n\texpAllUnBondedToken1 := expDlgUnbondedToken1.Add(expDlgUnbondedToken2)\n\texpAllBondedToken1 := DefaultMSD.MulInt64(int64(len(fullVaSet))).Add(expDlgBondedTokens1).Add(expDlgBondedTokens2)\n\twithdrawChecker1 := andChecker{[]actResChecker{\n\t\tvalidatorDelegatorShareIncreased(false),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tqueryDelegatorCheck(ValidDelegator1, true, originVaSet, nil, &expDlgBondedTokens1, &expDlgUnbondedToken1),\n\t\tqueryDelegatorCheck(ValidDelegator2, true, fullVaSet, nil, &expDlgBondedTokens2, &expDlgUnbondedToken2),\n\t\tqueryAllValidatorCheck([]sdk.BondStatus{sdk.Unbonded, sdk.Bonded, sdk.Unbonding}, []int{1, 4, 0}),\n\t\tquerySharesToCheck(startUpStatus.getValidator().OperatorAddress, 1, []sdk.AccAddress{ValidDelegator2}),\n\t\tqueryPoolCheck(&expAllBondedToken1, &expAllUnBondedToken1),\n\t\tqueryValidatorCheck(sdk.Unbonded, false, nil, nil, nil),\n\t}}\n\n\t// All Deleagtor Unbond the delegation left\n\texpDlgGrpUnbonded2 := expZeroDec\n\texpAllBondedToken2 := 
DefaultMSD.MulInt64(int64(len(fullVaSet)))\n\twithdrawChecker2 := andChecker{[]actResChecker{\n\t\t// cannot find unbonding token in GetUnbonding info\n\t\tqueryDelegatorCheck(ValidDelegator1, false, []sdk.ValAddress{}, nil, &expZeroDec, nil),\n\t\tqueryDelegatorCheck(ValidDelegator2, false, []sdk.ValAddress{}, nil, &expZeroDec, nil),\n\t\tqueryAllValidatorCheck([]sdk.BondStatus{sdk.Unbonded, sdk.Bonded, sdk.Unbonding}, []int{1, 4, 0}),\n\t\tquerySharesToCheck(startUpStatus.getValidator().OperatorAddress, 0, []sdk.AccAddress{}),\n\t\tqueryPoolCheck(&expAllBondedToken2, &expDlgGrpUnbonded2),\n\t\tqueryValidatorCheck(sdk.Unbonded, false, nil, nil, nil),\n\t}}\n\n\tinputActions := []IAction{\n\t\tcreateValidatorAction{bAction, nil},\n\t\tendBlockAction{bAction},\n\t\tdelegatorsAddSharesAction{bAction, true, false, 0, []sdk.AccAddress{ValidDelegator1}},\n\t\tendBlockAction{bAction},\n\t\tdelegatorsAddSharesAction{bAction, true, true, 0, []sdk.AccAddress{ValidDelegator2}},\n\t\tendBlockAction{bAction},\n\t\tendBlockAction{bAction},\n\t\tdelegatorsWithdrawAction{bAction, true, false},\n\t\tendBlockAction{bAction},\n\t\tendBlockAction{bAction},\n\t\tdelegatorsWithdrawAction{bAction, true, true},\n\t\twaitUntilUnbondingTimeExpired{bAction},\n\t\tendBlockAction{bAction},\n\t}\n\n\tactionsAndChecker := []actResChecker{\n\t\tstartUpCheck.GetChecker(),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tvalidatorDelegatorShareIncreased(false),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\taddSharesChecker.GetChecker(),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tnil,\n\t\twithdrawChecker1.GetChecker(),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tnil,\n\t\twithdrawChecker2.GetChecker(),\n\t}\n\n\tsmTestCase := newValidatorSMTestCase(mk, params, startUpStatus, inputActions, actionsAndChecker, 
t)\n\tsmTestCase.SetupValidatorSetAndDelegatorSet(int(params.MaxValidators))\n\tsmTestCase.printParticipantSnapshot(t)\n\tsmTestCase.Run(t)\n\n}", "func TestLimitListenerError(t *testing.T) {\n\tdonec := make(chan bool, 1)\n\tgo func() {\n\t\tconst n = 2\n\t\tll := LimitListener(errorListener{}, n)\n\t\tfor i := 0; i < n+1; i++ {\n\t\t\t_, err := ll.Accept()\n\t\t\tif err != errFake {\n\t\t\t\tt.Fatalf(\"Accept error = %v; want errFake\", err)\n\t\t\t}\n\t\t}\n\t\tdonec <- true\n\t}()\n\tselect {\n\tcase <-donec:\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timeout. deadlock?\")\n\t}\n}", "func TestMultipleHeartbeatTimeout(t *testing.T) {\n\ts := NewSupervisor(nil)\n\traa := NewRecoverableAction(s)\n\trab := NewRecoverableAction(s)\n\trac := NewRecoverableAction(s)\n\n\ts.AddRecoverable(\"A\", raa)\n\ts.AddRecoverable(\"B\", rab)\n\ts.AddRecoverable(\"C\", rac)\n\n\tt.Logf(\"(A) is '%v'.\", raa.Action(TimeConsumingAction))\n\tt.Logf(\"(B) is '%v'.\", rab.Action(PositiveAction))\n\tt.Logf(\"(C) is '%v'.\", rac.Action(PositiveAction))\n}", "func (m *MockEventLogger) AppendCheckMulti(assumedVersion uint64, events ...eventlog.EventData) (uint64, uint64, time.Time, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{assumedVersion}\n\tfor _, a := range events {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AppendCheckMulti\", varargs...)\n\tret0, _ := ret[0].(uint64)\n\tret1, _ := ret[1].(uint64)\n\tret2, _ := ret[2].(time.Time)\n\tret3, _ := ret[3].(error)\n\treturn ret0, ret1, ret2, ret3\n}", "func (w *SegmentWAL) initSegments() error {\n\tfns, err := sequenceFiles(w.dirFile.Name(), \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(fns) == 0 {\n\t\treturn nil\n\t}\n\t// We must open all files in read/write mode as we may have to truncate along\n\t// the way and any file may become the tail.\n\tfor _, fn := range fns {\n\t\tf, err := os.OpenFile(fn, os.O_RDWR, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tw.files = 
append(w.files, f)\n\t}\n\n\t// Consume and validate meta headers.\n\tfor _, f := range w.files {\n\t\tmetab := make([]byte, 8)\n\n\t\tif n, err := f.Read(metab); err != nil {\n\t\t\treturn errors.Wrapf(err, \"validate meta %q\", f.Name())\n\t\t} else if n != 8 {\n\t\t\treturn errors.Errorf(\"invalid header size %d in %q\", n, f.Name())\n\t\t}\n\n\t\tif m := binary.BigEndian.Uint32(metab[:4]); m != WALMagic {\n\t\t\treturn errors.Errorf(\"invalid magic header %x in %q\", m, f.Name())\n\t\t}\n\t\tif metab[4] != WALFormatDefault {\n\t\t\treturn errors.Errorf(\"unknown WAL segment format %d in %q\", metab[4], f.Name())\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Segment) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range segmentBeforeUpsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (setting *MongodbDatabaseCollectionThroughputSetting) createValidations() []func() (admission.Warnings, error) {\n\treturn []func() (admission.Warnings, error){setting.validateResourceReferences}\n}", "func (d *segmentationDescriptor) SetSegmentsExpected(value uint8) {\n\td.segsExpected = value\n}", "func subTestEnqueue(t *testing.T) {\n\tlease := 10 * time.Millisecond\n\toriginalCnt := variable.GetMaxDeltaSchemaCount()\n\tdefer variable.SetMaxDeltaSchemaCount(originalCnt)\n\n\tvalidator := NewSchemaValidator(lease, nil).(*schemaValidator)\n\trequire.True(t, validator.IsStarted())\n\n\t// maxCnt is 0.\n\tvariable.SetMaxDeltaSchemaCount(0)\n\tvalidator.enqueue(1, &transaction.RelatedSchemaChange{PhyTblIDS: []int64{11}, ActionTypes: []uint64{11}})\n\trequire.Len(t, validator.deltaSchemaInfos, 0)\n\n\t// maxCnt is 10.\n\tvariable.SetMaxDeltaSchemaCount(10)\n\tds := []deltaSchemaInfo{\n\t\t{0, []int64{1}, []uint64{1}},\n\t\t{1, []int64{1}, []uint64{1}},\n\t\t{2, []int64{1}, []uint64{1}},\n\t\t{3, []int64{2, 2}, 
[]uint64{2, 2}},\n\t\t{4, []int64{2}, []uint64{2}},\n\t\t{5, []int64{1, 4}, []uint64{1, 4}},\n\t\t{6, []int64{1, 4}, []uint64{1, 4}},\n\t\t{7, []int64{3, 1, 3}, []uint64{3, 1, 3}},\n\t\t{8, []int64{1, 2, 3}, []uint64{1, 2, 3}},\n\t\t{9, []int64{1, 2, 3}, []uint64{1, 2, 3}},\n\t}\n\tfor _, d := range ds {\n\t\tvalidator.enqueue(d.schemaVersion, &transaction.RelatedSchemaChange{PhyTblIDS: d.relatedIDs, ActionTypes: d.relatedActions})\n\t}\n\tvalidator.enqueue(10, &transaction.RelatedSchemaChange{PhyTblIDS: []int64{1}, ActionTypes: []uint64{1}})\n\tret := []deltaSchemaInfo{\n\t\t{0, []int64{1}, []uint64{1}},\n\t\t{2, []int64{1}, []uint64{1}},\n\t\t{3, []int64{2, 2}, []uint64{2, 2}},\n\t\t{4, []int64{2}, []uint64{2}},\n\t\t{6, []int64{1, 4}, []uint64{1, 4}},\n\t\t{9, []int64{1, 2, 3}, []uint64{1, 2, 3}},\n\t\t{10, []int64{1}, []uint64{1}},\n\t}\n\trequire.Equal(t, ret, validator.deltaSchemaInfos)\n\t// The Items' relatedTableIDs have different order.\n\tvalidator.enqueue(11, &transaction.RelatedSchemaChange{PhyTblIDS: []int64{1, 2, 3, 4}, ActionTypes: []uint64{1, 2, 3, 4}})\n\tvalidator.enqueue(12, &transaction.RelatedSchemaChange{PhyTblIDS: []int64{4, 1, 2, 3, 1}, ActionTypes: []uint64{4, 1, 2, 3, 1}})\n\tvalidator.enqueue(13, &transaction.RelatedSchemaChange{PhyTblIDS: []int64{4, 1, 3, 2, 5}, ActionTypes: []uint64{4, 1, 3, 2, 5}})\n\tret[len(ret)-1] = deltaSchemaInfo{13, []int64{4, 1, 3, 2, 5}, []uint64{4, 1, 3, 2, 5}}\n\trequire.Equal(t, ret, validator.deltaSchemaInfos)\n\t// The length of deltaSchemaInfos is greater then maxCnt.\n\tvalidator.enqueue(14, &transaction.RelatedSchemaChange{PhyTblIDS: []int64{1}, ActionTypes: []uint64{1}})\n\tvalidator.enqueue(15, &transaction.RelatedSchemaChange{PhyTblIDS: []int64{2}, ActionTypes: []uint64{2}})\n\tvalidator.enqueue(16, &transaction.RelatedSchemaChange{PhyTblIDS: []int64{3}, ActionTypes: []uint64{3}})\n\tvalidator.enqueue(17, &transaction.RelatedSchemaChange{PhyTblIDS: []int64{4}, ActionTypes: []uint64{4}})\n\tret = 
append(ret, deltaSchemaInfo{14, []int64{1}, []uint64{1}})\n\tret = append(ret, deltaSchemaInfo{15, []int64{2}, []uint64{2}})\n\tret = append(ret, deltaSchemaInfo{16, []int64{3}, []uint64{3}})\n\tret = append(ret, deltaSchemaInfo{17, []int64{4}, []uint64{4}})\n\trequire.Equal(t, ret[1:], validator.deltaSchemaInfos)\n}", "func TestVisitPrefixesHierarchically(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tpcc := buildTestPrefixConfigMap()\n\t{\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := pcc.VisitPrefixesHierarchically(proto.Key(\"/db1/table/1\"), func(start, end proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tconfigs = append(configs, config)\n\t\t\treturn false, nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\texpConfigs := []ConfigUnion{config3, config2, config1}\n\t\tif !reflect.DeepEqual(expConfigs, configs) {\n\t\t\tt.Errorf(\"expected configs %+v; got %+v\", expConfigs, configs)\n\t\t}\n\t}\n\n\t// Now, stop partway through by returning done=true.\n\t{\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := pcc.VisitPrefixesHierarchically(proto.Key(\"/db1/table/1\"), func(start, end proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tconfigs = append(configs, config)\n\t\t\tif len(configs) == 2 {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\texpConfigs := []ConfigUnion{config3, config2}\n\t\tif !reflect.DeepEqual(expConfigs, configs) {\n\t\t\tt.Errorf(\"expected configs %+v; got %+v\", expConfigs, configs)\n\t\t}\n\t}\n\n\t// Now, stop partway through by returning an error.\n\t{\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := pcc.VisitPrefixesHierarchically(proto.Key(\"/db1/table/1\"), func(start, end proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tconfigs = append(configs, config)\n\t\t\tif len(configs) == 2 {\n\t\t\t\treturn false, util.Errorf(\"foo\")\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}); err == nil {\n\t\t\tt.Fatalf(\"expected an error, but didn't get 
one\")\n\t\t}\n\t\texpConfigs := []ConfigUnion{config3, config2}\n\t\tif !reflect.DeepEqual(expConfigs, configs) {\n\t\t\tt.Errorf(\"expected configs %+v; got %+v\", expConfigs, configs)\n\t\t}\n\t}\n}", "func (mr *MockMetricsMockRecorder) MultiCreateSuccessResponseCounter() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"MultiCreateSuccessResponseCounter\", reflect.TypeOf((*MockMetrics)(nil).MultiCreateSuccessResponseCounter))\n}", "func checkMultipleSigners(tx authsigning.Tx) error {\n\tdirectSigners := 0\n\tsigsV2, err := tx.GetSignaturesV2()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, sig := range sigsV2 {\n\t\tdirectSigners += countDirectSigners(sig.Data)\n\t\tif directSigners > 1 {\n\t\t\treturn sdkerrors.ErrNotSupported.Wrap(\"txs signed with CLI can have maximum 1 DIRECT signer\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func verifyPartialLedgers(t *testing.T, provider *Provider, targetStatus []msgs.Status) {\n\t// Also double-check that deleted ledgers do not appear in the provider listing.\n\tactiveLedgers, err := provider.List()\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < len(targetStatus); i++ {\n\t\tledgerID := constructTestLedgerID(i)\n\t\tif targetStatus[i] == msgs.Status_UNDER_CONSTRUCTION || targetStatus[i] == msgs.Status_UNDER_DELETION {\n\t\t\tverifyLedgerDoesNotExist(t, provider, ledgerID)\n\t\t\trequire.NotContains(t, ledgerID, activeLedgers)\n\t\t} else {\n\t\t\tverifyLedgerIDExists(t, provider, ledgerID, targetStatus[i])\n\t\t\trequire.Contains(t, activeLedgers, ledgerID)\n\t\t}\n\t}\n}", "func TestResilientMultiWriter(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\twriters []io.Writer\n\t}{\n\t\t{\n\t\t\tname: \"All valid writers\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"All invalid writers\",\n\t\t\twriters: 
[]io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"First invalid writer\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"First valid writer\",\n\t\t\twriters: []io.Writer{\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: false,\n\t\t\t\t},\n\t\t\t\tmockedWriter{\n\t\t\t\t\twantErr: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\twriters := tt.writers\n\t\tmultiWriter := MultiLevelWriter(writers...)\n\n\t\tlogger := New(multiWriter).With().Timestamp().Logger().Level(InfoLevel)\n\t\tlogger.Info().Msg(\"Test msg\")\n\n\t\tif len(writers) != writeCalls {\n\t\t\tt.Errorf(\"Expected %d writers to have been called but only %d were.\", len(writers), writeCalls)\n\t\t}\n\t\twriteCalls = 0\n\t}\n}", "func TestSubscriberIDs(t *testing.T) {\n\tassert := asserts.NewTesting(t, asserts.FailStop)\n\tmsh := mesh.New()\n\n\terr := msh.SpawnCells(\n\t\tNewTestBehavior(\"foo\"),\n\t\tNewTestBehavior(\"bar\"),\n\t\tNewTestBehavior(\"baz\"),\n\t)\n\tassert.NoError(err)\n\n\terr = msh.Subscribe(\"foo\", \"bar\", \"baz\")\n\tassert.NoError(err)\n\n\tsubscriberIDs, err := msh.Subscribers(\"foo\")\n\tassert.NoError(err)\n\tassert.Length(subscriberIDs, 2)\n\n\tsubscriberIDs, err = msh.Subscribers(\"bar\")\n\tassert.NoError(err)\n\tassert.Length(subscriberIDs, 0)\n\n\terr = msh.Unsubscribe(\"foo\", \"baz\")\n\tassert.NoError(err)\n\n\tsubscriberIDs, err = msh.Subscribers(\"foo\")\n\tassert.NoError(err)\n\tassert.Length(subscriberIDs, 1)\n\n\terr = msh.Stop()\n\tassert.NoError(err)\n}", "func TestMultiRoutineAccess_logger(t *testing.T) {\n\t// storing address of *Logger instance for validation.\n\tmsgArray := make([]string, 0)\n\n\t// wait-group to make sure a certain number of go-routine(s)\n\t// has 
finished its task.\n\tvar wgroup sync.WaitGroup\n\n\tfor i := 0; i < 10; i++ {\n\t\t// updates the wait-group counter.\n\t\twgroup.Add(1)\n\n\t\tgo func(idx int) {\n\t\t\t// decreses the wait-group counter by 1.\n\t\t\t// When the counter returns to 0, the wait-group will end the \"wait\".\n\t\t\tdefer wgroup.Done()\n\n\t\t\tlog := logger.GetLogger()\n\t\t\t// append the address value of instance \"log\"\n\t\t\tlock.Lock()\n\t\t\tmsgArray = append(msgArray, fmt.Sprintf(\"%p\", log))\n\t\t\tlock.Unlock()\n\t\t\tlog.Log(fmt.Sprintf(\"this is a log entry from [%v]\\n\", idx))\n\t\t}(i)\n\t}\n\twgroup.Wait()\n\n\t// verification\n\tif len(msgArray) == 0 {\n\t\tt.Fatalf(\"expect to have a least one message\")\n\t}\n\taddrLine := msgArray[0]\n\tfor i := 1; i < len(msgArray); i++ {\n\t\tline := msgArray[i]\n\t\tif addrLine != line {\n\t\t\tt.Errorf(\"expect both lines (addresses of Logger) should be identical, [%v] vs [%v]\\n\", addrLine, line)\n\t\t}\n\t}\n}", "func TestLifecycleManyAddons(t *testing.T) {\n\trunLifecycleTestAWS(&LifecycleTestOptions{\n\t\tt: t,\n\t\tSrcDir: \"many-addons\",\n\t\tClusterName: \"minimal.example.com\",\n\t})\n}", "func checkListeners(cli kube.CLIClient, namespace string) (diag.Messages, error) {\n\tpods, err := cli.Kube().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{\n\t\t// Find all running pods\n\t\tFieldSelector: \"status.phase=Running\",\n\t\t// Find all injected pods. We don't care about non-injected pods, because the new behavior\n\t\t// mirrors Kubernetes; this is only a breaking change for existing Istio users.\n\t\tLabelSelector: \"security.istio.io/tlsMode=istio\",\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar messages diag.Messages = make([]diag.Message, 0)\n\tg := errgroup.Group{}\n\n\tsem := semaphore.NewWeighted(25)\n\tfor _, pod := range pods.Items {\n\t\tpod := pod\n\t\tif !fromLegacyNetworkingVersion(pod) {\n\t\t\t// Skip check. 
This pod is already on a version where the change has been made; if they were going\n\t\t\t// to break they would already be broken.\n\t\t\tcontinue\n\t\t}\n\t\tg.Go(func() error {\n\t\t\t_ = sem.Acquire(context.Background(), 1)\n\t\t\tdefer sem.Release(1)\n\t\t\t// Fetch list of all clusters to get which ports we care about\n\t\t\tresp, err := cli.EnvoyDo(context.Background(), pod.Name, pod.Namespace, \"GET\", \"config_dump?resource=dynamic_active_clusters&mask=cluster.name\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"failed to get config dump: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tports, err := extractInboundPorts(resp)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"failed to get ports: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Next, look at what ports the pod is actually listening on\n\t\t\t// This requires parsing the output from ss; the version we use doesn't support JSON\n\t\t\tout, _, err := cli.PodExec(pod.Name, pod.Namespace, \"istio-proxy\", \"ss -ltnH\")\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"executable file not found\") {\n\t\t\t\t\t// Likely distroless or other custom build without ss. 
Nothing we can do here...\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfmt.Println(\"failed to get listener state: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfor _, ss := range strings.Split(out, \"\\n\") {\n\t\t\t\tif len(ss) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbind, port, err := net.SplitHostPort(getColumn(ss, 3))\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"failed to get parse state: \", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tip, _ := netip.ParseAddr(bind)\n\t\t\t\tportn, _ := strconv.Atoi(port)\n\t\t\t\tif _, f := ports[portn]; f {\n\t\t\t\t\tc := ports[portn]\n\t\t\t\t\tif bind == \"\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if bind == \"*\" || ip.IsUnspecified() {\n\t\t\t\t\t\tc.Wildcard = true\n\t\t\t\t\t} else if ip.IsLoopback() {\n\t\t\t\t\t\tc.Lo = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tc.Explicit = true\n\t\t\t\t\t}\n\t\t\t\t\tports[portn] = c\n\t\t\t\t}\n\t\t\t}\n\n\t\t\torigin := &kube3.Origin{\n\t\t\t\tType: gvk.Pod,\n\t\t\t\tFullName: resource.FullName{\n\t\t\t\t\tNamespace: resource.Namespace(pod.Namespace),\n\t\t\t\t\tName: resource.LocalName(pod.Name),\n\t\t\t\t},\n\t\t\t\tResourceVersion: resource.Version(pod.ResourceVersion),\n\t\t\t}\n\t\t\tfor port, status := range ports {\n\t\t\t\t// Binding to localhost no longer works out of the box on Istio 1.10+, give them a warning.\n\t\t\t\tif status.Lo {\n\t\t\t\t\tmessages.Add(msg.NewLocalhostListener(&resource.Instance{Origin: origin}, fmt.Sprint(port)))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn messages, nil\n}", "func TestOnlyCallOnceOnMultipleDepChanges(t *testing.T) {\n\tr := New()\n\ti := r.CreateInput(1)\n\tc1 := r.CreateCompute1(i, func(v int) int { return v + 1 })\n\tc2 := r.CreateCompute1(i, func(v int) int { return v - 1 })\n\tc3 := r.CreateCompute1(c2, func(v int) int { return v - 1 })\n\tc4 := r.CreateCompute2(c1, c3, func(v1, v3 int) int { return v1 * v3 })\n\tchanged4 := 
0\n\tc4.AddCallback(func(int) { changed4++ })\n\ti.SetValue(3)\n\tif changed4 < 1 {\n\t\tt.Fatalf(\"callback function was not called\")\n\t} else if changed4 > 1 {\n\t\tt.Fatalf(\"callback function was called too often\")\n\t}\n}", "func (t *SelfTester) SetOnNewPoliciesReadyCb(cb func()) {\n}", "func AssertNoLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...any) {\n\tt.Helper()\n\n\tif container == nil {\n\t\treturn\n\t}\n\n\tassert.Equal(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func WaitTerminations(done chan bool) {\n\tremaining := SegmentListeners\n\tfor remaining > 0 {\n\t\tlog.Printf(\"WaitTerminations: waiting for %d listeners\\n\", remaining)\n\t\t_ = <-done\n\t\tremaining--\n\t}\n}", "func TestHandlerDispatchInternal(t *testing.T) {\n\trequire := require.New(t)\n\n\tctx := snow.DefaultConsensusContextTest()\n\tmsgFromVMChan := make(chan common.Message)\n\tvdrs := validators.NewSet()\n\trequire.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1))\n\n\tresourceTracker, err := tracker.NewResourceTracker(\n\t\tprometheus.NewRegistry(),\n\t\tresource.NoUsage,\n\t\tmeter.ContinuousFactory{},\n\t\ttime.Second,\n\t)\n\trequire.NoError(err)\n\thandler, err := New(\n\t\tctx,\n\t\tvdrs,\n\t\tmsgFromVMChan,\n\t\ttime.Second,\n\t\ttestThreadPoolSize,\n\t\tresourceTracker,\n\t\tvalidators.UnhandledSubnetConnector,\n\t\tsubnets.New(ctx.NodeID, subnets.Config{}),\n\t\tcommontracker.NewPeers(),\n\t)\n\trequire.NoError(err)\n\n\tbootstrapper := &common.BootstrapperTest{\n\t\tBootstrapableTest: common.BootstrapableTest{\n\t\t\tT: t,\n\t\t},\n\t\tEngineTest: common.EngineTest{\n\t\t\tT: t,\n\t\t},\n\t}\n\tbootstrapper.Default(false)\n\n\tengine := &common.EngineTest{T: t}\n\tengine.Default(false)\n\tengine.ContextF = func() *snow.ConsensusContext {\n\t\treturn ctx\n\t}\n\n\twg := &sync.WaitGroup{}\n\tengine.NotifyF = func(context.Context, common.Message) error {\n\t\twg.Done()\n\t\treturn 
nil\n\t}\n\n\thandler.SetEngineManager(&EngineManager{\n\t\tSnowman: &Engine{\n\t\t\tBootstrapper: bootstrapper,\n\t\t\tConsensus: engine,\n\t\t},\n\t})\n\n\tctx.State.Set(snow.EngineState{\n\t\tType: p2p.EngineType_ENGINE_TYPE_SNOWMAN,\n\t\tState: snow.NormalOp, // assumed bootstrap is done\n\t})\n\n\tbootstrapper.StartF = func(context.Context, uint32) error {\n\t\treturn nil\n\t}\n\n\twg.Add(1)\n\thandler.Start(context.Background(), false)\n\tmsgFromVMChan <- 0\n\twg.Wait()\n}", "func TestValidEvents(t *testing.T) {\n\ttestCases := []struct {\n\t\tevents []string\n\t\terrCode APIErrorCode\n\t}{\n\t\t// Return error for unknown event element.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:UnknownAPI\",\n\t\t\t},\n\t\t\terrCode: ErrEventNotification,\n\t\t},\n\t\t// Return success for supported event.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:ObjectCreated:Put\",\n\t\t\t},\n\t\t\terrCode: ErrNone,\n\t\t},\n\t\t// Return success for supported events.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:ObjectCreated:*\",\n\t\t\t\t\"s3:ObjectRemoved:*\",\n\t\t\t},\n\t\t\terrCode: ErrNone,\n\t\t},\n\t\t// Return error for empty event list.\n\t\t{\n\t\t\tevents: []string{\"\"},\n\t\t\terrCode: ErrEventNotification,\n\t\t},\n\t}\n\n\tfor i, testCase := range testCases {\n\t\terrCode := checkEvents(testCase.events)\n\t\tif testCase.errCode != errCode {\n\t\t\tt.Errorf(\"Test %d: Expected \\\"%d\\\", got \\\"%d\\\"\", i+1, testCase.errCode, errCode)\n\t\t}\n\t}\n}", "func (noopMeter) RegisterCallback([]instrument.Asynchronous, func(context.Context)) error {\n\treturn nil\n}", "func (c *AviController) SetupMultiClusterIngressEventHandlers(numWorkers uint32) {\n\tutils.AviLog.Infof(\"Setting up MultiClusterIngress CRD Event handlers\")\n\n\tmultiClusterIngressEventHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmci := obj.(*akov1alpha1.MultiClusterIngress)\n\t\t\tnamespace, _, _ := 
cache.SplitMetaNamespaceKey(utils.ObjKey(mci))\n\t\t\tkey := lib.MultiClusterIngress + \"/\" + utils.ObjKey(mci)\n\t\t\tif lib.IsNamespaceBlocked(namespace) || !utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Multi-cluster Ingress add event: Namespace: %s didn't qualify filter. Not adding multi-cluster ingress\", key, namespace)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := c.GetValidator().ValidateMultiClusterIngressObj(key, mci); err != nil {\n\t\t\t\tutils.AviLog.Warnf(\"key: %s, msg: Validation of MultiClusterIngress failed: %v\", key, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tutils.AviLog.Debugf(\"key: %s, msg: ADD\", key)\n\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t},\n\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\toldObj := old.(*akov1alpha1.MultiClusterIngress)\n\t\t\tmci := new.(*akov1alpha1.MultiClusterIngress)\n\t\t\tif !reflect.DeepEqual(oldObj.Spec, mci.Spec) {\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(mci))\n\t\t\t\tkey := lib.MultiClusterIngress + \"/\" + utils.ObjKey(mci)\n\t\t\t\tif lib.IsNamespaceBlocked(namespace) || !utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Multi-cluster Ingress update event: Namespace: %s didn't qualify filter. 
Not updating multi-cluster ingress\", key, namespace)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err := c.GetValidator().ValidateMultiClusterIngressObj(key, mci); err != nil {\n\t\t\t\t\tutils.AviLog.Warnf(\"key: %s, msg: Validation of MultiClusterIngress failed: %v\", key, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: UPDATE\", key)\n\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmci, ok := obj.(*akov1alpha1.MultiClusterIngress)\n\t\t\tif !ok {\n\t\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\tif !ok {\n\t\t\t\t\tutils.AviLog.Errorf(\"couldn't get object from tombstone %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tmci, ok = tombstone.Obj.(*akov1alpha1.MultiClusterIngress)\n\t\t\t\tif !ok {\n\t\t\t\t\tutils.AviLog.Errorf(\"Tombstone contained object that is not a MultiClusterIngress: %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(mci))\n\t\t\tkey := lib.MultiClusterIngress + \"/\" + utils.ObjKey(mci)\n\t\t\tif lib.IsNamespaceBlocked(namespace) || !utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Multi-cluster Ingress delete event: Namespace: %s didn't qualify filter. 
Not deleting multi-cluster ingress\", key, namespace)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tutils.AviLog.Debugf(\"key: %s, msg: DELETE\", key)\n\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\tobjects.SharedResourceVerInstanceLister().Delete(key)\n\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t},\n\t}\n\tc.informers.MultiClusterIngressInformer.Informer().AddEventHandler(multiClusterIngressEventHandler)\n}", "func TestInvalidToValidSubscription(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tcst, err := createConsensusSetTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cst.Close()\n\n\t// Start by performing a bad subscribe.\n\tms := newMockSubscriber()\n\tbadCCID := modules.ConsensusChangeID{255, 255, 255}\n\terr = cst.cs.ConsensusSetSubscribe(&ms, badCCID, cst.cs.tg.StopChan())\n\tif err != modules.ErrInvalidConsensusChangeID {\n\t\tt.Error(\"consensus set returning the wrong error during an invalid subscription:\", err)\n\t}\n\n\t// Perform a correct subscribe.\n\terr = cst.cs.ConsensusSetSubscribe(&ms, modules.ConsensusChangeBeginning, cst.cs.tg.StopChan())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Mine a block and check that the mock subscriber only got a single\n\t// consensus change.\n\tnumPrevUpdates := len(ms.updates)\n\t_, err = cst.miner.AddBlock()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(ms.updates) != numPrevUpdates+1 {\n\t\tt.Error(\"subscriber received two consensus changes for a single block\")\n\t}\n}", "func AssertNoLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...interface{}) {\n\tt.Helper()\n\n\tif container == nil {\n\t\treturn\n\t}\n\n\tassert.Equal(t, len(container.GetEvents()), 0, msgAndArgs...)\n}", "func verifyExtensionLifecycle(t *testing.T, factory extension.Factory, getConfigFn getExtensionConfigFn) {\n\tctx := context.Background()\n\thost := newAssertNoErrorHost(t)\n\textCreateSet := 
extensiontest.NewNopCreateSettings()\n\n\tif getConfigFn == nil {\n\t\tgetConfigFn = factory.CreateDefaultConfig\n\t}\n\n\tfirstExt, err := factory.CreateExtension(ctx, extCreateSet, getConfigFn())\n\trequire.NoError(t, err)\n\trequire.NoError(t, firstExt.Start(ctx, host))\n\trequire.NoError(t, firstExt.Shutdown(ctx))\n\n\tsecondExt, err := factory.CreateExtension(ctx, extCreateSet, getConfigFn())\n\trequire.NoError(t, err)\n\trequire.NoError(t, secondExt.Start(ctx, host))\n\trequire.NoError(t, secondExt.Shutdown(ctx))\n}", "func TestRegisterCallback(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tstopper := stop.NewStopper()\n\tdefer stopper.Stop()\n\tis := newInfoStore(1, emptyAddr, stopper)\n\twg := &sync.WaitGroup{}\n\tcb := callbackRecord{wg: wg}\n\n\ti1 := is.newInfo(nil, time.Second)\n\ti2 := is.newInfo(nil, time.Second)\n\tif err := is.addInfo(\"key1\", i1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := is.addInfo(\"key2\", i2); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twg.Add(2)\n\tis.registerCallback(\"key.*\", cb.Add)\n\twg.Wait()\n\tactKeys := cb.Keys()\n\tsort.Strings(actKeys)\n\tif expKeys := []string{\"key1\", \"key2\"}; !reflect.DeepEqual(actKeys, expKeys) {\n\t\tt.Errorf(\"expected %v, got %v\", expKeys, cb.Keys())\n\t}\n}", "func TestRangesFull(t *testing.T) {\n\tinitDone := make(chan struct{})\n\t// A single /32 can't be used to allocate since we always reserve 2 IPs,\n\t// the network and broadcast address, which in the case of a /32 means it is always full.\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.123/32\", \"FF::123/128\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: 
slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv6Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-a\" {\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\treturn false\n\t\t}\n\n\t\tif svc.Name != \"service-b\" {\n\n\t\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif len(svc.Status.Conditions) != 
1 {\n\t\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\t\tt.Error(\"Expected condition to be svc-satisfied:false\")\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif svc.Status.Conditions[0].Reason != \"out_of_ips\" {\n\t\t\t\tt.Error(\"Expected condition reason to be out of IPs\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected two service updates\")\n\t}\n}", "func (o *Segment) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range segmentAfterUpsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestAsyncEvents(t *testing.T) {\n\tif !haveArchive {\n\t\treturn\n\t}\n\n\tif testing.Verbose() && DEBUG {\n\t\tlogging.SetLevel(logging.DEBUG, \"archive\")\n\t}\n\n\tarchive.Listeners.RecordingSignalListener = RecordingSignalListener\n\tarchive.Listeners.RecordingEventStartedListener = RecordingEventStartedListener\n\tarchive.Listeners.RecordingEventProgressListener = RecordingEventProgressListener\n\tarchive.Listeners.RecordingEventStoppedListener = RecordingEventStoppedListener\n\n\ttestCounters = TestCounters{0, 0, 0, 0}\n\tif !CounterValuesMatch(testCounters, 0, 0, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tarchive.EnableRecordingEvents()\n\tarchive.RecordingEventsPoll()\n\n\tif !CounterValuesMatch(testCounters, 0, 0, 0, 0, t) {\n\t\tt.Log(\"Async event counters 
mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tpublication, err := archive.AddRecordedPublication(testCases[0].sampleChannel, testCases[0].sampleStream)\n\tif err != nil {\n\t\tt.Log(err)\n\t\tt.FailNow()\n\t}\n\n\t// Delay a little to get the publication is established\n\tidler := idlestrategy.Sleeping{SleepFor: time.Millisecond * 100}\n\tidler.Idle(0)\n\n\tarchive.RecordingEventsPoll()\n\tif !CounterValuesMatch(testCounters, 1, 1, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tif err := archive.StopRecordingByPublication(*publication); err != nil {\n\t\tt.Log(err)\n\t\tt.FailNow()\n\t}\n\n\tif !CounterValuesMatch(testCounters, 2, 1, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tarchive.RecordingEventsPoll()\n\tif !CounterValuesMatch(testCounters, 2, 1, 0, 1, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\t// Cleanup\n\tarchive.DisableRecordingEvents()\n\tarchive.Listeners.RecordingSignalListener = nil\n\tarchive.Listeners.RecordingEventStartedListener = nil\n\tarchive.Listeners.RecordingEventProgressListener = nil\n\tarchive.Listeners.RecordingEventStoppedListener = nil\n\ttestCounters = TestCounters{0, 0, 0, 0}\n\tif !CounterValuesMatch(testCounters, 0, 0, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tpublication.Close()\n}", "func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) {\n\tfs, clientCh, cleanup := setupOverrides()\n\tdefer cleanup()\n\n\t// Create a server option to get notified about serving mode changes. We don't\n\t// do anything other than throwing a log entry here. But this is required,\n\t// since the server code emits a log entry at the default level (which is\n\t// ERROR) if no callback is registered for serving mode changes. Our\n\t// testLogger fails the test if there is any log entry at ERROR level. 
It does\n\t// provide an ExpectError() method, but that takes a string and it would be\n\t// painful to construct the exact error message expected here. Instead this\n\t// works just fine.\n\tmodeChangeOpt := ServingModeCallback(func(addr net.Addr, args ServingModeChangeArgs) {\n\t\tt.Logf(\"Serving mode for listener %q changed to %q, err: %v\", addr.String(), args.Mode, args.Err)\n\t})\n\tserver := NewGRPCServer(modeChangeOpt)\n\tdefer server.Stop()\n\n\tlis, err := testutils.LocalTCPListener()\n\tif err != nil {\n\t\tt.Fatalf(\"testutils.LocalTCPListener() failed: %v\", err)\n\t}\n\n\t// Call Serve() in a goroutine, and push on a channel when Serve returns.\n\tserveDone := testutils.NewChannel()\n\tgo func() {\n\t\tif err := server.Serve(lis); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tserveDone.Send(nil)\n\t}()\n\n\t// Wait for an xdsClient to be created.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tc, err := clientCh.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for new xdsClient to be created: %v\", err)\n\t}\n\tclient := c.(*fakeclient.Client)\n\n\t// Wait for a listener watch to be registered on the xdsClient.\n\tname, err := client.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a ListenerWatch: %v\", err)\n\t}\n\twantName := strings.Replace(testServerListenerResourceNameTemplate, \"%s\", lis.Addr().String(), -1)\n\tif name != wantName {\n\t\tt.Fatalf(\"LDS watch registered for name %q, want %q\", name, wantName)\n\t}\n\n\t// Push a good LDS response with security config, and wait for Serve() to be\n\t// invoked on the underlying grpc.Server. 
Also make sure that certificate\n\t// providers are not created.\n\tfcm, err := xdsresource.NewFilterChainManager(&v3listenerpb.Listener{\n\t\tFilterChains: []*v3listenerpb.FilterChain{\n\t\t\t{\n\t\t\t\tTransportSocket: &v3corepb.TransportSocket{\n\t\t\t\t\tName: \"envoy.transport_sockets.tls\",\n\t\t\t\t\tConfigType: &v3corepb.TransportSocket_TypedConfig{\n\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{\n\t\t\t\t\t\t\tCommonTlsContext: &v3tlspb.CommonTlsContext{\n\t\t\t\t\t\t\t\tTlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{\n\t\t\t\t\t\t\t\t\tInstanceName: \"identityPluginInstance\",\n\t\t\t\t\t\t\t\t\tCertificateName: \"identityCertName\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tFilters: []*v3listenerpb.Filter{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"filter-1\",\n\t\t\t\t\t\tConfigType: &v3listenerpb.Filter_TypedConfig{\n\t\t\t\t\t\t\tTypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{\n\t\t\t\t\t\t\t\tRouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{\n\t\t\t\t\t\t\t\t\tRouteConfig: &v3routepb.RouteConfiguration{\n\t\t\t\t\t\t\t\t\t\tName: \"routeName\",\n\t\t\t\t\t\t\t\t\t\tVirtualHosts: []*v3routepb.VirtualHost{{\n\t\t\t\t\t\t\t\t\t\t\tDomains: []string{\"lds.target.good:3333\"},\n\t\t\t\t\t\t\t\t\t\t\tRoutes: []*v3routepb.Route{{\n\t\t\t\t\t\t\t\t\t\t\t\tMatch: &v3routepb.RouteMatch{\n\t\t\t\t\t\t\t\t\t\t\t\t\tPathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: \"/\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\tAction: &v3routepb.Route_NonForwardingAction{},\n\t\t\t\t\t\t\t\t\t\t\t}}}}},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tHttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsclient.NewFilterChainManager() failed with error: %v\", err)\n\t}\n\taddr, port := 
splitHostPort(lis.Addr().String())\n\tclient.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{\n\t\tRouteConfigName: \"routeconfig\",\n\t\tInboundListenerCfg: &xdsresource.InboundListenerConfig{\n\t\t\tAddress: addr,\n\t\t\tPort: port,\n\t\t\tFilterChains: fcm,\n\t\t},\n\t}, nil)\n\tif _, err := fs.serveCh.Receive(ctx); err != nil {\n\t\tt.Fatalf(\"error when waiting for Serve() to be invoked on the grpc.Server\")\n\t}\n\n\t// Make sure the security configuration is not acted upon.\n\tif err := verifyCertProviderNotCreated(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (m *HeavySyncMock) ValidateCallCounters() {\n\n\tif !m.ResetFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.Reset\")\n\t}\n\n\tif !m.StartFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.Start\")\n\t}\n\n\tif !m.StopFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.Stop\")\n\t}\n\n\tif !m.StoreBlobsFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.StoreBlobs\")\n\t}\n\n\tif !m.StoreDropFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.StoreDrop\")\n\t}\n\n\tif !m.StoreIndicesFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.StoreIndices\")\n\t}\n\n\tif !m.StoreRecordsFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.StoreRecords\")\n\t}\n\n}", "func TestPutBulkBlobSpanningChunksStreamAccessDoesNotExist(t *testing.T) {\n defer testutils.DeleteBucketContents(client, testBucket)\n\n helper := helpers.NewHelpers(client)\n\n errorCallbackCalled := false\n var mutex sync.Mutex\n errorCallback := func(objectName string, err error) {\n mutex.Lock()\n errorCallbackCalled = true\n mutex.Unlock()\n\n ds3Testing.AssertString(t, \"object name\", LargeBookTitle, objectName)\n }\n\n strategy := helpers.WriteTransferStrategy{\n BlobStrategy: newTestBlobStrategy(),\n Options: helpers.WriteBulkJobOptions{MaxUploadSize: &helpers.MinUploadSize},\n Listeners: helpers.ListenerStrategy{ErrorCallback:errorCallback},\n }\n\n // open a 
file but lie that its bigger than it is\n f, err := os.Open(testutils.BookPath + testutils.BookTitles[0])\n writeObj := helperModels.PutObject{\n PutObject: ds3Models.Ds3PutObject{Name: LargeBookTitle, Size: 20*1024*1024},\n ChannelBuilder: &testStreamAccessReadChannelBuilder{f: f},\n }\n\n var writeObjects []helperModels.PutObject\n writeObjects = append(writeObjects, writeObj)\n\n ds3Testing.AssertNilError(t, err)\n\n _, err = helper.PutObjects(testBucket, writeObjects, strategy)\n ds3Testing.AssertNilError(t, err)\n ds3Testing.AssertBool(t, \"error callback called\", true, errorCallbackCalled)\n}", "func TestManagedCheckForUploadWorkers(t *testing.T) {\n\t// Test the blank case. Because there are no workers, the function should\n\t// return a blank set of workers and indicate that the chunk is okay to\n\t// distribute. We don't want the upload loop freezing if there are no\n\t// workers.\n\tuc := new(unfinishedUploadChunk)\n\tvar inputWorkers []*worker\n\tworkers, finalized := managedSelectWorkersForUploading(uc, inputWorkers)\n\tif workers != nil {\n\t\tt.Fatal(\"bad\")\n\t}\n\tif !finalized {\n\t\tt.Fatal(\"bad\")\n\t}\n\t// Give the UC some minpieces and needed pieces.\n\tuc.staticPiecesNeeded = 1\n\tuc.staticMinimumPieces = 1\n\tworkers, finalized = managedSelectWorkersForUploading(uc, inputWorkers)\n\tif workers != nil {\n\t\tt.Fatal(\"bad\")\n\t}\n\tif !finalized {\n\t\tt.Fatal(\"bad\")\n\t}\n\tuc.staticPiecesNeeded = 2\n\tworkers, finalized = managedSelectWorkersForUploading(uc, inputWorkers)\n\tif workers != nil {\n\t\tt.Fatal(\"bad\")\n\t}\n\tif !finalized {\n\t\tt.Fatal(\"bad\")\n\t}\n\n\t// Test the use case where there are not enough available workers, but there\n\t// are enough overloaded workers. 
This should result in finalized being\n\t// 'false', as we want to wait for the overloaded workers to finish\n\t// processing their chunks and become available.\n\tinputWorkers = append(inputWorkers, newOverloadedWorker())\n\tworkers, finalized = managedSelectWorkersForUploading(uc, inputWorkers)\n\tif workers != nil {\n\t\tt.Fatal(\"bad\")\n\t}\n\tif finalized {\n\t\tt.Fatal(\"bad\")\n\t}\n\n\t// Test the case where the only worker is busy\n\tfor i := 0; i < workerUploadOverloadedThreshold-workerUploadBusyThreshold; i++ {\n\t\tinputWorkers[0].unprocessedChunks.Pop()\n\t}\n\tworkers, finalized = managedSelectWorkersForUploading(uc, inputWorkers)\n\tif workers != nil {\n\t\tt.Fatal(\"bad\")\n\t}\n\tif finalized {\n\t\tt.Fatal(\"bad\")\n\t}\n\n\t// Test the case where the only worker is available. Since the only worker\n\t// is available, the chunk should be good to go.\n\tfor i := 0; i < workerUploadBusyThreshold; i++ {\n\t\tinputWorkers[0].unprocessedChunks.Pop()\n\t}\n\tworkers, finalized = managedSelectWorkersForUploading(uc, inputWorkers)\n\tif len(workers) != 1 {\n\t\tt.Fatal(\"bad\")\n\t}\n\tif !finalized {\n\t\tt.Fatal(\"bad\")\n\t}\n\n\t// Test what happens when there is an overloaded worker that could be busy.\n\tinputWorkers = append(inputWorkers, newOverloadedWorker())\n\tworkers, finalized = managedSelectWorkersForUploading(uc, inputWorkers)\n\tif workers != nil {\n\t\tt.Fatal(\"bad\")\n\t}\n\tif finalized {\n\t\tt.Fatal(\"bad\")\n\t}\n\t// Now check what happens when it is busy. 
There are enough available\n\t// workers to make the chunk available, and enough busy workers to finish\n\t// the chunk, so it should pass.\n\tfor i := 0; i < workerUploadOverloadedThreshold-workerUploadBusyThreshold; i++ {\n\t\tinputWorkers[1].unprocessedChunks.Pop()\n\t}\n\tworkers, finalized = managedSelectWorkersForUploading(uc, inputWorkers)\n\tif len(workers) != 2 {\n\t\tt.Fatal(\"bad\", len(workers))\n\t}\n\tif !finalized {\n\t\tt.Fatal(\"bad\")\n\t}\n\t// Change the number of staticPiecesNeeded in the chunk to 3, so that we now\n\t// have enough available workers to make the chunk available but not enough\n\t// busy workers (or workers at all) to make the chunk complete. This should\n\t// succeed.\n\tworkers, finalized = managedSelectWorkersForUploading(uc, inputWorkers)\n\tif len(workers) != 2 {\n\t\tt.Fatal(\"bad\", len(workers))\n\t}\n\tif !finalized {\n\t\tt.Fatal(\"bad\")\n\t}\n}", "func (s) TestBalancer_TwoAddresses_ReportingEnabledOOB(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tutilSetter func(orca.ServerMetricsRecorder, float64)\n\t}{{\n\t\tname: \"application_utilization\",\n\t\tutilSetter: func(smr orca.ServerMetricsRecorder, val float64) {\n\t\t\tsmr.SetApplicationUtilization(val)\n\t\t},\n\t}, {\n\t\tname: \"cpu_utilization\",\n\t\tutilSetter: func(smr orca.ServerMetricsRecorder, val float64) {\n\t\t\tsmr.SetCPUUtilization(val)\n\t\t},\n\t}, {\n\t\tname: \"application over cpu\",\n\t\tutilSetter: func(smr orca.ServerMetricsRecorder, val float64) {\n\t\t\tsmr.SetApplicationUtilization(val)\n\t\t\tsmr.SetCPUUtilization(2.0) // ignored because ApplicationUtilization is set\n\t\t},\n\t}}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\t\t\tdefer cancel()\n\n\t\t\tsrv1 := startServer(t, reportOOB)\n\t\t\tsrv2 := startServer(t, reportOOB)\n\n\t\t\t// srv1 starts loaded and srv2 starts without load; ensure RPCs are 
routed\n\t\t\t// disproportionately to srv2 (10:1).\n\t\t\tsrv1.oobMetrics.SetQPS(10.0)\n\t\t\ttc.utilSetter(srv1.oobMetrics, 1.0)\n\n\t\t\tsrv2.oobMetrics.SetQPS(10.0)\n\t\t\ttc.utilSetter(srv2.oobMetrics, 0.1)\n\n\t\t\tsc := svcConfig(t, oobConfig)\n\t\t\tif err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil {\n\t\t\t\tt.Fatalf(\"Error starting client: %v\", err)\n\t\t\t}\n\t\t\taddrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}}\n\t\t\tsrv1.R.UpdateState(resolver.State{Addresses: addrs})\n\n\t\t\t// Call each backend once to ensure the weights have been received.\n\t\t\tensureReached(ctx, t, srv1.Client, 2)\n\n\t\t\t// Wait for the weight update period to allow the new weights to be processed.\n\t\t\ttime.Sleep(weightUpdatePeriod)\n\t\t\tcheckWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10})\n\t\t})\n\t}\n}", "func TestInvalidConsensusChangeSubscription(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tcst, err := createConsensusSetTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cst.Close()\n\n\tms := newMockSubscriber()\n\tbadCCID := modules.ConsensusChangeID{255, 255, 255}\n\terr = cst.cs.ConsensusSetSubscribe(&ms, badCCID, cst.cs.tg.StopChan())\n\tif err != modules.ErrInvalidConsensusChangeID {\n\t\tt.Error(\"consensus set returning the wrong error during an invalid subscription:\", err)\n\t}\n\n\tcst.cs.mu.Lock()\n\tfor i := range cst.cs.subscribers {\n\t\tif cst.cs.subscribers[i] == &ms {\n\t\t\tt.Fatal(\"subscriber was not removed from subscriber list after an erroneus subscription\")\n\t\t}\n\t}\n\tcst.cs.mu.Unlock()\n}", "func (r *FlakeTestResult) subdivide(events []int) {\n\tif len(events) == 1 {\n\t\t// Isolated failure.\n\t\tregion := FlakeRegion{events, events[0], events[0], 1, 1, nil}\n\t\tr.All = append(r.All, region)\n\t\treturn\n\t}\n\n\tmle, ad := interarrivalAnalysis(events)\n\tif ad == nil || ad.P >= 0.05 {\n\t\t// We failed to reject the null 
hypothesis that this\n\t\t// isn't geometrically distributed. That's about as\n\t\t// close as we're going to get to calling it\n\t\t// geometrically distributed.\n\t\tregion := FlakeRegion{events, events[0], events[len(events)-1], len(events), mle.P, ad}\n\t\tr.All = append(r.All, region)\n\t\treturn\n\t}\n\n\t// We reject the null hypothesis and accept the alternate\n\t// hypothesis that this range of events is not a Bernoulli\n\t// process. Subdivide on the longest gap, which is the least\n\t// likely event in this range.\n\tlongestIndex, longestVal := 0, events[1]-events[0]\n\tfor i := 0; i < len(events)-1; i++ {\n\t\tval := events[i+1] - events[i]\n\t\tif val > longestVal {\n\t\t\tlongestIndex, longestVal = i, val\n\t\t}\n\t}\n\n\t//fmt.Fprintln(os.Stderr, \"subdividing\", events[:longestIndex+1], events[longestIndex+1:], mle.P, ad.P)\n\n\t// Find the more recent ranges first.\n\tr.subdivide(events[longestIndex+1:])\n\tr.subdivide(events[:longestIndex+1])\n}", "func (mr *MockProviderMockRecorder) OnEndpointsAdd(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"OnEndpointsAdd\", reflect.TypeOf((*MockProvider)(nil).OnEndpointsAdd), arg0)\n}", "func (client WorkloadNetworksClient) CreateSegmentsResponder(resp *http.Response) (result WorkloadNetworkSegment, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func TestEventListener(t *testing.T) {\n\tvar d *DB\n\tvar buf syncedBuffer\n\tmem := vfs.NewMem()\n\terr := mem.MkdirAll(\"ext\", 0755)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdatadriven.RunTest(t, \"testdata/event_listener\", func(td *datadriven.TestData) string {\n\t\tswitch td.Cmd {\n\t\tcase \"open\":\n\t\t\tbuf.Reset()\n\t\t\tvar err error\n\t\t\td, err = Open(\"db\", 
&Options{\n\t\t\t\tFS: loggingFS{mem, &buf},\n\t\t\t\tEventListener: MakeLoggingEventListener(&buf),\n\t\t\t\tMaxManifestFileSize: 1,\n\t\t\t\tWALDir: \"wal\",\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"flush\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Flush(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"compact\":\n\t\t\tbuf.Reset()\n\t\t\tif err := d.Set([]byte(\"a\"), nil, nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Compact([]byte(\"a\"), []byte(\"b\")); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"ingest\":\n\t\t\tbuf.Reset()\n\t\t\tf, err := mem.Create(\"ext/0\")\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tw := sstable.NewWriter(f, nil, LevelOptions{})\n\t\t\tif err := w.Add(base.MakeInternalKey([]byte(\"a\"), 0, InternalKeyKindSet), nil); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := d.Ingest([]string{\"ext/0\"}); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tif err := mem.Remove(\"ext/0\"); err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\treturn buf.String()\n\n\t\tcase \"metrics\":\n\t\t\treturn d.Metrics().String()\n\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"unknown command: %s\", td.Cmd)\n\t\t}\n\t})\n}", "func SNSSQSMultipleSubsDifferentConsumerIDs(t *testing.T) {\n\tconsumerGroup1 := watcher.NewUnordered()\n\tconsumerGroup2 := watcher.NewUnordered()\n\n\t// Set the partition key on all messages so they are written to the same partition. 
This allows for checking of ordered messages.\n\tmetadata := map[string]string{\n\t\tmessageKey: partition0,\n\t}\n\n\t// subscriber of the given topic\n\tsubscriberApplication := func(appID string, topicName string, messagesWatcher *watcher.Watcher) app.SetupFn {\n\t\treturn func(ctx flow.Context, s common.Service) error {\n\t\t\t// Simulate periodic errors.\n\t\t\tsim := simulate.PeriodicError(ctx, 100)\n\t\t\t// Setup the /orders event handler.\n\t\t\treturn multierr.Combine(\n\t\t\t\ts.AddTopicEventHandler(&common.Subscription{\n\t\t\t\t\tPubsubName: pubsubName,\n\t\t\t\t\tTopic: topicName,\n\t\t\t\t\tRoute: \"/orders\",\n\t\t\t\t}, func(_ context.Context, e *common.TopicEvent) (retry bool, err error) {\n\t\t\t\t\tif err := sim(); err != nil {\n\t\t\t\t\t\treturn true, err\n\t\t\t\t\t}\n\n\t\t\t\t\t// Track/Observe the data of the event.\n\t\t\t\t\tmessagesWatcher.Observe(e.Data)\n\t\t\t\t\tctx.Logf(\"Message Received appID: %s,pubsub: %s, topic: %s, id: %s, data: %s\", appID, e.PubsubName, e.Topic, e.ID, e.Data)\n\t\t\t\t\treturn false, nil\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\t}\n\n\tpublishMessages := func(metadata map[string]string, sidecarName string, topicName string, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// prepare the messages\n\t\t\tmessages := make([]string, numMessages)\n\t\t\tfor i := range messages {\n\t\t\t\tmessages[i] = fmt.Sprintf(\"partitionKey: %s, message for topic: %s, index: %03d, uniqueId: %s\", metadata[messageKey], topicName, i, uuid.New().String())\n\t\t\t}\n\n\t\t\t// add the messages as expectations to the watchers\n\t\t\tfor _, messageWatcher := range messageWatchers {\n\t\t\t\tmessageWatcher.ExpectStrings(messages...)\n\t\t\t}\n\n\t\t\t// get the sidecar (dapr) client\n\t\t\tclient := sidecar.GetClient(ctx, sidecarName)\n\n\t\t\t// publish messages\n\t\t\tctx.Logf(\"Publishing messages. 
sidecarName: %s, topicName: %s\", sidecarName, topicName)\n\n\t\t\tvar publishOptions dapr.PublishEventOption\n\n\t\t\tif metadata != nil {\n\t\t\t\tpublishOptions = dapr.PublishEventWithMetadata(metadata)\n\t\t\t}\n\n\t\t\tfor _, message := range messages {\n\t\t\t\tctx.Logf(\"Publishing: %q\", message)\n\t\t\t\tvar err error\n\n\t\t\t\tif publishOptions != nil {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message, publishOptions)\n\t\t\t\t} else {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message)\n\t\t\t\t}\n\t\t\t\trequire.NoError(ctx, err, \"SNSSQSMultipleSubsDifferentConsumerIDs - error publishing message\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tassertMessages := func(timeout time.Duration, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// assert for messages\n\t\t\tfor _, m := range messageWatchers {\n\t\t\t\tif !m.Assert(ctx, 25*timeout) {\n\t\t\t\t\tctx.Errorf(\"SNSSQSMultipleSubsDifferentConsumerIDs - message assertion failed for watcher: %#v\\n\", m)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tflow.New(t, \"SNSSQS certification - single publisher and multiple subscribers with different consumer IDs\").\n\n\t\t// Run subscriberApplication app1\n\t\tStep(app.Run(appID1, fmt.Sprintf(\":%d\", appPort),\n\t\t\tsubscriberApplication(appID1, topicActiveName, consumerGroup1))).\n\n\t\t// Run the Dapr sidecar with ConsumerID \"PUBSUB_AWS_SNSSQS_QUEUE_1\"\n\t\tStep(sidecar.Run(sidecarName1,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/consumer_one\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort)),\n\t\t\t)...,\n\t\t)).\n\n\t\t// Run subscriberApplication app2\n\t\tStep(app.Run(appID2, fmt.Sprintf(\":%d\", 
appPort+portOffset),\n\t\t\tsubscriberApplication(appID2, topicActiveName, consumerGroup2))).\n\n\t\t// RRun the Dapr sidecar with ConsumerID \"PUBSUB_AWS_SNSSQS_QUEUE_2\"\n\t\tStep(sidecar.Run(sidecarName2,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/consumer_two\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort+portOffset)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort+portOffset)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort+portOffset)),\n\t\t\t\tembedded.WithProfilePort(strconv.Itoa(runtime.DefaultProfilePort+portOffset)),\n\t\t\t)...,\n\t\t)).\n\t\tStep(\"publish messages to ==>\"+topicActiveName, publishMessages(metadata, sidecarName1, topicActiveName, consumerGroup1)).\n\t\tStep(\"verify if app1, app2 together have recevied messages published to topic1\", assertMessages(10*time.Second, consumerGroup1)).\n\t\tStep(\"reset\", flow.Reset(consumerGroup1, consumerGroup2)).\n\t\tRun()\n}", "func TestMultiRangeEmptyAfterTruncate(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\ts, db := setupMultipleRanges(t, \"c\", \"d\")\n\tdefer s.Stop()\n\n\t// Delete the keys within a transaction. 
Implicitly, the intents are\n\t// resolved via ResolveIntentRange upon completion.\n\tif err := db.Txn(func(txn *client.Txn) error {\n\t\tb := &client.Batch{}\n\t\tb.DelRange(\"a\", \"b\")\n\t\tb.DelRange(\"e\", \"f\")\n\t\tb.DelRange(keys.LocalMax, roachpb.KeyMax)\n\t\treturn txn.CommitInBatch(b)\n\t}); err != nil {\n\t\tt.Fatalf(\"unexpected error on transactional DeleteRange: %s\", err)\n\t}\n}", "func (subscription *NamespacesTopicsSubscription) createValidations() []func() (admission.Warnings, error) {\n\treturn []func() (admission.Warnings, error){subscription.validateResourceReferences}\n}", "func verifyReceiverLifecycle(t *testing.T, factory component.ReceiverFactory, getConfigFn getReceiverConfigFn) {\n\tctx := context.Background()\n\thost := newAssertNoErrorHost(t)\n\treceiverCreateSet := componenttest.NewNopReceiverCreateSettings()\n\n\tif getConfigFn == nil {\n\t\tgetConfigFn = factory.CreateDefaultConfig\n\t}\n\n\tcreateFns := []createReceiverFn{\n\t\twrapCreateLogsRcvr(factory),\n\t\twrapCreateTracesRcvr(factory),\n\t\twrapCreateMetricsRcvr(factory),\n\t}\n\n\tfor _, createFn := range createFns {\n\t\tfirstRcvr, err := createFn(ctx, receiverCreateSet, getConfigFn())\n\t\tif errors.Is(err, componenterror.ErrDataTypeIsNotSupported) {\n\t\t\tcontinue\n\t\t}\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, firstRcvr.Start(ctx, host))\n\t\trequire.NoError(t, firstRcvr.Shutdown(ctx))\n\n\t\tsecondRcvr, err := createFn(ctx, receiverCreateSet, getConfigFn())\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, secondRcvr.Start(ctx, host))\n\t\trequire.NoError(t, secondRcvr.Shutdown(ctx))\n\t}\n}", "func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeController) error {\n\tvar err error\n\n\t// Read recorded events - wait up to 1 minute to get all the expected ones\n\t// (just in case some goroutines are slower with writing)\n\ttimer := time.NewTimer(time.Minute)\n\tdefer timer.Stop()\n\n\tfakeRecorder := 
ctrl.eventRecorder.(*record.FakeRecorder)\n\tgotEvents := []string{}\n\tfinished := false\n\tfor len(gotEvents) < len(expectedEvents) && !finished {\n\t\tselect {\n\t\tcase event, ok := <-fakeRecorder.Events:\n\t\t\tif ok {\n\t\t\t\tglog.V(5).Infof(\"event recorder got event %s\", event)\n\t\t\t\tgotEvents = append(gotEvents, event)\n\t\t\t} else {\n\t\t\t\tglog.V(5).Infof(\"event recorder finished\")\n\t\t\t\tfinished = true\n\t\t\t}\n\t\tcase _, _ = <-timer.C:\n\t\t\tglog.V(5).Infof(\"event recorder timeout\")\n\t\t\tfinished = true\n\t\t}\n\t}\n\n\t// Evaluate the events\n\tfor i, expected := range expectedEvents {\n\t\tif len(gotEvents) <= i {\n\t\t\tt.Errorf(\"Event %q not emitted\", expected)\n\t\t\terr = fmt.Errorf(\"Events do not match\")\n\t\t\tcontinue\n\t\t}\n\t\treceived := gotEvents[i]\n\t\tif !strings.HasPrefix(received, expected) {\n\t\t\tt.Errorf(\"Unexpected event received, expected %q, got %q\", expected, received)\n\t\t\terr = fmt.Errorf(\"Events do not match\")\n\t\t}\n\t}\n\tfor i := len(expectedEvents); i < len(gotEvents); i++ {\n\t\tt.Errorf(\"Unexpected event received: %q\", gotEvents[i])\n\t\terr = fmt.Errorf(\"Events do not match\")\n\t}\n\treturn err\n}", "func (_mr *MockOptionsMockRecorder) SetSegmentReaderPool(arg0 interface{}) *gomock.Call {\n\treturn _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, \"SetSegmentReaderPool\", reflect.TypeOf((*MockOptions)(nil).SetSegmentReaderPool), arg0)\n}", "func TestMultiRoutineAccessWithDelay_logger(t *testing.T) {\n\t// storing address of *Logger instance for validation.\n\tmsgArray := make([]string, 0)\n\n\t// wait-group to make sure a certain number of go-routine(s)\n\t// has finished its task.\n\tvar wgroup sync.WaitGroup\n\n\tfor i := 0; i < 10; i++ {\n\t\t// updates the wait-group counter.\n\t\twgroup.Add(1)\n\n\t\tgo func(idx int) {\n\t\t\t// decreses the wait-group counter by 1.\n\t\t\t// When the counter returns to 0, the wait-group will end the \"wait\".\n\t\t\tdefer 
wgroup.Done()\n\n\t\t\t// add a random delay to simulate multi access.\n\t\t\ttime.Sleep(time.Millisecond * time.Duration(rand.Int63n(1000)))\n\n\t\t\tlog := logger.GetLogger()\n\t\t\t// append the address value of instance \"log\"\n\t\t\tlock.Lock()\n\t\t\tmsgArray = append(msgArray, fmt.Sprintf(\"%p\", log))\n\t\t\tlock.Unlock()\n\t\t\tlog.Log(fmt.Sprintf(\"[with delay] this is a log entry from [%v]\\n\", idx))\n\t\t}(i)\n\t}\n\twgroup.Wait()\n\n\t// verification\n\tif len(msgArray) == 0 {\n\t\tt.Fatalf(\"expect to have a least one message\")\n\t}\n\taddrLine := msgArray[0]\n\tfor i := 1; i < len(msgArray); i++ {\n\t\tline := msgArray[i]\n\t\tif addrLine != line {\n\t\t\tt.Errorf(\"expect both lines (addresses of Logger) should be identical, [%v] vs [%v]\\n\", addrLine, line)\n\t\t}\n\t}\n}", "func WatchEventSequenceVerifier(ctx context.Context, dc dynamic.Interface, resourceType schema.GroupVersionResource, namespace string, resourceName string, listOptions metav1.ListOptions, expectedWatchEvents []watch.Event, scenario func(*watchtools.RetryWatcher) []watch.Event, retryCleanup func() error) {\n\tlistWatcher := &cache.ListWatch{\n\t\tWatchFunc: func(listOptions metav1.ListOptions) (watch.Interface, error) {\n\t\t\treturn dc.Resource(resourceType).Namespace(namespace).Watch(ctx, listOptions)\n\t\t},\n\t}\n\n\tretries := 3\nretriesLoop:\n\tfor try := 1; try <= retries; try++ {\n\t\tinitResource, err := dc.Resource(resourceType).Namespace(namespace).List(ctx, listOptions)\n\t\tExpectNoError(err, \"Failed to fetch initial resource\")\n\n\t\tresourceWatch, err := watchtools.NewRetryWatcher(initResource.GetResourceVersion(), listWatcher)\n\t\tExpectNoError(err, \"Failed to create a resource watch of %v in namespace %v\", resourceType.Resource, namespace)\n\n\t\t// NOTE the test may need access to the events to see what's going on, such as a change in status\n\t\tactualWatchEvents := scenario(resourceWatch)\n\t\terrs := 
sets.NewString()\n\t\tgomega.Expect(len(expectedWatchEvents)).To(gomega.BeNumerically(\"<=\", len(actualWatchEvents)), \"Did not get enough watch events\")\n\n\t\ttotalValidWatchEvents := 0\n\t\tfoundEventIndexes := map[int]*int{}\n\n\t\tfor watchEventIndex, expectedWatchEvent := range expectedWatchEvents {\n\t\t\tfoundExpectedWatchEvent := false\n\t\tactualWatchEventsLoop:\n\t\t\tfor actualWatchEventIndex, actualWatchEvent := range actualWatchEvents {\n\t\t\t\tif foundEventIndexes[actualWatchEventIndex] != nil {\n\t\t\t\t\tcontinue actualWatchEventsLoop\n\t\t\t\t}\n\t\t\t\tif actualWatchEvent.Type == expectedWatchEvent.Type {\n\t\t\t\t\tfoundExpectedWatchEvent = true\n\t\t\t\t\tfoundEventIndexes[actualWatchEventIndex] = &watchEventIndex\n\t\t\t\t\tbreak actualWatchEventsLoop\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !foundExpectedWatchEvent {\n\t\t\t\terrs.Insert(fmt.Sprintf(\"Watch event %v not found\", expectedWatchEvent.Type))\n\t\t\t}\n\t\t\ttotalValidWatchEvents++\n\t\t}\n\t\terr = retryCleanup()\n\t\tExpectNoError(err, \"Error occurred when cleaning up resources\")\n\t\tif errs.Len() > 0 && try < retries {\n\t\t\tfmt.Println(\"invariants violated:\\n\", strings.Join(errs.List(), \"\\n - \"))\n\t\t\tcontinue retriesLoop\n\t\t}\n\t\tif errs.Len() > 0 {\n\t\t\tFailf(\"Unexpected error(s): %v\", strings.Join(errs.List(), \"\\n - \"))\n\t\t}\n\t\tExpectEqual(totalValidWatchEvents, len(expectedWatchEvents), \"Error: there must be an equal amount of total valid watch events (%d) and expected watch events (%d)\", totalValidWatchEvents, len(expectedWatchEvents))\n\t\tbreak retriesLoop\n\t}\n}", "func (h MultiStakingHooks) AfterValidatorBonded(ctx sdk.Context, consAddr sdk.ConsAddress, valAddr sdk.ValAddress) {\n\tfor i := range h {\n\t\th[i].AfterValidatorBonded(ctx, consAddr, valAddr)\n\t}\n}", "func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConfigRange *hcl.Range) (diags hcl.Diagnostics) {\n\tmod := cfg.Module\n\n\tfor name, child := range 
cfg.Children {\n\t\tmc := mod.ModuleCalls[name]\n\t\tchildNoProviderConfigRange := noProviderConfigRange\n\t\t// if the module call has any of count, for_each or depends_on,\n\t\t// providers are prohibited from being configured in this module, or\n\t\t// any module beneath this module.\n\t\tswitch {\n\t\tcase mc.Count != nil:\n\t\t\tchildNoProviderConfigRange = mc.Count.Range().Ptr()\n\t\tcase mc.ForEach != nil:\n\t\t\tchildNoProviderConfigRange = mc.ForEach.Range().Ptr()\n\t\tcase mc.DependsOn != nil:\n\t\t\tif len(mc.DependsOn) > 0 {\n\t\t\t\tchildNoProviderConfigRange = mc.DependsOn[0].SourceRange().Ptr()\n\t\t\t} else {\n\t\t\t\t// Weird! We'll just use the call itself, then.\n\t\t\t\tchildNoProviderConfigRange = mc.DeclRange.Ptr()\n\t\t\t}\n\t\t}\n\t\tdiags = append(diags, validateProviderConfigs(mc, child, childNoProviderConfigRange)...)\n\t}\n\n\t// the set of provider configuration names passed into the module, with the\n\t// source range of the provider assignment in the module call.\n\tpassedIn := map[string]PassedProviderConfig{}\n\n\t// the set of empty configurations that could be proxy configurations, with\n\t// the source range of the empty configuration block.\n\temptyConfigs := map[string]hcl.Range{}\n\n\t// the set of provider with a defined configuration, with the source range\n\t// of the configuration block declaration.\n\tconfigured := map[string]hcl.Range{}\n\n\t// the set of configuration_aliases defined in the required_providers\n\t// block, with the fully qualified provider type.\n\tconfigAliases := map[string]addrs.AbsProviderConfig{}\n\n\t// the set of provider names defined in the required_providers block, and\n\t// their provider types.\n\tlocalNames := map[string]addrs.Provider{}\n\n\tfor _, pc := range mod.ProviderConfigs {\n\t\tname := providerName(pc.Name, pc.Alias)\n\t\t// Validate the config against an empty schema to see if it's empty.\n\t\t_, pcConfigDiags := pc.Config.Content(&hcl.BodySchema{})\n\t\tif 
pcConfigDiags.HasErrors() || pc.Version.Required != nil {\n\t\t\tconfigured[name] = pc.DeclRange\n\t\t} else {\n\t\t\temptyConfigs[name] = pc.DeclRange\n\t\t}\n\t}\n\n\tif mod.ProviderRequirements != nil {\n\t\tfor _, req := range mod.ProviderRequirements.RequiredProviders {\n\t\t\tlocalNames[req.Name] = req.Type\n\t\t\tfor _, alias := range req.Aliases {\n\t\t\t\taddr := addrs.AbsProviderConfig{\n\t\t\t\t\tModule: cfg.Path,\n\t\t\t\t\tProvider: req.Type,\n\t\t\t\t\tAlias: alias.Alias,\n\t\t\t\t}\n\t\t\t\tconfigAliases[providerName(alias.LocalName, alias.Alias)] = addr\n\t\t\t}\n\t\t}\n\t}\n\n\t// collect providers passed from the parent\n\tif parentCall != nil {\n\t\tfor _, passed := range parentCall.Providers {\n\t\t\tname := providerName(passed.InChild.Name, passed.InChild.Alias)\n\t\t\tpassedIn[name] = passed\n\t\t}\n\t}\n\n\tparentModuleText := \"the root module\"\n\tmoduleText := \"the root module\"\n\tif !cfg.Path.IsRoot() {\n\t\tmoduleText = cfg.Path.String()\n\t\tif parent := cfg.Path.Parent(); !parent.IsRoot() {\n\t\t\t// module address are prefixed with `module.`\n\t\t\tparentModuleText = parent.String()\n\t\t}\n\t}\n\n\t// Verify that any module calls only refer to named providers, and that\n\t// those providers will have a configuration at runtime. 
This way we can\n\t// direct users where to add the missing configuration, because the runtime\n\t// error is only \"missing provider X\".\n\tfor _, modCall := range mod.ModuleCalls {\n\t\tfor _, passed := range modCall.Providers {\n\t\t\t// aliased providers are handled more strictly, and are never\n\t\t\t// inherited, so they are validated within modules further down.\n\t\t\t// Skip these checks to prevent redundant diagnostics.\n\t\t\tif passed.InParent.Alias != \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tname := passed.InParent.String()\n\t\t\t_, confOK := configured[name]\n\t\t\t_, localOK := localNames[name]\n\t\t\t_, passedOK := passedIn[name]\n\n\t\t\t// This name was not declared somewhere within in the\n\t\t\t// configuration. We ignore empty configs, because they will\n\t\t\t// already produce a warning.\n\t\t\tif !(confOK || localOK) {\n\t\t\t\tdefAddr := addrs.NewDefaultProvider(name)\n\t\t\t\tdiags = append(diags, &hcl.Diagnostic{\n\t\t\t\t\tSeverity: hcl.DiagWarning,\n\t\t\t\t\tSummary: \"Reference to undefined provider\",\n\t\t\t\t\tDetail: fmt.Sprintf(\n\t\t\t\t\t\t\"There is no explicit declaration for local provider name %q in %s, so Terraform is assuming you mean to pass a configuration for provider %q.\\n\\nTo clarify your intent and silence this warning, add to %s a required_providers entry named %q with source = %q, or a different source address if appropriate.\",\n\t\t\t\t\t\tname, moduleText, defAddr.ForDisplay(),\n\t\t\t\t\t\tparentModuleText, name, defAddr.ForDisplay(),\n\t\t\t\t\t),\n\t\t\t\t\tSubject: &passed.InParent.NameRange,\n\t\t\t\t})\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Now we may have named this provider within the module, but\n\t\t\t// there won't be a configuration available at runtime if the\n\t\t\t// parent module did not pass one in.\n\t\t\tif !cfg.Path.IsRoot() && !(confOK || passedOK) {\n\t\t\t\tdefAddr := addrs.NewDefaultProvider(name)\n\t\t\t\tdiags = append(diags, &hcl.Diagnostic{\n\t\t\t\t\tSeverity: 
hcl.DiagWarning,\n\t\t\t\t\tSummary: \"Missing required provider configuration\",\n\t\t\t\t\tDetail: fmt.Sprintf(\n\t\t\t\t\t\t\"The configuration for %s expects to inherit a configuration for provider %s with local name %q, but %s doesn't pass a configuration under that name.\\n\\nTo satisfy this requirement, add an entry for %q to the \\\"providers\\\" argument in the module %q block.\",\n\t\t\t\t\t\tmoduleText, defAddr.ForDisplay(), name, parentModuleText,\n\t\t\t\t\t\tname, parentCall.Name,\n\t\t\t\t\t),\n\t\t\t\t\tSubject: parentCall.DeclRange.Ptr(),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tif cfg.Path.IsRoot() {\n\t\t// nothing else to do in the root module\n\t\treturn diags\n\t}\n\n\t// there cannot be any configurations if no provider config is allowed\n\tif len(configured) > 0 && noProviderConfigRange != nil {\n\t\t// We report this from the perspective of the use of count, for_each,\n\t\t// or depends_on rather than from inside the module, because the\n\t\t// recipient of this message is more likely to be the author of the\n\t\t// calling module (trying to use an older module that hasn't been\n\t\t// updated yet) than of the called module.\n\t\tdiags = append(diags, &hcl.Diagnostic{\n\t\t\tSeverity: hcl.DiagError,\n\t\t\tSummary: \"Module is incompatible with count, for_each, and depends_on\",\n\t\t\tDetail: fmt.Sprintf(\n\t\t\t\t\"The module at %s is a legacy module which contains its own local provider configurations, and so calls to it may not use the count, for_each, or depends_on arguments.\\n\\nIf you also control the module %q, consider updating this module to instead expect provider configurations to be passed by its caller.\",\n\t\t\t\tcfg.Path, cfg.SourceAddr,\n\t\t\t),\n\t\t\tSubject: noProviderConfigRange,\n\t\t})\n\t}\n\n\t// now check that the user is not attempting to override a config\n\tfor name := range configured {\n\t\tif passed, ok := passedIn[name]; ok {\n\t\t\tdiags = append(diags, &hcl.Diagnostic{\n\t\t\t\tSeverity: 
hcl.DiagError,\n\t\t\t\tSummary: \"Cannot override provider configuration\",\n\t\t\t\tDetail: fmt.Sprintf(\n\t\t\t\t\t\"The configuration of %s has its own local configuration for %s, and so it cannot accept an overridden configuration provided by %s.\",\n\t\t\t\t\tmoduleText, name, parentModuleText,\n\t\t\t\t),\n\t\t\t\tSubject: &passed.InChild.NameRange,\n\t\t\t})\n\t\t}\n\t}\n\n\t// A declared alias requires either a matching configuration within the\n\t// module, or one must be passed in.\n\tfor name, providerAddr := range configAliases {\n\t\t_, confOk := configured[name]\n\t\t_, passedOk := passedIn[name]\n\n\t\tif confOk || passedOk {\n\t\t\tcontinue\n\t\t}\n\n\t\tdiags = append(diags, &hcl.Diagnostic{\n\t\t\tSeverity: hcl.DiagError,\n\t\t\tSummary: \"Missing required provider configuration\",\n\t\t\tDetail: fmt.Sprintf(\n\t\t\t\t\"The child module requires an additional configuration for provider %s, with the local name %q.\\n\\nRefer to the module's documentation to understand the intended purpose of this additional provider configuration, and then add an entry for %s in the \\\"providers\\\" meta-argument in the module block to choose which provider configuration the module should use for that purpose.\",\n\t\t\t\tproviderAddr.Provider.ForDisplay(), name,\n\t\t\t\tname,\n\t\t\t),\n\t\t\tSubject: &parentCall.DeclRange,\n\t\t})\n\t}\n\n\t// You cannot pass in a provider that cannot be used\n\tfor name, passed := range passedIn {\n\t\tchildTy := passed.InChild.providerType\n\t\t// get a default type if there was none set\n\t\tif childTy.IsZero() {\n\t\t\t// This means the child module is only using an inferred\n\t\t\t// provider type. 
We allow this but will generate a warning to\n\t\t\t// declare provider_requirements below.\n\t\t\tchildTy = addrs.NewDefaultProvider(passed.InChild.Name)\n\t\t}\n\n\t\tproviderAddr := addrs.AbsProviderConfig{\n\t\t\tModule: cfg.Path,\n\t\t\tProvider: childTy,\n\t\t\tAlias: passed.InChild.Alias,\n\t\t}\n\n\t\tlocalAddr, localName := localNames[name]\n\t\tif localName {\n\t\t\tproviderAddr.Provider = localAddr\n\t\t}\n\n\t\taliasAddr, configAlias := configAliases[name]\n\t\tif configAlias {\n\t\t\tproviderAddr = aliasAddr\n\t\t}\n\n\t\t_, emptyConfig := emptyConfigs[name]\n\n\t\tif !(localName || configAlias || emptyConfig) {\n\n\t\t\t// we still allow default configs, so switch to a warning if the incoming provider is a default\n\t\t\tif providerAddr.Provider.IsDefault() {\n\t\t\t\tdiags = append(diags, &hcl.Diagnostic{\n\t\t\t\t\tSeverity: hcl.DiagWarning,\n\t\t\t\t\tSummary: \"Reference to undefined provider\",\n\t\t\t\t\tDetail: fmt.Sprintf(\n\t\t\t\t\t\t\"There is no explicit declaration for local provider name %q in %s, so Terraform is assuming you mean to pass a configuration for %q.\\n\\nIf you also control the child module, add a required_providers entry named %q with the source address %q.\",\n\t\t\t\t\t\tname, moduleText, providerAddr.Provider.ForDisplay(),\n\t\t\t\t\t\tname, providerAddr.Provider.ForDisplay(),\n\t\t\t\t\t),\n\t\t\t\t\tSubject: &passed.InChild.NameRange,\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\tdiags = append(diags, &hcl.Diagnostic{\n\t\t\t\t\tSeverity: hcl.DiagError,\n\t\t\t\t\tSummary: \"Reference to undefined provider\",\n\t\t\t\t\tDetail: fmt.Sprintf(\n\t\t\t\t\t\t\"The child module does not declare any provider requirement with the local name %q.\\n\\nIf you also control the child module, you can add a required_providers entry named %q with the source address %q to accept this provider configuration.\",\n\t\t\t\t\t\tname, name, providerAddr.Provider.ForDisplay(),\n\t\t\t\t\t),\n\t\t\t\t\tSubject: 
&passed.InChild.NameRange,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\t// The provider being passed in must also be of the correct type.\n\t\tpTy := passed.InParent.providerType\n\t\tif pTy.IsZero() {\n\t\t\t// While we would like to ensure required_providers exists here,\n\t\t\t// implied default configuration is still allowed.\n\t\t\tpTy = addrs.NewDefaultProvider(passed.InParent.Name)\n\t\t}\n\n\t\t// use the full address for a nice diagnostic output\n\t\tparentAddr := addrs.AbsProviderConfig{\n\t\t\tModule: cfg.Parent.Path,\n\t\t\tProvider: pTy,\n\t\t\tAlias: passed.InParent.Alias,\n\t\t}\n\n\t\tif cfg.Parent.Module.ProviderRequirements != nil {\n\t\t\treq, defined := cfg.Parent.Module.ProviderRequirements.RequiredProviders[name]\n\t\t\tif defined {\n\t\t\t\tparentAddr.Provider = req.Type\n\t\t\t}\n\t\t}\n\n\t\tif !providerAddr.Provider.Equals(parentAddr.Provider) {\n\t\t\t// If this module declares the same source address for a different\n\t\t\t// local name then we'll prefer to suggest changing to match\n\t\t\t// the child module's chosen name, assuming that it was the local\n\t\t\t// name that was wrong rather than the source address.\n\t\t\tvar otherLocalName string\n\t\t\tfor localName, sourceAddr := range localNames {\n\t\t\t\tif sourceAddr.Equals(parentAddr.Provider) {\n\t\t\t\t\totherLocalName = localName\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst errSummary = \"Provider type mismatch\"\n\t\t\tif otherLocalName != \"\" {\n\t\t\t\tdiags = append(diags, &hcl.Diagnostic{\n\t\t\t\t\tSeverity: hcl.DiagError,\n\t\t\t\t\tSummary: errSummary,\n\t\t\t\t\tDetail: fmt.Sprintf(\n\t\t\t\t\t\t\"The assigned configuration is for provider %q, but local name %q in %s represents %q.\\n\\nTo pass this configuration to the child module, use the local name %q instead.\",\n\t\t\t\t\t\tparentAddr.Provider.ForDisplay(), passed.InChild.Name,\n\t\t\t\t\t\tparentModuleText, providerAddr.Provider.ForDisplay(),\n\t\t\t\t\t\totherLocalName,\n\t\t\t\t\t),\n\t\t\t\t\tSubject: 
&passed.InChild.NameRange,\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\t// If there is no declared requirement for the provider the\n\t\t\t\t// caller is trying to pass under any name then we'll instead\n\t\t\t\t// report it as an unsuitable configuration to pass into the\n\t\t\t\t// child module's provider configuration slot.\n\t\t\t\tdiags = append(diags, &hcl.Diagnostic{\n\t\t\t\t\tSeverity: hcl.DiagError,\n\t\t\t\t\tSummary: errSummary,\n\t\t\t\t\tDetail: fmt.Sprintf(\n\t\t\t\t\t\t\"The local name %q in %s represents provider %q, but %q in %s represents %q.\\n\\nEach provider has its own distinct configuration schema and provider types, so this module's %q can be assigned only a configuration for %s, which is not required by %s.\",\n\t\t\t\t\t\tpassed.InParent, parentModuleText, parentAddr.Provider.ForDisplay(),\n\t\t\t\t\t\tpassed.InChild, moduleText, providerAddr.Provider.ForDisplay(),\n\t\t\t\t\t\tpassed.InChild, providerAddr.Provider.ForDisplay(),\n\t\t\t\t\t\tmoduleText,\n\t\t\t\t\t),\n\t\t\t\t\tSubject: passed.InParent.NameRange.Ptr(),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\t// Empty configurations are no longer needed. 
Since the replacement for\n\t// this calls for one entry per provider rather than one entry per\n\t// provider _configuration_, we'll first gather them up by provider\n\t// and then report a single warning for each, whereby we can show a direct\n\t// example of what the replacement should look like.\n\ttype ProviderReqSuggestion struct {\n\t\tSourceAddr addrs.Provider\n\t\tSourceRanges []hcl.Range\n\t\tRequiredConfigs []string\n\t\tAliasCount int\n\t}\n\tproviderReqSuggestions := make(map[string]*ProviderReqSuggestion)\n\tfor name, src := range emptyConfigs {\n\t\tproviderLocalName := name\n\t\tif idx := strings.IndexByte(providerLocalName, '.'); idx >= 0 {\n\t\t\tproviderLocalName = providerLocalName[:idx]\n\t\t}\n\n\t\tsourceAddr, ok := localNames[name]\n\t\tif !ok {\n\t\t\tsourceAddr = addrs.NewDefaultProvider(providerLocalName)\n\t\t}\n\n\t\tsuggestion := providerReqSuggestions[providerLocalName]\n\t\tif suggestion == nil {\n\t\t\tproviderReqSuggestions[providerLocalName] = &ProviderReqSuggestion{\n\t\t\t\tSourceAddr: sourceAddr,\n\t\t\t}\n\t\t\tsuggestion = providerReqSuggestions[providerLocalName]\n\t\t}\n\n\t\tif providerLocalName != name {\n\t\t\t// It's an aliased provider config, then.\n\t\t\tsuggestion.AliasCount++\n\t\t}\n\n\t\tsuggestion.RequiredConfigs = append(suggestion.RequiredConfigs, name)\n\t\tsuggestion.SourceRanges = append(suggestion.SourceRanges, src)\n\t}\n\tfor name, suggestion := range providerReqSuggestions {\n\t\tvar buf strings.Builder\n\n\t\tfmt.Fprintf(\n\t\t\t&buf,\n\t\t\t\"Earlier versions of Terraform used empty provider blocks (\\\"proxy provider configurations\\\") for child modules to declare their need to be passed a provider configuration by their callers. 
That approach was ambiguous and is now deprecated.\\n\\nIf you control this module, you can migrate to the new declaration syntax by removing all of the empty provider %q blocks and then adding or updating an entry like the following to the required_providers block of %s:\\n\",\n\t\t\tname, moduleText,\n\t\t)\n\t\tfmt.Fprintf(&buf, \" %s = {\\n\", name)\n\t\tfmt.Fprintf(&buf, \" source = %q\\n\", suggestion.SourceAddr.ForDisplay())\n\t\tif suggestion.AliasCount > 0 {\n\t\t\t// A lexical sort is fine because all of these strings are\n\t\t\t// guaranteed to start with the same provider local name, and\n\t\t\t// so we're only really sorting by the alias part.\n\t\t\tsort.Strings(suggestion.RequiredConfigs)\n\t\t\tfmt.Fprintln(&buf, \" configuration_aliases = [\")\n\t\t\tfor _, addrStr := range suggestion.RequiredConfigs {\n\t\t\t\tfmt.Fprintf(&buf, \" %s,\\n\", addrStr)\n\t\t\t}\n\t\t\tfmt.Fprintln(&buf, \" ]\")\n\n\t\t}\n\t\tfmt.Fprint(&buf, \" }\")\n\n\t\t// We're arbitrarily going to just take the one source range that\n\t\t// sorts earliest here. 
Multiple should be rare, so this is only to\n\t\t// ensure that we produce a deterministic result in the edge case.\n\t\tsort.Slice(suggestion.SourceRanges, func(i, j int) bool {\n\t\t\treturn suggestion.SourceRanges[i].String() < suggestion.SourceRanges[j].String()\n\t\t})\n\t\tdiags = append(diags, &hcl.Diagnostic{\n\t\t\tSeverity: hcl.DiagWarning,\n\t\t\tSummary: \"Redundant empty provider block\",\n\t\t\tDetail: buf.String(),\n\t\t\tSubject: suggestion.SourceRanges[0].Ptr(),\n\t\t})\n\t}\n\n\treturn diags\n}", "func TestManyValidatorChangesSaveLoad(t *testing.T) {\n\tconst valSetSize = 7\n\ttearDown, stateDB, state := setupTestCase(t)\n\trequire.Equal(t, int64(0), state.LastBlockHeight)\n\tstate.Validators = genValSet(valSetSize)\n\tstate.NextValidators = state.Validators.CopyIncrementProposerPriority(1)\n\tSaveState(stateDB, state)\n\tdefer tearDown(t)\n\n\t_, valOld := state.Validators.GetByIndex(0)\n\tvar pubkeyOld = valOld.PubKey\n\tpubkey := ed25519.GenPrivKey().PubKey()\n\tconst height = 1\n\n\t// Swap the first validator with a new one (validator set size stays the same).\n\theader, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey)\n\n\t// Save state etc.\n\tvar err error\n\tvar validatorUpdates []*types.Validator\n\tvalidatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates)\n\trequire.NoError(t, err)\n\tstate, err = updateState(state, blockID, &header, responses, validatorUpdates)\n\trequire.Nil(t, err)\n\tnextHeight := state.LastBlockHeight + 1\n\tsaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators)\n\n\t// Load nextheight, it should be the oldpubkey.\n\tv0, err := LoadValidators(stateDB, nextHeight)\n\tassert.Nil(t, err)\n\tassert.Equal(t, valSetSize, v0.Size())\n\tindex, val := v0.GetByAddress(pubkeyOld.Address())\n\tassert.NotNil(t, val)\n\tif index < 0 {\n\t\tt.Fatal(\"expected to find old validator\")\n\t}\n\n\t// Load nextheight+1, it 
should be the new pubkey.\n\tv1, err := LoadValidators(stateDB, nextHeight+1)\n\tassert.Nil(t, err)\n\tassert.Equal(t, valSetSize, v1.Size())\n\tindex, val = v1.GetByAddress(pubkey.Address())\n\tassert.NotNil(t, val)\n\tif index < 0 {\n\t\tt.Fatal(\"expected to find newly added validator\")\n\t}\n}", "func TestCallbackAddRemove(t *testing.T) {\n\tr := New()\n\ti := r.CreateInput(1)\n\tc := r.CreateCompute1(i, func(v int) int { return v + 1 })\n\tvar observed1 []int\n\tcb1 := c.AddCallback(func(v int) {\n\t\tobserved1 = append(observed1, v)\n\t})\n\tvar observed2 []int\n\tc.AddCallback(func(v int) {\n\t\tobserved2 = append(observed2, v)\n\t})\n\ti.SetValue(2)\n\tif len(observed1) != 1 || observed1[0] != 3 {\n\t\tt.Fatalf(\"observed1 not properly called\")\n\t}\n\tif len(observed2) != 1 || observed2[0] != 3 {\n\t\tt.Fatalf(\"observed2 not properly called\")\n\t}\n\tc.RemoveCallback(cb1)\n\ti.SetValue(3)\n\tif len(observed1) != 1 {\n\t\tt.Fatalf(\"observed1 called after removal\")\n\t}\n\tif len(observed2) != 2 || observed2[1] != 4 {\n\t\tt.Fatalf(\"observed2 not properly called after first callback removal\")\n\t}\n}", "func (mr *MockFullNodeMockRecorder) MpoolSelects(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"MpoolSelects\", reflect.TypeOf((*MockFullNode)(nil).MpoolSelects), arg0, arg1, arg2)\n}", "func (m *MockLoadBalance) EnsureListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestBatchOnUploadEventFailure(t *testing.T) {\n\tbatch := Batch{&mocks.FailingWriter{}}\n\terr := batch.OnUploadEvent(&spec.Measurement{\n\t\tElapsed: 1.0,\n\t})\n\tif err != mocks.ErrMocked {\n\t\tt.Fatal(\"Not the error we expected\")\n\t}\n}", "func (mr *MockProviderMockRecorder) OnEndpointsSynced() *gomock.Call 
{\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"OnEndpointsSynced\", reflect.TypeOf((*MockProvider)(nil).OnEndpointsSynced))\n}", "func TestMultiRangeEmptyAfterTruncate(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\ts, _, _ := serverutils.StartServer(t, base.TestServerArgs{})\n\tdefer s.Stopper().Stop()\n\tdb := setupMultipleRanges(t, s, \"c\", \"d\")\n\n\t// Delete the keys within a transaction. The range [c,d) doesn't have\n\t// any active requests.\n\tif err := db.Txn(context.TODO(), func(txn *client.Txn) error {\n\t\tb := txn.NewBatch()\n\t\tb.DelRange(\"a\", \"b\", false)\n\t\tb.DelRange(\"e\", \"f\", false)\n\t\treturn txn.CommitInBatch(b)\n\t}); err != nil {\n\t\tt.Fatalf(\"unexpected error on transactional DeleteRange: %s\", err)\n\t}\n}", "func TestCallbackInvokedWhenSetEarly(t *testing.T) {\n\twg := &sync.WaitGroup{}\n\twg.Add(1)\n\tapp := blockedABCIApplication{\n\t\twg: wg,\n\t}\n\t_, c := setupClientServer(t, app)\n\treqRes := c.CheckTxAsync(types.RequestCheckTx{})\n\n\tdone := make(chan struct{})\n\tcb := func(_ *types.Response) {\n\t\tclose(done)\n\t}\n\treqRes.SetCallback(cb)\n\tapp.wg.Done()\n\n\tcalled := func() bool {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\trequire.Eventually(t, called, time.Second, time.Millisecond*25)\n}", "func TestValidatorSMDestroyValidatorUnbonding2UnBonded2Removed(t *testing.T) {\n\n\t_, _, mk := CreateTestInput(t, false, SufficientInitPower)\n\tparams := DefaultParams()\n\n\toriginVaSet := addrVals[1:]\n\tparams.MaxValidators = uint16(len(originVaSet))\n\tparams.Epoch = 1\n\tparams.UnbondingTime = time.Millisecond * 300\n\n\tstartUpValidator := NewValidator(StartUpValidatorAddr, StartUpValidatorPubkey, Description{}, types.DefaultMinSelfDelegation)\n\n\tstartUpStatus := baseValidatorStatus{startUpValidator}\n\n\torgValsLen := len(originVaSet)\n\tfullVaSet := make([]sdk.ValAddress, orgValsLen+1)\n\tcopy(fullVaSet, 
originVaSet)\n\tcopy(fullVaSet[orgValsLen:], []sdk.ValAddress{startUpStatus.getValidator().GetOperator()})\n\n\tbAction := baseAction{mk}\n\tinputActions := []IAction{\n\t\tcreateValidatorAction{bAction, nil},\n\t\tendBlockAction{bAction},\n\t\tdelegatorsAddSharesAction{bAction, false, true, 0, nil},\n\t\tendBlockAction{bAction},\n\t\tdestroyValidatorAction{bAction},\n\t\tendBlockAction{bAction},\n\n\t\t// first unbonding time pass, delegator shares left, validator unbonding --> unbonded\n\t\twaitUntilUnbondingTimeExpired{bAction},\n\t\tendBlockAction{bAction},\n\n\t\t// delegators unbond all tokens back, validator has no msd & delegator shares now, delegator removed\n\t\tdelegatorsWithdrawAction{bAction, true, true},\n\t}\n\n\texpZeroDec := sdk.ZeroDec()\n\tdlgAddSharesCheck1 := andChecker{[]actResChecker{\n\t\tvalidatorDelegatorShareIncreased(true),\n\t\tvalidatorRemoved(false),\n\t\tvalidatorDelegatorShareLeft(true),\n\t\tvalidatorStatusChecker(sdk.Bonded.String()),\n\t}}\n\n\tafterUnbondingTimeExpiredCheck1 := andChecker{[]actResChecker{\n\t\tvalidatorRemoved(false),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t}}\n\n\tdlgUnbondCheck2 := andChecker{[]actResChecker{\n\t\tnoErrorInHandlerResult(true),\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tvalidatorRemoved(true),\n\t\tqueryDelegatorCheck(ValidDelegator1, false, nil, nil, &expZeroDec, nil),\n\t}}\n\n\tactionsAndChecker := []actResChecker{\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tqueryValidatorCheck(sdk.Bonded, false, &SharesFromDefaultMSD, &startUpValidator.MinSelfDelegation, nil),\n\t\tdlgAddSharesCheck1.GetChecker(),\n\t\tnil,\n\t\tqueryValidatorCheck(sdk.Bonded, true, nil, &expZeroDec, nil),\n\t\tvalidatorStatusChecker(sdk.Unbonding.String()),\n\t\tvalidatorStatusChecker(sdk.Unbonding.String()),\n\t\tafterUnbondingTimeExpiredCheck1.GetChecker(),\n\t\tdlgUnbondCheck2.GetChecker(),\n\t}\n\n\tsmTestCase := newValidatorSMTestCase(mk, params, startUpStatus, inputActions, 
actionsAndChecker, t)\n\tsmTestCase.SetupValidatorSetAndDelegatorSet(int(params.MaxValidators))\n\tsmTestCase.printParticipantSnapshot(t)\n\tsmTestCase.Run(t)\n}", "func TestProcessor_StartWithErrorBeforeRebalance(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\ttype TestCase struct {\n\t\tname string\n\t\tevent kafka.Event\n\t}\n\ttests := []TestCase{\n\t\t{\"error\", &kafka.Error{Err: errors.New(\"something\")}},\n\t\t{\"message\", new(kafka.Message)},\n\t\t{\"EOF\", new(kafka.EOF)},\n\t\t{\"BOF\", new(kafka.BOF)},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tvar (\n\t\t\t\terr error\n\t\t\t\tconsumer = mock.NewMockConsumer(ctrl)\n\t\t\t\tst = mock.NewMockStorage(ctrl)\n\t\t\t\tsb = func(topic string, par int32) (storage.Storage, error) {\n\t\t\t\t\treturn st, nil\n\t\t\t\t}\n\t\t\t\tfinal = make(chan bool)\n\t\t\t\tch = make(chan kafka.Event)\n\t\t\t\tp = createProcessor(t, ctrl, consumer, 3, sb)\n\t\t\t)\n\n\t\t\tgomock.InOrder(\n\t\t\t\tconsumer.EXPECT().Subscribe(topOff).Return(nil),\n\t\t\t\tconsumer.EXPECT().Events().Return(ch),\n\t\t\t\tconsumer.EXPECT().Close().Do(func() { close(ch) }),\n\t\t\t)\n\t\t\tgo func() {\n\t\t\t\terr = p.Run(context.Background())\n\t\t\t\tensure.NotNil(t, err)\n\t\t\t\tclose(final)\n\t\t\t}()\n\n\t\t\tch <- tc.event\n\n\t\t\terr = doTimed(t, func() {\n\t\t\t\t<-final\n\t\t\t})\n\t\t\tensure.Nil(t, err)\n\t\t})\n\t}\n}", "func SNSSQSMessageVisibilityTimeout(t *testing.T) {\n\tconsumerGroup1 := watcher.NewUnordered()\n\tmetadata := map[string]string{}\n\tlatch := make(chan struct{})\n\tbusyTime := 10 * time.Second\n\tmessagesToSend := 1\n\twaitForLatch := func(appID string, ctx flow.Context, l chan struct{}) error {\n\t\tctx.Logf(\"waitForLatch %s is waiting...\\n\", appID)\n\t\t<-l\n\t\tctx.Logf(\"waitForLatch %s ready to continue!\\n\", appID)\n\t\treturn nil\n\t}\n\n\tsubscriberMVTimeoutApp := func(appID string, topicName string, messagesWatcher 
*watcher.Watcher, l chan struct{}) app.SetupFn {\n\t\treturn func(ctx flow.Context, s common.Service) error {\n\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.subscriberApplicationMVTimeout App: %q topicName: %q\\n\", appID, topicName)\n\t\t\treturn multierr.Combine(\n\t\t\t\ts.AddTopicEventHandler(&common.Subscription{\n\t\t\t\t\tPubsubName: pubsubName,\n\t\t\t\t\tTopic: topicName,\n\t\t\t\t\tRoute: \"/orders\",\n\t\t\t\t}, func(_ context.Context, e *common.TopicEvent) (retry bool, err error) {\n\t\t\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.subscriberApplicationMVTimeout App: %q got message: %s busy for %v\\n\", appID, e.Data, busyTime)\n\t\t\t\t\ttime.Sleep(busyTime)\n\t\t\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.subscriberApplicationMVTimeout App: %q - notifying next Subscriber to continue...\\n\", appID)\n\t\t\t\t\tl <- struct{}{}\n\t\t\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.subscriberApplicationMVTimeoutApp: %q - sent busy for %v\\n\", appID, busyTime)\n\t\t\t\t\ttime.Sleep(busyTime)\n\t\t\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.subscriberApplicationMVTimeoutApp: %q - done!\\n\", appID)\n\t\t\t\t\treturn false, nil\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\t}\n\n\tnotExpectingMessagesSubscriberApp := func(appID string, topicName string, messagesWatcher *watcher.Watcher, l chan struct{}) app.SetupFn {\n\t\treturn func(ctx flow.Context, s common.Service) error {\n\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.notExpectingMessagesSubscriberApp App: %q topicName: %q waiting for notification to start receiving messages\\n\", appID, topicName)\n\t\t\treturn multierr.Combine(\n\t\t\t\twaitForLatch(appID, ctx, l),\n\t\t\t\ts.AddTopicEventHandler(&common.Subscription{\n\t\t\t\t\tPubsubName: pubsubName,\n\t\t\t\t\tTopic: topicName,\n\t\t\t\t\tRoute: \"/orders\",\n\t\t\t\t}, func(_ context.Context, e *common.TopicEvent) (retry bool, err error) {\n\t\t\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.notExpectingMessagesSubscriberApp App: %q got unexpected 
message: %s\\n\", appID, e.Data)\n\t\t\t\t\tmessagesWatcher.FailIfNotExpected(t, e.Data)\n\t\t\t\t\treturn false, nil\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\t}\n\n\ttestTtlPublishMessages := func(metadata map[string]string, sidecarName string, topicName string, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// prepare the messages\n\t\t\tmessages := make([]string, messagesToSend)\n\t\t\tfor i := range messages {\n\t\t\t\tmessages[i] = fmt.Sprintf(\"partitionKey: %s, message for topic: %s, index: %03d, uniqueId: %s\", metadata[messageKey], topicName, i, uuid.New().String())\n\t\t\t}\n\n\t\t\tctx.Logf(\"####### get the sidecar (dapr) client sidecarName: %s, topicName: %s\", sidecarName, topicName)\n\t\t\t// get the sidecar (dapr) client\n\t\t\tclient := sidecar.GetClient(ctx, sidecarName)\n\n\t\t\t// publish messages\n\t\t\tctx.Logf(\"####### Publishing messages. sidecarName: %s, topicName: %s\", sidecarName, topicName)\n\n\t\t\tvar publishOptions dapr.PublishEventOption\n\n\t\t\tif metadata != nil {\n\t\t\t\tpublishOptions = dapr.PublishEventWithMetadata(metadata)\n\t\t\t}\n\n\t\t\tfor _, message := range messages {\n\t\t\t\tctx.Logf(\"Publishing: %q\", message)\n\t\t\t\tvar err error\n\n\t\t\t\tif publishOptions != nil {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message, publishOptions)\n\t\t\t\t} else {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message)\n\t\t\t\t}\n\t\t\t\trequire.NoError(ctx, err, \"SNSSQSMessageVisibilityTimeout - error publishing message\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tconnectToSideCar := func(sidecarName string) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\tctx.Logf(\"####### connect to sidecar (dapr) client sidecarName: %s and exit\", sidecarName)\n\t\t\t// get the sidecar (dapr) client\n\t\t\tsidecar.GetClient(ctx, sidecarName)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tflow.New(t, \"SNSSQS certification - 
messageVisibilityTimeout attribute receive\").\n\t\t// App1 should receive the messages, wait some time (busy), notify App2, wait some time (busy),\n\t\t// and finish processing message.\n\t\tStep(app.Run(appID1, fmt.Sprintf(\":%d\", appPort+portOffset),\n\t\t\tsubscriberMVTimeoutApp(appID1, messageVisibilityTimeoutTopic, consumerGroup1, latch))).\n\t\tStep(sidecar.Run(sidecarName1,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/message_visibility_timeout\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort+portOffset)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort+portOffset)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort+portOffset)),\n\t\t\t\tembedded.WithProfilePort(strconv.Itoa(runtime.DefaultProfilePort+portOffset)),\n\t\t\t)...,\n\t\t)).\n\t\tStep(fmt.Sprintf(\"publish messages to messageVisibilityTimeoutTopic: %s\", messageVisibilityTimeoutTopic),\n\t\t\ttestTtlPublishMessages(metadata, sidecarName1, messageVisibilityTimeoutTopic, consumerGroup1)).\n\n\t\t// App2 waits for App1 notification to subscribe to message\n\t\t// After subscribing, if App2 receives any messages, the messageVisibilityTimeoutTopic is either too short,\n\t\t// or code is broken somehow\n\t\tStep(app.Run(appID2, fmt.Sprintf(\":%d\", appPort+portOffset+2),\n\t\t\tnotExpectingMessagesSubscriberApp(appID2, messageVisibilityTimeoutTopic, consumerGroup1, latch))).\n\t\tStep(sidecar.Run(sidecarName2,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/message_visibility_timeout\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, 
strconv.Itoa(appPort+portOffset+2)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort+portOffset+2)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort+portOffset+2)),\n\t\t\t\tembedded.WithProfilePort(strconv.Itoa(runtime.DefaultProfilePort+portOffset+2)),\n\t\t\t)...,\n\t\t)).\n\t\tStep(\"No messages will be sent here\",\n\t\t\tconnectToSideCar(sidecarName2)).\n\t\tStep(\"wait\", flow.Sleep(10*time.Second)).\n\t\tRun()\n\n}", "func (wh *Webhook) InformersHaveSynced() {\n\twh.synced()\n\twh.Logger.Info(\"Informers have been synced, unblocking admission webhooks.\")\n}", "func (setting *MongodbDatabaseCollectionThroughputSetting) updateValidations() []func(old runtime.Object) (admission.Warnings, error) {\n\treturn []func(old runtime.Object) (admission.Warnings, error){\n\t\tfunc(old runtime.Object) (admission.Warnings, error) {\n\t\t\treturn setting.validateResourceReferences()\n\t\t},\n\t\tsetting.validateWriteOnceProperties}\n}", "func checkEvents(t *testing.T, ctx context.Context, expectedEvents []string, ctrl *PersistentVolumeController) error {\n\tvar err error\n\n\t// Read recorded events - wait up to 1 minute to get all the expected ones\n\t// (just in case some goroutines are slower with writing)\n\ttimer := time.NewTimer(time.Minute)\n\tdefer timer.Stop()\n\tlogger := klog.FromContext(ctx)\n\tfakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder)\n\tgotEvents := []string{}\n\tfinished := false\n\tfor len(gotEvents) < len(expectedEvents) && !finished {\n\t\tselect {\n\t\tcase event, ok := <-fakeRecorder.Events:\n\t\t\tif ok {\n\t\t\t\tlogger.V(5).Info(\"Event recorder got event\", \"event\", event)\n\t\t\t\tgotEvents = append(gotEvents, event)\n\t\t\t} else {\n\t\t\t\tlogger.V(5).Info(\"Event recorder finished\")\n\t\t\t\tfinished = true\n\t\t\t}\n\t\tcase _, _ = <-timer.C:\n\t\t\tlogger.V(5).Info(\"Event recorder timeout\")\n\t\t\tfinished = true\n\t\t}\n\t}\n\n\t// Evaluate the events\n\tfor i, 
expected := range expectedEvents {\n\t\tif len(gotEvents) <= i {\n\t\t\tt.Errorf(\"Event %q not emitted\", expected)\n\t\t\terr = fmt.Errorf(\"events do not match\")\n\t\t\tcontinue\n\t\t}\n\t\treceived := gotEvents[i]\n\t\tif !strings.HasPrefix(received, expected) {\n\t\t\tt.Errorf(\"Unexpected event received, expected %q, got %q\", expected, received)\n\t\t\terr = fmt.Errorf(\"events do not match\")\n\t\t}\n\t}\n\tfor i := len(expectedEvents); i < len(gotEvents); i++ {\n\t\tt.Errorf(\"Unexpected event received: %q\", gotEvents[i])\n\t\terr = fmt.Errorf(\"events do not match\")\n\t}\n\treturn err\n}", "func (s *SegmentationDescriptor) SetSubSegmentsExpected(v int64) *SegmentationDescriptor {\n\ts.SubSegmentsExpected = &v\n\treturn s\n}", "func TestVisitPrefixes(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tpcc := buildTestPrefixConfigMap()\n\ttestData := []struct {\n\t\tstart, end proto.Key\n\t\texpRanges [][2]proto.Key\n\t\texpConfigs []ConfigUnion\n\t}{\n\t\t{proto.KeyMin, proto.KeyMax,\n\t\t\t[][2]proto.Key{\n\t\t\t\t{proto.KeyMin, proto.Key(\"/db1\")},\n\t\t\t\t{proto.Key(\"/db1\"), proto.Key(\"/db1/table\")},\n\t\t\t\t{proto.Key(\"/db1/table\"), proto.Key(\"/db1/tablf\")},\n\t\t\t\t{proto.Key(\"/db1/tablf\"), proto.Key(\"/db2\")},\n\t\t\t\t{proto.Key(\"/db2\"), proto.Key(\"/db3\")},\n\t\t\t\t{proto.Key(\"/db3\"), proto.Key(\"/db4\")},\n\t\t\t\t{proto.Key(\"/db4\"), proto.KeyMax},\n\t\t\t}, []ConfigUnion{config1, config2, config3, config2, config1, config4, config1}},\n\t\t{proto.Key(\"/db0\"), proto.Key(\"/db1/table/foo\"),\n\t\t\t[][2]proto.Key{\n\t\t\t\t{proto.Key(\"/db0\"), proto.Key(\"/db1\")},\n\t\t\t\t{proto.Key(\"/db1\"), proto.Key(\"/db1/table\")},\n\t\t\t\t{proto.Key(\"/db1/table\"), proto.Key(\"/db1/table/foo\")},\n\t\t\t}, []ConfigUnion{config1, config2, config3}},\n\t}\n\tfor i, test := range testData {\n\t\tranges := [][2]proto.Key{}\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := pcc.VisitPrefixes(test.start, test.end, func(start, end 
proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tranges = append(ranges, [2]proto.Key{start, end})\n\t\t\tconfigs = append(configs, config)\n\t\t\treturn false, nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(test.expRanges, ranges) {\n\t\t\tt.Errorf(\"%d: expected ranges %+v; got %+v\", i, test.expRanges, ranges)\n\t\t}\n\t\tif !reflect.DeepEqual(test.expConfigs, configs) {\n\t\t\tt.Errorf(\"%d: expected configs %+v; got %+v\", i, test.expConfigs, configs)\n\t\t}\n\t}\n\n\t// Now, stop partway through by returning done=true.\n\t{\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := pcc.VisitPrefixes(proto.Key(\"/db2\"), proto.Key(\"/db4\"), func(start, end proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tconfigs = append(configs, config)\n\t\t\tif len(configs) == 2 {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\texpConfigs := []ConfigUnion{config1, config4}\n\t\tif !reflect.DeepEqual(expConfigs, configs) {\n\t\t\tt.Errorf(\"expected configs %+v; got %+v\", expConfigs, configs)\n\t\t}\n\t}\n\n\t// Now, stop partway through by returning an error.\n\t{\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := pcc.VisitPrefixes(proto.Key(\"/db2\"), proto.Key(\"/db4\"), func(start, end proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tconfigs = append(configs, config)\n\t\t\tif len(configs) == 2 {\n\t\t\t\treturn false, util.Errorf(\"foo\")\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}); err == nil {\n\t\t\tt.Fatalf(\"expected an error, but didn't get one\")\n\t\t}\n\t\texpConfigs := []ConfigUnion{config1, config4}\n\t\tif !reflect.DeepEqual(expConfigs, configs) {\n\t\t\tt.Errorf(\"expected configs %+v; got %+v\", expConfigs, configs)\n\t\t}\n\t}\n}", "func TestInvalidEvents(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = 
\"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n\tw2 := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot(.*\"},\n\t}\n\tw2.Config.URL = \"http://localhost:9999/foo\"\n\tw2.Config.ContentType = \"application/json\"\n\n\tobs, err = OutboundSenderFactory{\n\t\tListener: w2,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tLogger: getLogger(),\n\t\tProfilerFactory: testServerProfilerFactory,\n\t}.New()\n\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func TestSegmentDummy(t *testing.T) {\n\ttype tcase struct {\n\t\tline geom.Line\n\t}\n\n\tfn := func(t *testing.T, tc tcase) {\n\t\ts := NewSegment(tc.line)\n\t\tif s.GetStart().Equals(tc.line[0]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[0], s.GetStart())\n\t\t}\n\t\tif s.GetEnd().Equals(tc.line[1]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[1], s.GetEnd())\n\t\t}\n\t\tif s.GetLineSegment() != tc.line {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line, s.GetLineSegment())\n\t\t}\n\t}\n\ttestcases := []tcase{\n\t\t{\n\t\t\tline: geom.Line{{1, 2}, {3, 4}},\n\t\t},\n\t}\n\n\tfor i, tc := range testcases {\n\t\ttc := tc\n\t\tt.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) { fn(t, tc) })\n\t}\n}", "func TestMultiNodeStart(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tcc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}\n\tccdata, err := cc.Marshal()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected marshal error: %v\", err)\n\t}\n\twants := []Ready{\n\t\t{\n\t\t\tSoftState: &SoftState{Lead: 1, RaftState: StateLeader},\n\t\t\tHardState: 
raftpb.HardState{Term: 2, Commit: 2, Vote: 1},\n\t\t\tEntries: []raftpb.Entry{\n\t\t\t\t{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},\n\t\t\t\t{Term: 2, Index: 2},\n\t\t\t},\n\t\t\tCommittedEntries: []raftpb.Entry{\n\t\t\t\t{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},\n\t\t\t\t{Term: 2, Index: 2},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tHardState: raftpb.HardState{Term: 2, Commit: 3, Vote: 1},\n\t\t\tEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte(\"foo\")}},\n\t\t\tCommittedEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte(\"foo\")}},\n\t\t},\n\t}\n\tmn := StartMultiNode(1)\n\tstorage := NewMemoryStorage()\n\tmn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage), []Peer{{ID: 1}})\n\tmn.Campaign(ctx, 1)\n\tgs := <-mn.Ready()\n\tg := gs[1]\n\tif !reflect.DeepEqual(g, wants[0]) {\n\t\tt.Fatalf(\"#%d: g = %+v,\\n w %+v\", 1, g, wants[0])\n\t} else {\n\t\tstorage.Append(g.Entries)\n\t\tmn.Advance(gs)\n\t}\n\n\tmn.Propose(ctx, 1, []byte(\"foo\"))\n\tif gs2 := <-mn.Ready(); !reflect.DeepEqual(gs2[1], wants[1]) {\n\t\tt.Errorf(\"#%d: g = %+v,\\n w %+v\", 2, gs2[1], wants[1])\n\t} else {\n\t\tstorage.Append(gs2[1].Entries)\n\t\tmn.Advance(gs2)\n\t}\n\n\tselect {\n\tcase rd := <-mn.Ready():\n\t\tt.Errorf(\"unexpected Ready: %+v\", rd)\n\tcase <-time.After(time.Millisecond):\n\t}\n}", "func TestValidateUnhandledAddons(t *testing.T) {\n\tunhandled, err := findUnhandled()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(unhandled) != 0 {\n\t\tnames := make([]string, 0, len(unhandled))\n\t\tfor _, addon := range unhandled {\n\t\t\tnames = append(names, addon.GetName())\n\t\t}\n\t\tt.Fatal(fmt.Errorf(\"the following addons are not handled as part of a testing group: %+v\", names))\n\t}\n}", "func (m *IndexCollectionAccessorMock) ValidateCallCounters() {\n\n\tif !m.ForJetFinished() {\n\t\tm.t.Fatal(\"Expected call to IndexCollectionAccessorMock.ForJet\")\n\t}\n\n\tif !m.ForPulseAndJetFinished() {\n\t\tm.t.Fatal(\"Expected call 
to IndexCollectionAccessorMock.ForPulseAndJet\")\n\t}\n\n}", "func (h MultiStakingHooks) AfterValidatorRemoved(ctx sdk.Context, consAddr sdk.ConsAddress, valAddr sdk.ValAddress) {\n\tfor i := range h {\n\t\th[i].AfterValidatorRemoved(ctx, consAddr, valAddr)\n\t}\n}", "func TestAddOnceViewMany(t *testing.T) {\n\ttarget := teaser.New()\n\tm1id := target.Add(\"msg1\")\n\n\t{\n\t\tnextHash, m1vid := target.View()\n\t\tassertMessageId(m1id, m1vid, t)\n\t\tassertMessageHash(\"\", nextHash, t)\n\t}\n\n\t{\n\t\tnextHash, nextvid := target.View()\n\t\tassertMessageId(\"\", nextvid, t)\n\t\tassertMessageHash(\"\", nextHash, t)\n\t}\n}", "func SNSSQSMultiplePubSubsDifferentConsumerIDs(t *testing.T) {\n\tconsumerGroup1 := watcher.NewUnordered()\n\tconsumerGroup2 := watcher.NewUnordered()\n\n\t// Set the partition key on all messages so they are written to the same partition. This allows for checking of ordered messages.\n\tmetadata := map[string]string{\n\t\tmessageKey: partition0,\n\t}\n\n\tmetadata1 := map[string]string{\n\t\tmessageKey: partition1,\n\t}\n\n\t// subscriber of the given topic\n\tsubscriberApplication := func(appID string, topicName string, messagesWatcher *watcher.Watcher) app.SetupFn {\n\t\treturn func(ctx flow.Context, s common.Service) error {\n\t\t\t// Simulate periodic errors.\n\t\t\tsim := simulate.PeriodicError(ctx, 100)\n\t\t\t// Setup the /orders event handler.\n\t\t\treturn multierr.Combine(\n\t\t\t\ts.AddTopicEventHandler(&common.Subscription{\n\t\t\t\t\tPubsubName: pubsubName,\n\t\t\t\t\tTopic: topicName,\n\t\t\t\t\tRoute: \"/orders\",\n\t\t\t\t}, func(_ context.Context, e *common.TopicEvent) (retry bool, err error) {\n\t\t\t\t\tif err := sim(); err != nil {\n\t\t\t\t\t\treturn true, err\n\t\t\t\t\t}\n\n\t\t\t\t\t// Track/Observe the data of the event.\n\t\t\t\t\tmessagesWatcher.Observe(e.Data)\n\t\t\t\t\tctx.Logf(\"Message Received appID: %s,pubsub: %s, topic: %s, id: %s, data: %s\", appID, e.PubsubName, e.Topic, e.ID, e.Data)\n\t\t\t\t\treturn 
false, nil\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\t}\n\n\tpublishMessages := func(metadata map[string]string, sidecarName string, topicName string, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// prepare the messages\n\t\t\tmessages := make([]string, numMessages)\n\t\t\tfor i := range messages {\n\t\t\t\tmessages[i] = fmt.Sprintf(\"partitionKey: %s, message for topic: %s, index: %03d, uniqueId: %s\", metadata[messageKey], topicName, i, uuid.New().String())\n\t\t\t}\n\n\t\t\t// add the messages as expectations to the watchers\n\t\t\tfor _, messageWatcher := range messageWatchers {\n\t\t\t\tmessageWatcher.ExpectStrings(messages...)\n\t\t\t}\n\n\t\t\t// get the sidecar (dapr) client\n\t\t\tclient := sidecar.GetClient(ctx, sidecarName)\n\n\t\t\t// publish messages\n\t\t\tctx.Logf(\"Publishing messages. sidecarName: %s, topicName: %s\", sidecarName, topicName)\n\n\t\t\tvar publishOptions dapr.PublishEventOption\n\n\t\t\tif metadata != nil {\n\t\t\t\tpublishOptions = dapr.PublishEventWithMetadata(metadata)\n\t\t\t}\n\n\t\t\tfor _, message := range messages {\n\t\t\t\tctx.Logf(\"Publishing: %q\", message)\n\t\t\t\tvar err error\n\n\t\t\t\tif publishOptions != nil {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message, publishOptions)\n\t\t\t\t} else {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message)\n\t\t\t\t}\n\t\t\t\trequire.NoError(ctx, err, \"SNSSQSMultiplePubSubsDifferentConsumerIDs - error publishing message\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tassertMessages := func(timeout time.Duration, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// assert for messages\n\t\t\tfor _, m := range messageWatchers {\n\t\t\t\tif !m.Assert(ctx, 25*timeout) {\n\t\t\t\t\tctx.Errorf(\"SNSSQSMultiplePubSubsDifferentConsumerIDs - message assertion failed for watcher: %#v\\n\", m)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn 
nil\n\t\t}\n\t}\n\n\tflow.New(t, \"SNSSQS certification - multiple publishers and multiple subscribers with different consumer IDs\").\n\n\t\t// Run subscriberApplication app1\n\t\tStep(app.Run(appID1, fmt.Sprintf(\":%d\", appPort),\n\t\t\tsubscriberApplication(appID1, topicActiveName, consumerGroup1))).\n\n\t\t// Run the Dapr sidecar with ConsumerID \"PUBSUB_AWS_SNSSQS_QUEUE_1\"\n\t\tStep(sidecar.Run(sidecarName1,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/consumer_one\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort)),\n\t\t\t)...,\n\t\t)).\n\n\t\t// Run subscriberApplication app2\n\t\tStep(app.Run(appID2, fmt.Sprintf(\":%d\", appPort+portOffset),\n\t\t\tsubscriberApplication(appID2, topicActiveName, consumerGroup2))).\n\n\t\t// Run the Dapr sidecar with ConsumerID \"PUBSUB_AWS_SNSSQS_QUEUE_2\"\n\t\tStep(sidecar.Run(sidecarName2,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/consumer_two\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort+portOffset)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort+portOffset)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort+portOffset)),\n\t\t\t\tembedded.WithProfilePort(strconv.Itoa(runtime.DefaultProfilePort+portOffset)),\n\t\t\t)...,\n\t\t)).\n\t\tStep(\"publish messages to ==> \"+topicActiveName, publishMessages(metadata, sidecarName1, topicActiveName, consumerGroup1)).\n\t\tStep(\"publish messages to ==> \"+topicActiveName, publishMessages(metadata1, sidecarName2, topicActiveName, consumerGroup2)).\n\t\tStep(\"verify if app1, app2 together have recevied messages published to topic1\", assertMessages(10*time.Second, 
consumerGroup1)).\n\t\tStep(\"verify if app1, app2 together have recevied messages published to topic1\", assertMessages(10*time.Second, consumerGroup2)).\n\t\tStep(\"reset\", flow.Reset(consumerGroup1, consumerGroup2)).\n\t\tRun()\n}", "func validateAdditionsPerStore(\n\tdesc *roachpb.RangeDescriptor, chgsByStoreID changesByStoreID,\n) error {\n\tfor storeID, chgs := range chgsByStoreID {\n\t\tfor _, chg := range chgs {\n\t\t\tif chg.ChangeType.IsRemoval() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If the replica already exists, check that we're not trying to add the\n\t\t\t// same type of replica again.\n\t\t\t//\n\t\t\t// NB: Trying to add a different type of replica, for instance, a\n\t\t\t// NON_VOTER to a store that already has a VOTER is fine when we're trying\n\t\t\t// to swap a VOTER with a NON_VOTER. Ensuring that this is indeed the case\n\t\t\t// is outside the scope of this particular helper method. See\n\t\t\t// validatePromotionsAndDemotions for how that is checked.\n\t\t\treplDesc, found := desc.GetReplicaDescriptor(storeID)\n\t\t\tif !found {\n\t\t\t\t// The store we're trying to add to doesn't already have a replica, all\n\t\t\t\t// good.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch t := replDesc.GetType(); t {\n\t\t\tcase roachpb.LEARNER:\n\t\t\t\t// Looks like we found a learner with the same store and node id. One of\n\t\t\t\t// the following is true:\n\t\t\t\t// 1. some previous leaseholder was trying to add it with the\n\t\t\t\t// learner+snapshot+voter cycle and got interrupted.\n\t\t\t\t// 2. we hit a race between the replicate queue and AdminChangeReplicas.\n\t\t\t\t// 3. 
We're trying to swap a voting replica with a non-voting replica\n\t\t\t\t// before the voting replica has been upreplicated and switched from\n\t\t\t\t// LEARNER to VOTER_FULL.\n\t\t\t\treturn errors.AssertionFailedf(\n\t\t\t\t\t\"trying to add(%+v) to a store that already has a %s\", chg, t)\n\t\t\tcase roachpb.VOTER_FULL:\n\t\t\t\tif chg.ChangeType == roachpb.ADD_VOTER {\n\t\t\t\t\treturn errors.AssertionFailedf(\n\t\t\t\t\t\t\"trying to add a voter to a store that already has a %s\", t)\n\t\t\t\t}\n\t\t\tcase roachpb.NON_VOTER:\n\t\t\t\tif chg.ChangeType == roachpb.ADD_NON_VOTER {\n\t\t\t\t\treturn errors.AssertionFailedf(\n\t\t\t\t\t\t\"trying to add a non-voter to a store that already has a %s\", t)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn errors.AssertionFailedf(\"store(%d) being added to already contains a\"+\n\t\t\t\t\t\" replica of an unexpected type: %s\", storeID, t)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (h MultiStakingHooks) AfterValidatorBeginUnbonding(ctx sdk.Context, consAddr sdk.ConsAddress,\n\tvalAddr sdk.ValAddress) {\n\tfor i := range h {\n\t\th[i].AfterValidatorBeginUnbonding(ctx, consAddr, valAddr)\n\t}\n}", "func ExampleELB_CreateLoadBalancerListeners_shared00() {\n\tsvc := elb.New(session.New())\n\tinput := &elb.CreateLoadBalancerListenersInput{\n\t\tListeners: []*elb.Listener{\n\t\t\t{\n\t\t\t\tInstancePort: aws.Int64(80),\n\t\t\t\tInstanceProtocol: aws.String(\"HTTP\"),\n\t\t\t\tLoadBalancerPort: aws.Int64(80),\n\t\t\t\tProtocol: aws.String(\"HTTP\"),\n\t\t\t},\n\t\t},\n\t\tLoadBalancerName: aws.String(\"my-load-balancer\"),\n\t}\n\n\tresult, err := svc.CreateLoadBalancerListeners(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase elb.ErrCodeAccessPointNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeAccessPointNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeDuplicateListenerException:\n\t\t\t\tfmt.Println(elb.ErrCodeDuplicateListenerException, 
aerr.Error())\n\t\t\tcase elb.ErrCodeCertificateNotFoundException:\n\t\t\t\tfmt.Println(elb.ErrCodeCertificateNotFoundException, aerr.Error())\n\t\t\tcase elb.ErrCodeInvalidConfigurationRequestException:\n\t\t\t\tfmt.Println(elb.ErrCodeInvalidConfigurationRequestException, aerr.Error())\n\t\t\tcase elb.ErrCodeUnsupportedProtocolException:\n\t\t\t\tfmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}" ]
[ "0.7061731", "0.6995947", "0.58719605", "0.5816244", "0.5498854", "0.53032184", "0.51472366", "0.505272", "0.5023903", "0.49798962", "0.46906734", "0.4689981", "0.462296", "0.45509726", "0.45450282", "0.4527853", "0.44938648", "0.44776616", "0.44768107", "0.44396228", "0.44191635", "0.44021833", "0.43502924", "0.4347785", "0.43256563", "0.4312063", "0.4309239", "0.43061668", "0.42893824", "0.4287297", "0.42838904", "0.42708725", "0.4265101", "0.42639178", "0.42621684", "0.4252529", "0.4236133", "0.42360926", "0.4211068", "0.42077643", "0.41988444", "0.4195258", "0.41879368", "0.41873825", "0.41855964", "0.41829643", "0.41798618", "0.41792095", "0.4176375", "0.4169162", "0.41669676", "0.4157511", "0.41566902", "0.41562745", "0.41522104", "0.41466135", "0.4145478", "0.41378754", "0.41233206", "0.41203317", "0.41112632", "0.4103203", "0.40975296", "0.40952766", "0.40920627", "0.40894157", "0.4082791", "0.4079531", "0.40678516", "0.40637955", "0.40608206", "0.40578458", "0.40541354", "0.40513682", "0.4049651", "0.40459275", "0.40414372", "0.4036741", "0.40359727", "0.40345", "0.4027672", "0.40245533", "0.4022394", "0.40209532", "0.40180206", "0.40179473", "0.40176517", "0.4013078", "0.40101463", "0.40061396", "0.4005899", "0.40033317", "0.39985386", "0.39979357", "0.39947665", "0.39947563", "0.39944696", "0.3993164", "0.39915785", "0.3991275" ]
0.793702
0
DeleteSegmentListener mocks base method
func (m *MockLoadBalance) DeleteSegmentListener(region string, listener *v1.Listener) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteSegmentListener", region, listener) ret0, _ := ret[0].(error) return ret0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client WorkloadNetworksClient) DeleteSegmentResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func TestDeleteCleanerOneSegment(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Bytes = 100\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\texpected := []*segment{createSegment(t, dir, 0, 100)}\n\tactual, err := cleaner.Clean(expected)\n\trequire.NoError(t, err)\n\trequire.Equal(t, expected, actual)\n}", "func (m *MockLoadBalance) EnsureSegmentListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureSegmentListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *MockSegmentManager) Remove(segmentID int64, scope querypb.DataScope) {\n\t_m.Called(segmentID, scope)\n}", "func (m *MockLoadBalance) DeleteListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestDeleteCleanerNoSegments(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Bytes = 100\n\tcleaner := newDeleteCleaner(opts)\n\tsegments, err := cleaner.Clean(nil)\n\trequire.NoError(t, err)\n\trequire.Nil(t, segments)\n}", "func (m *MockListener) Delete(listenerKey api.ListenerKey, checksum api.Checksum) error {\n\tret := m.ctrl.Call(m, \"Delete\", listenerKey, checksum)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func Test_DeviceService_Remove_Success(t *testing.T) {\n\th := TestHelper{}\n\trep := new(mocks.IDeviceRepository)\n\trepAuth := 
new(mocks.IDeviceAuthRepository)\n\ts := h.CreateTestDeviceService(rep, repAuth)\n\n\tip := \"127.0.0.1\"\n\trep.On(\"Remove\", ip).Return(nil)\n\n\terr := s.Remove(ip)\n\tassert.NoError(t, err)\n}", "func NewMockSegmentManager(t mockConstructorTestingTNewMockSegmentManager) *MockSegmentManager {\n\tmock := &MockSegmentManager{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteSegmentListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteSegmentListener\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteSegmentListener), region, listener)\n}", "func (m *MockProvider) OnServiceDelete(arg0 *v1.Service) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceDelete\", arg0)\n}", "func (m *ManagerMock) Delete(ctx context.Context, s *hub.Subscription) error {\n\targs := m.Called(ctx, s)\n\treturn args.Error(0)\n}", "func (client WorkloadNetworksClient) DeleteSegmentPreparer(ctx context.Context, resourceGroupName string, privateCloudName string, segmentID string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"privateCloudName\": autorest.Encode(\"path\", privateCloudName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"segmentId\": autorest.Encode(\"path\", segmentID),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2020-07-17-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsDelete(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/workloadNetworks/default/segments/{segmentId}\", 
pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client WorkloadNetworksClient) DeleteSegmentSender(req *http.Request) (future WorkloadNetworksDeleteSegmentFuture, err error) {\n\tvar resp *http.Response\n\tresp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))\n\tif err != nil {\n\t\treturn\n\t}\n\tvar azf azure.Future\n\tazf, err = azure.NewFutureFromResponse(resp)\n\tfuture.FutureAPI = &azf\n\tfuture.Result = func(client WorkloadNetworksClient) (ar autorest.Response, err error) {\n\t\tvar done bool\n\t\tdone, err = future.DoneWithContext(context.Background(), client)\n\t\tif err != nil {\n\t\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksDeleteSegmentFuture\", \"Result\", future.Response(), \"Polling failure\")\n\t\t\treturn\n\t\t}\n\t\tif !done {\n\t\t\terr = azure.NewAsyncOpIncompleteError(\"avs.WorkloadNetworksDeleteSegmentFuture\")\n\t\t\treturn\n\t\t}\n\t\tar.Response = future.Response()\n\t\treturn\n\t}\n\treturn\n}", "func (_m *MockBookingStorage) Delete() {\n\t_m.Called()\n}", "func TestDeleteHandler(t *testing.T) {\n\n // ...\n\n}", "func (a *AdminApiService) DeleteTargetSegment(ctx _context.Context, id string) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/target-segment/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type 
header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif 
localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func TestSegmentDummy(t *testing.T) {\n\ttype tcase struct {\n\t\tline geom.Line\n\t}\n\n\tfn := func(t *testing.T, tc tcase) {\n\t\ts := NewSegment(tc.line)\n\t\tif s.GetStart().Equals(tc.line[0]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[0], s.GetStart())\n\t\t}\n\t\tif s.GetEnd().Equals(tc.line[1]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[1], s.GetEnd())\n\t\t}\n\t\tif s.GetLineSegment() != tc.line {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line, s.GetLineSegment())\n\t\t}\n\t}\n\ttestcases := []tcase{\n\t\t{\n\t\t\tline: geom.Line{{1, 2}, {3, 4}},\n\t\t},\n\t}\n\n\tfor i, tc := range testcases {\n\t\ttc := tc\n\t\tt.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) { fn(t, tc) })\n\t}\n}", "func (m *MockProvider) OnEndpointsDelete(arg0 *v1.Endpoints) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsDelete\", arg0)\n}", "func (s) TestSuccessCaseDeletedRoute(t *testing.T) {\n\trh, fakeClient, ch := setupTests()\n\n\trh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true})\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\t// Will start two watches.\n\tif err := waitForFuncWithNames(ctx, 
fakeClient.WaitForWatchRouteConfig, route1, route2); err != nil {\n\t\tt.Fatalf(\"Error while waiting for names: %v\", err)\n\t}\n\n\t// Update the RDSHandler with route names which deletes a route name to\n\t// watch. This should trigger the RDSHandler to cancel the watch for the\n\t// deleted route name to watch.\n\trh.updateRouteNamesToWatch(map[string]bool{route1: true})\n\t// This should delete the watch for route2.\n\trouteNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsClient.CancelRDS failed with error %v\", err)\n\t}\n\tif routeNameDeleted != route2 {\n\t\tt.Fatalf(\"xdsClient.CancelRDS called for route %v, want %v\", routeNameDeleted, route2)\n\t}\n\n\trdsUpdate := xdsresource.RouteConfigUpdate{}\n\t// Invoke callback with the xds client with a certain route update. Due to\n\t// this route update updating every route name that rds handler handles,\n\t// this should write to the update channel to send to the listener.\n\tfakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil)\n\trhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate}\n\tselect {\n\tcase rhu := <-ch:\n\t\tif diff := cmp.Diff(rhu.updates, rhuWant); diff != \"\" {\n\t\t\tt.Fatalf(\"got unexpected route update, diff (-got, +want): %v\", diff)\n\t\t}\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Timed out waiting for update from update channel.\")\n\t}\n\n\trh.close()\n\trouteNameDeleted, err = fakeClient.WaitForCancelRouteConfigWatch(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsClient.CancelRDS failed with error: %v\", err)\n\t}\n\tif routeNameDeleted != route1 {\n\t\tt.Fatalf(\"xdsClient.CancelRDS called for route %v, want %v\", routeNameDeleted, route1)\n\t}\n}", "func (s *ServerSuite) TestSrvRTMOnDelete(c *C) {\n\te1 := testutils.NewResponder(\"Hi, I'm endpoint 1\")\n\tdefer e1.Close()\n\n\tb := MakeBatch(Batch{Addr: \"localhost:11300\", Route: `Path(\"/\")`, URL: e1.URL})\n\tc.Assert(s.mux.Init(b.Snapshot()), 
IsNil)\n\tc.Assert(s.mux.Start(), IsNil)\n\tdefer s.mux.Stop(true)\n\n\t// When: an existing backend server is removed and added again.\n\tfor i := 0; i < 3; i++ {\n\t\tc.Assert(GETResponse(c, b.FrontendURL(\"/\")), Equals, \"Hi, I'm endpoint 1\")\n\t}\n\tc.Assert(s.mux.DeleteServer(b.SK), IsNil)\n\tc.Assert(s.mux.UpsertServer(b.BK, b.S), IsNil)\n\tfor i := 0; i < 4; i++ {\n\t\tc.Assert(GETResponse(c, b.FrontendURL(\"/\")), Equals, \"Hi, I'm endpoint 1\")\n\t}\n\n\t// Then: total count includes only metrics after the server was re-added.\n\trts, err := s.mux.ServerStats(b.SK)\n\tc.Assert(err, IsNil)\n\tc.Assert(rts.Counters.Total, Equals, int64(4))\n}", "func TestServiceDelete(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tvar svcIP string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tsvcIP = svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer 
fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status to be updated\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been allocated\")\n\t}\n\n\terr := fixture.svcClient.Services(\"default\").Delete(context.Background(), \"service-a\", meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been released\")\n\t}\n}", "func (m *MockRouterTx) DELETE(path string, handler interface{}, options ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{path, handler}\n\tfor _, a := range options {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"DELETE\", varargs...)\n}", "func (m *MockVirtualServiceSet) Delete(virtualService ezkube.ResourceId) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Delete\", virtualService)\n}", "func (sdk *Sdk) DeleteSegment(segmentID string) (string, error) {\n\tsdkC := sdk.cms\n\tdeleteSegment := fmt.Sprintf(\"/triggers/%s\", segmentID)\n\n\treturn sdkC.rq.Delete(deleteSegment, nil)\n}", "func (o *Segment) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tif o == nil {\n\t\treturn 0, errors.New(\"boiler: no Segment provided for delete\")\n\t}\n\n\tif err := o.doBeforeDeleteHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\n\targs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), segmentPrimaryKeyMapping)\n\tsql := \"DELETE FROM \\\"segment\\\" WHERE \\\"id\\\"=$1\"\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, args...)\n\t}\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"boiler: 
unable to delete from segment\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"boiler: failed to get rows affected by delete for segment\")\n\t}\n\n\tif err := o.doAfterDeleteHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn rowsAff, nil\n}", "func (m *MockLoadBalance) DeleteMultiListeners(region, lbID string, listeners []*v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteMultiListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockTenantServiceVolumeDao) DelShareableBySID(sid string) error {\n\tret := m.ctrl.Call(m, \"DelShareableBySID\", sid)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockWatcher) Delete() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Delete\")\n}", "func (m *MockHooks) OnDelete(arg0 proto.Message) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnDelete\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockDBStorage) DeleteCallback(arg0, arg1 string) (sql.Result, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteCallback\", arg0, arg1)\n\tret0, _ := ret[0].(sql.Result)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_e *MockSegmentManager_Expecter) Remove(segmentID interface{}, scope interface{}) *MockSegmentManager_Remove_Call {\n\treturn &MockSegmentManager_Remove_Call{Call: _e.mock.On(\"Remove\", segmentID, scope)}\n}", "func TestDeleteCleanerNoRetentionSet(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\texpected := []*segment{createSegment(t, dir, 0, 100)}\n\tactual, err := cleaner.Clean(expected)\n\trequire.NoError(t, err)\n\trequire.Equal(t, expected, actual)\n}", "func (_m *Index) Delete(_a0 index.Entry) (storage.Event, error) {\n\tret := _m.Called(_a0)\n\n\tvar r0 storage.Event\n\tif rf, ok := 
ret.Get(0).(func(index.Entry) storage.Event); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Get(0).(storage.Event)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(index.Entry) error); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockOperation) RemoveHandler(arg0 *client.EventTarget) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoveHandler\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (f Factory) TestGetSegmentOK(t *testing.T) {\n\tprocess := \"test\"\n\tparent, _ := f.Client.CreateMap(process, nil, \"test\")\n\n\tsegment, err := f.Client.GetSegment(process, parent.GetLinkHash())\n\tassert.NoError(t, err)\n\tassert.NotNil(t, segment)\n}", "func MockDeleteResponse(t *testing.T) {\n\tth.Mux.HandleFunc(shareEndpoint+\"/\"+shareID, func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"DELETE\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", fake.TokenID)\n\t\tw.WriteHeader(http.StatusAccepted)\n\t})\n}", "func TestDeleteCleanerMessagesKeepActiveSegment(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Messages = 5\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\tsegs := []*segment{\n\t\tcreateSegment(t, dir, 0, 128),\n\t\tcreateSegment(t, dir, 10, 128),\n\t}\n\toffset := int64(0)\n\tfor _, seg := range segs {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\twriteToSegment(t, seg, offset, []byte(\"blah\"))\n\t\t\toffset++\n\t\t}\n\t}\n\n\tactual, err := cleaner.Clean(segs)\n\trequire.NoError(t, err)\n\trequire.Len(t, actual, 1)\n\trequire.Equal(t, int64(10), actual[0].BaseOffset)\n}", "func (_m *Storage) DeleteVeiculo(id int) error {\n\tret := _m.Called(id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(int) error); ok {\n\t\tr0 = rf(id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (c ClientFake) UpdateSegment(name, campaignID, segmentID string) (Segment, 
error) {\n\treturn Segment{}, nil\n}", "func (s *systemtestSuite) TestServiceAddDeleteServiceVxlan(c *C) {\n\ts.testServiceAddDeleteService(c, \"vxlan\")\n}", "func (m *MockStream) RemoveEventListener(streamEventListener types.StreamEventListener) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RemoveEventListener\", streamEventListener)\n}", "func (r *templateRouter) deleteServiceUnitInternal(id ServiceUnitKey, service ServiceUnit) {\n\tdelete(r.serviceUnits, id)\n\tif len(service.ServiceAliasAssociations) > 0 {\n\t\tr.stateChanged = true\n\t}\n}", "func (m *MockProc) OnSvcHostRemove(arg0 []*host.Host) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnSvcHostRemove\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (client WorkloadNetworksClient) DeleteSegment(ctx context.Context, resourceGroupName string, privateCloudName string, segmentID string) (result WorkloadNetworksDeleteSegmentFuture, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/WorkloadNetworksClient.DeleteSegment\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response() != nil {\n\t\t\t\tsc = result.Response().StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: client.SubscriptionID,\n\t\t\tConstraints: []validation.Constraint{{Target: \"client.SubscriptionID\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 90, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.Pattern, Rule: `^[-\\w\\._\\(\\)]+$`, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"avs.WorkloadNetworksClient\", \"DeleteSegment\", err.Error())\n\t}\n\n\treq, err := client.DeleteSegmentPreparer(ctx, 
resourceGroupName, privateCloudName, segmentID)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"DeleteSegment\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresult, err = client.DeleteSegmentSender(req)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"DeleteSegment\", nil, \"Failure sending request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func TestNodeDeleted(t *testing.T) {\n\tpod0 := &v1.Pod{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tNamespace: \"default\",\n\t\t\tName: \"pod0\",\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tNodeName: \"node0\",\n\t\t},\n\t\tStatus: v1.PodStatus{\n\t\t\tConditions: []v1.PodCondition{\n\t\t\t\t{\n\t\t\t\t\tType: v1.PodReady,\n\t\t\t\t\tStatus: v1.ConditionTrue,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tpod1 := &v1.Pod{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tNamespace: \"default\",\n\t\t\tName: \"pod1\",\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tNodeName: \"node0\",\n\t\t},\n\t\tStatus: v1.PodStatus{\n\t\t\tConditions: []v1.PodCondition{\n\t\t\t\t{\n\t\t\t\t\tType: v1.PodReady,\n\t\t\t\t\tStatus: v1.ConditionTrue,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfnh := &testutil.FakeNodeHandler{\n\t\tExisting: []*v1.Node{\n\t\t\t{\n\t\t\t\tObjectMeta: v1.ObjectMeta{\n\t\t\t\t\tName: \"node0\",\n\t\t\t\t\tCreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t\tStatus: v1.NodeStatus{\n\t\t\t\t\tConditions: []v1.NodeCondition{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tType: v1.NodeReady,\n\t\t\t\t\t\t\tStatus: v1.ConditionUnknown,\n\t\t\t\t\t\t\tLastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),\n\t\t\t\t\t\t\tLastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tClientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*pod0, *pod1}}),\n\t\tDeleteWaitChan: make(chan struct{}),\n\t}\n\n\tfactory := informers.NewSharedInformerFactory(fnh, 
nil, controller.NoResyncPeriodFunc())\n\n\teventBroadcaster := record.NewBroadcaster()\n\tcloudNodeController := &CloudNodeController{\n\t\tkubeClient: fnh,\n\t\tnodeInformer: factory.Nodes(),\n\t\tcloud: &fakecloud.FakeCloud{Err: cloudprovider.InstanceNotFound},\n\t\tnodeMonitorPeriod: 5 * time.Second,\n\t\trecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: \"controllermanager\"}),\n\t}\n\teventBroadcaster.StartLogging(glog.Infof)\n\n\tcloudNodeController.Run()\n\n\tselect {\n\tcase <-fnh.DeleteWaitChan:\n\tcase <-time.After(wait.ForeverTestTimeout):\n\t\tt.Errorf(\"Timed out waiting %v for node to be deleted\", wait.ForeverTestTimeout)\n\t}\n\tif len(fnh.DeletedNodes) != 1 || fnh.DeletedNodes[0].Name != \"node0\" {\n\t\tt.Errorf(\"Node was not deleted\")\n\t}\n}", "func (client BaseClient) DeleteSubscriptionResponder(resp *http.Response) (result Error, err error) {\n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusAccepted,http.StatusBadRequest,http.StatusForbidden,http.StatusNotFound,http.StatusInternalServerError),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (s *SegmentService) Delete(memberID int, item Segment) error {\n\n\tdata := struct {\n\t\tSegment `json:\"segment\"`\n\t}{item}\n\n\tif item.ID < 1 {\n\t\treturn errors.New(\"Delete Segment requires a segment to have an ID already\")\n\t}\n\n\treq, err := s.client.newRequest(\"DELETE\", fmt.Sprintf(\"segment/%d\", memberID), data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = s.client.do(req, nil)\n\treturn err\n}", "func testCmdDeleteFilesystem(t *testing.T) {\n\tt.Log(\"TODO\")\n}", "func TestDeleteCleanerMessages(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Messages = 10\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\tsegs := 
make([]*segment, 20)\n\tfor i := 0; i < 20; i++ {\n\t\tsegs[i] = createSegment(t, dir, int64(i), 20)\n\t\twriteToSegment(t, segs[i], int64(i), []byte(\"blah\"))\n\t}\n\tactual, err := cleaner.Clean(segs)\n\trequire.NoError(t, err)\n\trequire.Len(t, actual, 10)\n\tfor i := 0; i < 10; i++ {\n\t\trequire.Equal(t, int64(i+10), actual[i].BaseOffset)\n\t}\n}", "func TestDelete(t *testing.T) {\n\tRunWithInstance(func(instance *Instance) {\n\t\tInsertFixtures(instance, []EntryFixture{\n\t\t\t{Name: \"int\", Value: \"2891\", ValueType: 1},\n\t\t\t{Name: \"string\", Value: \"hello world!\", ValueType: 3},\n\t\t})\n\n\t\tif err := instance.Delete(\"int\"); err != nil {\n\t\t\tt.Error(\"Instance.Delete: got error:\\n\", err)\n\t\t}\n\n\t\tif err := panicked(func() { instance.MustDelete(\"string\") }); err != nil {\n\t\t\tt.Error(\"Instance.MustDelete: got panic:\\n\", err)\n\t\t}\n\n\t\tif err := instance.Delete(\"foo\"); err == nil {\n\t\t\tt.Error(\"Instance.Delete: expected error with non-existent entry\")\n\t\t} else if _, ok := err.(*ErrNoEntry); !ok {\n\t\t\tt.Error(\"Instance.Delete: expected error of type *ErrNoEntry\")\n\t\t}\n\n\t\tif err := panicked(func() { instance.MustDelete(\"foo\") }); err == nil {\n\t\t\tt.Error(\"Instance.MustDelete: expected panic with non-existent entry\")\n\t\t}\n\t})\n}", "func TestDelete(t *testing.T) {\n\tth.SetupHTTP()\n\tdefer th.TeardownHTTP()\n\n\tMockDeleteResponse(t)\n\tres := sharetypes.Delete(client.ServiceClient(), \"shareTypeID\")\n\tth.AssertNoErr(t, res.Err)\n}", "func assertDeleted(t *testing.T, cl client.Client, thing client.Object) {\n\tt.Helper()\n\tif err := cl.Delete(context.TODO(), thing); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn\n\t\t}\n\t\tt.Fatalf(\"Failed to delete %s: %v\", thing.GetName(), err)\n\t} else {\n\t\tt.Logf(\"Deleted %s\", thing.GetName())\n\t}\n}", "func (m *SGController) onSgDeleted(sg *v1alpha1.Statefulguardian) {\n\tglog.Infof(\"Cluster %s deleted\", sg.Name)\n\texecCont:= 
NewExecController(sg)\n execCont.ClusterQuit(m.ctx)\n\tmetrics.IncEventCounter(sgsDeletedCount)\n\tmetrics.DecEventGauge(sgsTotalCount)\n\tglog.Infof(\"Delete statefulset\")\n\tm.statefulSetControl.DeleteStatefulSet(sg)\n}", "func (m *MockRouter) DELETE(path string, handler interface{}, options ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{path, handler}\n\tfor _, a := range options {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"DELETE\", varargs...)\n}", "func (f Factory) TestGetSegmentNotFound(t *testing.T) {\n\tprocess := \"test\"\n\tfakeLinkHash, _ := types.NewBytes32FromString(\"0000000000000000000000000000000000000000000000000000000000000000\")\n\tsegment, err := f.Client.GetSegment(process, fakeLinkHash)\n\tassert.EqualError(t, err, \"Not Found\")\n\tassert.Nil(t, segment)\n}", "func TestDeleteCleanerBytes(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Bytes = 100\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\tsegs := make([]*segment, 5)\n\tfor i := 0; i < 5; i++ {\n\t\tsegs[i] = createSegment(t, dir, int64(i), 20)\n\t\twriteToSegment(t, segs[i], int64(i), []byte(\"blah\"))\n\t}\n\tactual, err := cleaner.Clean(segs)\n\trequire.NoError(t, err)\n\trequire.Len(t, actual, 2)\n\trequire.Equal(t, int64(3), actual[0].BaseOffset)\n\trequire.Equal(t, int64(4), actual[1].BaseOffset)\n}", "func (client ThreatIntelligenceIndicatorClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func TestReconcileClusterServiceBrokerDelete(t *testing.T) {\n\tfakeKubeClient, fakeCatalogClient, fakeClusterServiceBrokerClient, testController, _ := newTestController(t, getTestCatalogConfig())\n\n\ttestClusterServiceClass := 
getTestClusterServiceClass()\n\ttestClusterServicePlan := getTestClusterServicePlan()\n\n\tbroker := getTestClusterServiceBroker()\n\tbroker.DeletionTimestamp = &metav1.Time{}\n\tbroker.Finalizers = []string{v1beta1.FinalizerServiceCatalog}\n\tfakeCatalogClient.AddReactor(\"get\", \"clusterservicebrokers\", func(action clientgotesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, broker, nil\n\t})\n\tfakeCatalogClient.AddReactor(\"list\", \"clusterserviceclasses\", func(action clientgotesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1beta1.ClusterServiceClassList{\n\t\t\tItems: []v1beta1.ClusterServiceClass{\n\t\t\t\t*testClusterServiceClass,\n\t\t\t},\n\t\t}, nil\n\t})\n\tfakeCatalogClient.AddReactor(\"list\", \"clusterserviceplans\", func(action clientgotesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1beta1.ClusterServicePlanList{\n\t\t\tItems: []v1beta1.ClusterServicePlan{\n\t\t\t\t*testClusterServicePlan,\n\t\t\t},\n\t\t}, nil\n\t})\n\n\terr := reconcileClusterServiceBroker(t, testController, broker)\n\tif err != nil {\n\t\tt.Fatalf(\"This should not fail : %v\", err)\n\t}\n\n\tbrokerActions := fakeClusterServiceBrokerClient.Actions()\n\tassertNumberOfClusterServiceBrokerActions(t, brokerActions, 0)\n\n\t// Verify no core kube actions occurred\n\tkubeActions := fakeKubeClient.Actions()\n\tassertNumberOfActions(t, kubeActions, 0)\n\n\tactions := fakeCatalogClient.Actions()\n\t// The four actions should be:\n\t// - list serviceplans\n\t// - delete serviceplans\n\t// - list serviceclasses\n\t// - delete serviceclass\n\t// - update the ready condition\n\t// - get the broker\n\t// - remove the finalizer\n\tassertNumberOfActions(t, actions, 7)\n\n\tlistRestrictions := clientgotesting.ListRestrictions{\n\t\tLabels: labels.Everything(),\n\t\tFields: fields.OneTermEqualSelector(\"spec.clusterServiceBrokerName\", broker.Name),\n\t}\n\tassertList(t, actions[0], &v1beta1.ClusterServiceClass{}, 
listRestrictions)\n\tassertList(t, actions[1], &v1beta1.ClusterServicePlan{}, listRestrictions)\n\tassertDelete(t, actions[2], testClusterServicePlan)\n\tassertDelete(t, actions[3], testClusterServiceClass)\n\tupdatedClusterServiceBroker := assertUpdateStatus(t, actions[4], broker)\n\tassertClusterServiceBrokerReadyFalse(t, updatedClusterServiceBroker)\n\n\tassertGet(t, actions[5], broker)\n\n\tupdatedClusterServiceBroker = assertUpdateStatus(t, actions[6], broker)\n\tassertEmptyFinalizers(t, updatedClusterServiceBroker)\n\n\tevents := getRecordedEvents(testController)\n\n\texpectedEvent := normalEventBuilder(successClusterServiceBrokerDeletedReason).msg(\n\t\t\"The broker test-clusterservicebroker was deleted successfully.\",\n\t)\n\tif err := checkEvents(events, expectedEvent.stringArr()); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestDestroyHandler(t *testing.T) {\n\tid := \"c79c54de-39ae-46b0-90e5-9f84c77f6974\"\n\tparams := httprouter.Params{\n\t\thttprouter.Param{Key: \"id\", Value: id},\n\t}\n\ts := Subscription{\n\t\tEventType: \"test_type\",\n\t\tContext: \"test_context\",\n\t}\n\n\th := Handler{\n\t\tdb: MockDatabase{\n\t\t\ts: s,\n\t\t\tgetID: id,\n\t\t},\n\t}\n\n\treq, w := newReqParams(\"GET\")\n\n\th.Destroy(w, req, params)\n\n\tcases := []struct {\n\t\tlabel, actual, expected interface{}\n\t}{\n\t\t{\"Response code\", w.Code, 200},\n\t\t{\"Response body contains context\", strings.Contains(w.Body.String(), s.Context), true},\n\t\t{\"Response body contains event type\", strings.Contains(w.Body.String(), s.EventType), true},\n\t}\n\th.Index(w, req, httprouter.Params{})\n\n\ttestCases(t, cases)\n\tcases = []struct {\n\t\tlabel, actual, expected interface{}\n\t}{\n\t\t{\"Response body doesn't contain the id\", strings.Contains(w.Body.String(), id), false},\n\t}\n\ttestCases(t, cases)\n}", "func (client StorageTargetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = 
autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func TestDeleteVolume(t *testing.T) {\n\tts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tio.WriteString(w, \"{\\\"name\\\":\\\"test\\\",\\\"custom_deploy_script\\\":null,\\\"current_release_id\\\":\\\"20160509194833\\\",\\\"target_release_id\\\":null,\\\"created\\\":\\\"Mon, 09 May 2016 19:48:33 UTC\\\",\\\"updated\\\":\\\"Mon, 09 May 2016 19:48:48 UTC\\\",\\\"tags\\\":{},\\\"addresses\\\":{\\\"external\\\":[{\\\"port\\\":\\\"test\\\",\\\"address\\\":\\\"tcp://test:80\\\"}],\\\"internal\\\":null}}\")\n\t}))\n\ttestURL := strings.Replace(ts.URL, \"https://\", \"\", -1)\n\tConvey(\"When deleting a Supergiant Volume.\", t, func() {\n\t\t//setup steps\n\t\tsg, err := NewClient(testURL, \"test\", \"test\")\n\t\tSo(err, ShouldBeNil)\n\t\trelease, err := sg.GetRelease(\"test\", \"test\")\n\t\tSo(err, ShouldBeNil)\n\n\t\tConvey(\"We would expect the volume delete to error if it dos not have any volumes.\", func() {\n\t\t\tdelerr := DeleteVolume(release, \"cheese\")\n\t\t\tSo(delerr, ShouldNotBeNil)\n\t\t\tSo(delerr.Error(), ShouldEqual, \"This Component has not volumes.\")\n\t\t})\n\n\t\t// One Volume\n\t\trelease.Volumes = append(release.Volumes, &common.VolumeBlueprint{\n\t\t\tName: common.IDString(\"test\"),\n\t\t\tType: \"test\",\n\t\t\tSize: 30,\n\t\t})\n\n\t\tConvey(\"We would expect the volume to delete without error.\", func() {\n\t\t\tdelerr := DeleteVolume(release, \"test\")\n\t\t\tSo(delerr, ShouldBeNil)\n\t\t})\n\t\tConvey(\"We would expect the volume delete to error if it dos not exist.\", func() {\n\t\t\tdelerr := DeleteVolume(release, \"cheese\")\n\t\t\tSo(delerr, ShouldNotBeNil)\n\t\t\tSo(delerr.Error(), ShouldEqual, \"Volume not found.\")\n\t\t})\n\t\tConvey(\"We would expect the volume delete to error if there is an 
api error.\", func() {\n\t\t\tts.Close()\n\t\t\tdelerr := DeleteVolume(release, \"test\")\n\t\t\tSo(delerr.Error(), ShouldContainSubstring, \"Put https\")\n\t\t})\n\t})\n\n}", "func (ctrler CtrlDefReactor) OnDistributedServiceCardDelete(obj *DistributedServiceCard) error {\n\tlog.Info(\"OnDistributedServiceCardDelete is not implemented\")\n\treturn nil\n}", "func (m *MockVirtualDestinationSet) Delete(virtualDestination ezkube.ResourceId) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Delete\", virtualDestination)\n}", "func TestGetDeleted4A(t *testing.T) {\n}", "func (client BaseClient) DeleteExpectationResponder(resp *http.Response) (result String, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (ds *DeleteSuite) TestDelete_Failure_SensorType_Not_exists() {\n\n\t// Arrange.\n\n\te := echo.New()\n\treq := httptest.NewRequest(http.MethodDelete, \"/:id\", nil)\n\tres := httptest.NewRecorder()\n\tc := e.NewContext(req, res)\n\n\tc.SetPath(\"/:id\")\n\tc.SetParamNames(\"id\")\n\tc.SetParamValues(\"99\")\n\n\t// Act.\n\n\t_ = HandleDelete(c)\n\n\t// Assert.\n\n\tassert.Equal(ds.T(), http.StatusBadRequest, res.Code)\n\tvar httpError echo.HTTPError\n\t_ = json.Unmarshal(res.Body.Bytes(), &httpError)\n\tassert.Equal(ds.T(), \"sensortype not found\", httpError.Message)\n}", "func TestSiaDirDelete(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\t// Create new siaDir\n\trootDir, err := newRootDir(t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsiaPath, err := modules.NewSiaPath(\"deleteddir\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsiaDirSysPath := siaPath.SiaDirSysPath(rootDir)\n\twal, _ := newTestWAL()\n\tsiaDir, err := New(siaDirSysPath, rootDir, modules.DefaultDirPerm, wal)\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\t// Delete the siadir and keep siadir in memory\n\terr = siaDir.Delete()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify functions either return or error accordingly\n\t//\n\t// First set should not error or panic\n\tif !siaDir.Deleted() {\n\t\tt.Error(\"SiaDir metadata should reflect the deletion\")\n\t}\n\t_ = siaDir.MDPath()\n\t_ = siaDir.Metadata()\n\t_ = siaDir.Path()\n\n\t// Second Set should return an error\n\terr = siaDir.Rename(\"\")\n\tif !errors.Contains(err, ErrDeleted) {\n\t\tt.Error(\"Rename should return with and error for SiaDir deleted\")\n\t}\n\terr = siaDir.SetPath(\"\")\n\tif !errors.Contains(err, ErrDeleted) {\n\t\tt.Error(\"SetPath should return with and error for SiaDir deleted\")\n\t}\n\t_, err = siaDir.DirReader()\n\tif !errors.Contains(err, ErrDeleted) {\n\t\tt.Error(\"DirReader should return with and error for SiaDir deleted\")\n\t}\n\tsiaDir.mu.Lock()\n\terr = siaDir.updateMetadata(Metadata{})\n\tif !errors.Contains(err, ErrDeleted) {\n\t\tt.Error(\"updateMetadata should return with and error for SiaDir deleted\")\n\t}\n\tsiaDir.mu.Unlock()\n}", "func (client ServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func (m *MockDB) DeleteSentenceFromDocumentID(documentID uint) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteSentenceFromDocumentID\", documentID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestDeleteResource(t *testing.T) {\n\tt.Run(\"successful delete\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := 
gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t}).AnyTimes()\n\n\t\tctx := context.Background()\n\t\terr := s.DeleteResource(ctx, specMock, \"service\")\n\t\tg.Expect(err).To(BeNil())\n\t})\n\n\tt.Run(\"delete in progress\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t}).AnyTimes()\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\terr := s.DeleteResource(ctx, specMock, \"service\")\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\t\tvar recerr azure.ReconcileError\n\t\tg.Expect(errors.As(err, &recerr)).To(BeTrue())\n\t\tg.Expect(recerr.IsTransient()).To(BeTrue())\n\t})\n\n\tt.Run(\"skip delete for unmanaged resource\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := 
gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t}).AnyTimes()\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\terr := s.DeleteResource(ctx, specMock, \"service\")\n\t\tg.Expect(err).To(BeNil())\n\t})\n\n\tt.Run(\"error checking if resource is managed\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(ErroringGetClient{Client: c, err: errors.New(\"a get error\")}, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t}).AnyTimes()\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\terr := s.DeleteResource(ctx, specMock, \"service\")\n\t\tg.Expect(err).To(MatchError(ContainSubstring(\"a get error\")))\n\t})\n\n\tt.Run(\"error deleting\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(ErroringDeleteClient{Client: c, err: errors.New(\"an error\")}, 
clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t}).AnyTimes()\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\terr := s.DeleteResource(ctx, specMock, \"service\")\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"failed to delete resource\"))\n\t})\n}", "func (m *MockStudentRepository) Delete(arg0 gocql.UUID) *exception.AppError {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Delete\", arg0)\n\tret0, _ := ret[0].(*exception.AppError)\n\treturn ret0\n}", "func TestGSLBHostRuleDelete(t *testing.T) {\n\ttestPrefix := \"gdp-gslbhrd-\"\n\tgslbHRName := \"test-gslb-hr\"\n\thmRefs := []string{\"my-hm1\"}\n\tsp := \"gap-1\"\n\tttl := 10\n\n\taddTestGDPWithProperties(t, hmRefs, &ttl, &sp, nil)\n\tingObj, routeObj := addIngressAndRouteObjects(t, testPrefix)\n\tvar expectedMembers []nodes.AviGSK8sObj\n\texpectedMembers = append(expectedMembers, getTestGSMemberFromIng(t, ingObj, ingCluster, 1))\n\texpectedMembers = append(expectedMembers, getTestGSMemberFromRoute(t, routeObj, routeCluster, 1))\n\tg := gomega.NewGomegaWithT(t)\n\n\thostName := routeObj.Spec.Host\n\tgslbHRHmRefs := []string{\"my-hm2\"}\n\tgslbHRTTL := 20\n\tgsHRObj := addGSLBHostRule(t, gslbHRName, gslbutils.AVISystem, hostName, gslbHRHmRefs, nil, &gslbHRTTL,\n\t\tingestion.GslbHostRuleAccepted, \"\")\n\tg.Eventually(func() bool {\n\t\treturn verifyGSMembers(t, expectedMembers, routeObj.Spec.Host, utils.ADMIN_NS, gslbHRHmRefs,\n\t\t\t&sp, 
&gslbHRTTL, nil)\n\t}, 5*time.Second, 1*time.Second).Should(gomega.Equal(true))\n\n\tt.Logf(\"will delete the gslb host rule object\")\n\tdeleteGSLBHostRule(t, gsHRObj.Name, gsHRObj.Namespace)\n\tg.Eventually(func() bool {\n\t\t// TTL and HM refs will fall back to the GDP object\n\t\treturn verifyGSMembers(t, expectedMembers, routeObj.Spec.Host, utils.ADMIN_NS, hmRefs,\n\t\t\t&sp, &ttl, nil)\n\t}, 5*time.Second, 1*time.Second).Should(gomega.Equal(true))\n}", "func (m *MockEventBus) RemoveHandler(arg0 members.Handler) {\n\tm.ctrl.Call(m, \"RemoveHandler\", arg0)\n}", "func (p *fullEndpoint) delete(expectedOldValue *Endpoint) {\n\tatomic.CompareAndSwapPointer(&p.endpoint, unsafe.Pointer(expectedOldValue), nil)\n}", "func (m *MockTenantServiceMountRelationDao) DELTenantServiceMountRelationByServiceID(serviceID string) error {\n\tret := m.ctrl.Call(m, \"DELTenantServiceMountRelationByServiceID\", serviceID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockLoadBalance) EnsureMultiSegmentListeners(region, lbID string, listeners []*v1.Listener) (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureMultiSegmentListeners\", region, lbID, listeners)\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func DeleteScriptMocked(t *testing.T, scriptIn *types.Script) {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewScriptService(cs)\n\tassert.Nil(err, \"Couldn't load script service\")\n\tassert.NotNil(ds, \"Script service not instanced\")\n\n\t// to json\n\tdIn, err := json.Marshal(scriptIn)\n\tassert.Nil(err, \"Script test data corrupted\")\n\n\t// call service\n\tcs.On(\"Delete\", fmt.Sprintf(APIPathBlueprintScript, scriptIn.ID)).Return(dIn, 200, nil)\n\terr = ds.DeleteScript(scriptIn.ID)\n\tassert.Nil(err, \"Error deleting script\")\n\n}", "func (_m *ServerConnexion) Delete(oath string) error {\n\tret := _m.Called(oath)\n\n\tvar r0 
error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(oath)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockHost) RemoveStreamHandler(arg0 protocol.ID) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RemoveStreamHandler\", arg0)\n}", "func (m *MockStreamEventListener) OnDestroyStream() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnDestroyStream\")\n}", "func DeleteServerMocked(t *testing.T, serverIn *types.Server) {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewServerService(cs)\n\tassert.Nil(err, \"Couldn't load server service\")\n\tassert.NotNil(ds, \"Server service not instanced\")\n\n\t// to json\n\tdIn, err := json.Marshal(serverIn)\n\tassert.Nil(err, \"Server test data corrupted\")\n\n\t// call service\n\tcs.On(\"Delete\", fmt.Sprintf(\"/cloud/servers/%s\", serverIn.ID)).Return(dIn, 200, nil)\n\terr = ds.DeleteServer(serverIn.ID)\n\tassert.Nil(err, \"Error deleting server\")\n}", "func TestRangeDelete(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo 
fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, 500*time.Millisecond)\n\n\t// Add a new CIDR, this should not have any effect on the existing service.\n\tpoolA.Spec.Cidrs = append(poolA.Spec.Cidrs, cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\tCidr: \"10.0.20.0/24\",\n\t})\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.20\") {\n\t\t\tt.Error(\"Expected new ingress to be in the 10.0.20.0/24 range\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t// Remove the existing range, this should trigger the re-allocation of the existing service\n\tpoolA.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: \"10.0.20.0/24\",\n\t\t},\n\t}\n\t_, err = fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected 
service status update\")\n\t}\n}", "func HandleLoadbalancerDeletionSuccessfully(t *testing.T) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"DELETE\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\n\t\tw.WriteHeader(http.StatusNoContent)\n\t})\n}", "func (ctrler CtrlDefReactor) OnSnapshotRestoreDelete(obj *SnapshotRestore) error {\n\tlog.Info(\"OnSnapshotRestoreDelete is not implemented\")\n\treturn nil\n}", "func (client IngestionSettingsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func (m *InterfacesClientMock) Delete(ctx context.Context, resourceGroupName string, networkInterfaceName string) *retry.Error {\n\targs := m.Called(resourceGroupName, networkInterfaceName)\n\tif args.Error(1) != nil {\n\t\treturn &retry.Error{RawError: args.Error(1)}\n\t}\n\treturn nil\n}", "func (s *Service) Delete(ctx context.Context, spec azure.Spec) error {\n\tinternalLBSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn errors.New(\"invalid internal load balancer specification\")\n\t}\n\tklog.V(2).Infof(\"deleting internal load balancer %s\", internalLBSpec.Name)\n\tf, err := s.Client.Delete(ctx, s.Scope.ClusterConfig.ResourceGroup, internalLBSpec.Name)\n\tif err != nil && azure.ResourceNotFound(err) {\n\t\t// already deleted\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to delete internal load balancer %s in resource group %s\", internalLBSpec.Name, s.Scope.ClusterConfig.ResourceGroup)\n\t}\n\n\terr = f.WaitForCompletionRef(ctx, s.Client.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot create, future response\")\n\t}\n\n\t_, err = f.Result(s.Client)\n\tif err != nil {\n\t\treturn 
errors.Wrap(err, \"result error\")\n\t}\n\tklog.V(2).Infof(\"successfully deleted internal load balancer %s\", internalLBSpec.Name)\n\treturn err\n}", "func (_m *DBClient) DeleteTransmission(age int64, status models.TransmissionStatus) error {\n\tret := _m.Called(age, status)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(int64, models.TransmissionStatus) error); ok {\n\t\tr0 = rf(age, status)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func TestTrunkENI_DeleteCooledDownENIs_NoDeletionTimeStamp(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\ttrunkENI, ec2APIHelper, _ := getMockHelperInstanceAndTrunkObject(ctrl)\n\n\tEniDetails1.deletionTimeStamp = time.Time{}\n\tEniDetails2.deletionTimeStamp = time.Now().Add(-time.Second * 34)\n\ttrunkENI.usedVlanIds[VlanId1] = true\n\ttrunkENI.usedVlanIds[VlanId2] = true\n\n\ttrunkENI.deleteQueue = append(trunkENI.deleteQueue, EniDetails1, EniDetails2)\n\n\tec2APIHelper.EXPECT().DeleteNetworkInterface(&EniDetails1.ID).Return(nil)\n\tec2APIHelper.EXPECT().DeleteNetworkInterface(&EniDetails2.ID).Return(nil)\n\n\ttrunkENI.DeleteCooledDownENIs()\n\tassert.Equal(t, 0, len(trunkENI.deleteQueue))\n}", "func (suite *TenantTestSuite) TestDeleteTenant() {\n\n\trequest, _ := http.NewRequest(\"DELETE\", \"/api/v2/admin/tenants/6ac7d684-1f8e-4a02-a502-720e8f11e50b\", strings.NewReader(\"\"))\n\trequest.Header.Set(\"x-api-key\", suite.clientkey)\n\trequest.Header.Set(\"Accept\", \"application/json\")\n\tresponse := httptest.NewRecorder()\n\n\tsuite.router.ServeHTTP(response, request)\n\n\tcode := response.Code\n\toutput := response.Body.String()\n\n\tmetricProfileJSON := `{\n \"status\": {\n \"message\": \"Tenant Successfully Deleted\",\n \"code\": \"200\"\n }\n}`\n\t// Check that we must have a 200 ok code\n\tsuite.Equal(200, code, \"Internal Server Error\")\n\t// Compare the expected and actual json response\n\tsuite.Equal(metricProfileJSON, output, \"Response body mismatch\")\n\n\t// check 
that the element has actually been Deleted\n\t// connect to mongodb\n\tsession, err := mgo.Dial(suite.cfg.MongoDB.Host)\n\tdefer session.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// try to retrieve item\n\tvar result map[string]interface{}\n\tc := session.DB(suite.cfg.MongoDB.Db).C(\"tenants\")\n\terr = c.Find(bson.M{\"id\": \"6ac7d684-1f8e-4a02-a502-720e8f11e50b\"}).One(&result)\n\n\tsuite.NotEqual(err, nil, \"No not found error\")\n\tsuite.Equal(err.Error(), \"not found\", \"No not found error\")\n}", "func (m *MockIpsecClient) IpsecCbDelete(ctx context.Context, in *IpsecCbDeleteRequestMsg, opts ...grpc.CallOption) (*IpsecCbDeleteResponseMsg, error) {\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"IpsecCbDelete\", varargs...)\n\tret0, _ := ret[0].(*IpsecCbDeleteResponseMsg)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockReminds) HandleDeleteRemindCommand(arg0 *discordgo.Session, arg1 *discordgo.MessageCreate, arg2 []string, arg3 context.Context) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"HandleDeleteRemindCommand\", arg0, arg1, arg2, arg3)\n}", "func (client HTTPSuccessClient) Delete200Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func (m *MockCallback) OnRemove(arg0 int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnRemove\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockServiceEntrySet) Delete(serviceEntry ezkube.ResourceId) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Delete\", serviceEntry)\n}", "func TestMarkForDelete(t *testing.T) {\n\tif !testutil.ShouldRunIntegrationTests() {\n\t\tt.Skip(\"Skipping integration test. 
Set ENV var RUN_EXCHANGE_INTEGRATION=true if you want to run them.\")\n\t}\n\tconfigFile := filepath.Join(\"config\", \"integration.json\")\n\tconfig, err := models.LoadConfigFile(configFile)\n\trequire.Nil(t, err)\n\t_context := context.NewContext(config)\n\n\tinstResp := _context.PharosClient.InstitutionGet(\"test.edu\")\n\trequire.Nil(t, instResp.Error)\n\tinstitution := instResp.Institution()\n\trequire.NotNil(t, institution)\n\n\t// Mark one object in standard storage for deletion\n\ts3Key := testutil.INTEGRATION_GOOD_BAGS[9]\n\tidentifier := strings.Replace(s3Key, \"aptrust.integration.test\", \"test.edu\", 1)\n\tidentifier = strings.Replace(identifier, \".tar\", \"\", 1)\n\tmarkObjectForDeletion(t, _context, identifier, institution.Id)\n\n\t// Mark one object in Glacier-only storage for deletion\n\ts3Key = testutil.INTEGRATION_GLACIER_BAGS[0]\n\tidentifier = strings.Replace(s3Key, \"aptrust.integration.test\", \"test.edu\", 1)\n\tidentifier = strings.Replace(identifier, \".tar\", \"\", 1)\n\tmarkObjectForDeletion(t, _context, identifier, institution.Id)\n}", "func BenchSegment(ctx context.Context, name string) (stop func())", "func TestPodDeletionEvent(t *testing.T) {\n\tf := func(path cmp.Path) bool {\n\t\tswitch path.String() {\n\t\t// These fields change at runtime, so ignore it\n\t\tcase \"LastTimestamp\", \"FirstTimestamp\", \"ObjectMeta.Name\":\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tt.Run(\"emitPodDeletionEvent\", func(t *testing.T) {\n\t\tcontroller := &NoExecuteTaintManager{}\n\t\trecorder := testutil.NewFakeRecorder()\n\t\tcontroller.recorder = recorder\n\t\tcontroller.emitPodDeletionEvent(types.NamespacedName{\n\t\t\tName: \"test\",\n\t\t\tNamespace: \"test\",\n\t\t})\n\t\twant := []*v1.Event{\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t},\n\t\t\t\tInvolvedObject: v1.ObjectReference{\n\t\t\t\t\tKind: \"Pod\",\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\tName: 
\"test\",\n\t\t\t\t},\n\t\t\t\tReason: \"TaintManagerEviction\",\n\t\t\t\tType: \"Normal\",\n\t\t\t\tCount: 1,\n\t\t\t\tMessage: \"Marking for deletion Pod test/test\",\n\t\t\t\tSource: v1.EventSource{Component: \"nodeControllerTest\"},\n\t\t\t},\n\t\t}\n\t\tif diff := cmp.Diff(want, recorder.Events, cmp.FilterPath(f, cmp.Ignore())); len(diff) > 0 {\n\t\t\tt.Errorf(\"emitPodDeletionEvent() returned data (-want,+got):\\n%s\", diff)\n\t\t}\n\t})\n\n\tt.Run(\"emitCancelPodDeletionEvent\", func(t *testing.T) {\n\t\tcontroller := &NoExecuteTaintManager{}\n\t\trecorder := testutil.NewFakeRecorder()\n\t\tcontroller.recorder = recorder\n\t\tcontroller.emitCancelPodDeletionEvent(types.NamespacedName{\n\t\t\tName: \"test\",\n\t\t\tNamespace: \"test\",\n\t\t})\n\t\twant := []*v1.Event{\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t},\n\t\t\t\tInvolvedObject: v1.ObjectReference{\n\t\t\t\t\tKind: \"Pod\",\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t\tReason: \"TaintManagerEviction\",\n\t\t\t\tType: \"Normal\",\n\t\t\t\tCount: 1,\n\t\t\t\tMessage: \"Cancelling deletion of Pod test/test\",\n\t\t\t\tSource: v1.EventSource{Component: \"nodeControllerTest\"},\n\t\t\t},\n\t\t}\n\t\tif diff := cmp.Diff(want, recorder.Events, cmp.FilterPath(f, cmp.Ignore())); len(diff) > 0 {\n\t\t\tt.Errorf(\"emitPodDeletionEvent() returned data (-want,+got):\\n%s\", diff)\n\t\t}\n\t})\n}", "func (client ScheduleMessageClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusBadRequest, http.StatusUnauthorized, http.StatusForbidden, http.StatusNotFound, http.StatusInternalServerError),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}" ]
[ "0.64730257", "0.62781554", "0.6204497", "0.61749214", "0.5982212", "0.5937816", "0.57380974", "0.57224774", "0.5657586", "0.56310046", "0.55468357", "0.5523831", "0.5517349", "0.54638124", "0.54564875", "0.54352546", "0.54173577", "0.540052", "0.53742874", "0.5329653", "0.5300116", "0.52193725", "0.5217846", "0.5208294", "0.52076775", "0.51939934", "0.51938355", "0.51805454", "0.5179914", "0.5177495", "0.51604104", "0.51593465", "0.5144099", "0.50970584", "0.5091588", "0.508985", "0.5089482", "0.5088557", "0.5087718", "0.5084894", "0.5082616", "0.5078257", "0.5063403", "0.5060441", "0.5058335", "0.50322515", "0.50274956", "0.50249815", "0.5021381", "0.5016902", "0.50149614", "0.5008257", "0.50081325", "0.5006003", "0.50028485", "0.49993816", "0.49895018", "0.4982185", "0.498148", "0.49779606", "0.49768615", "0.49644288", "0.49557608", "0.49526355", "0.49513534", "0.49412444", "0.49286604", "0.49252707", "0.49250376", "0.49060738", "0.48971784", "0.48894337", "0.48820603", "0.48748472", "0.48710486", "0.4868433", "0.48646533", "0.4863757", "0.48521972", "0.48518622", "0.48503286", "0.4838586", "0.48371923", "0.48327333", "0.4832002", "0.482347", "0.48211464", "0.4818931", "0.4817195", "0.48130715", "0.48096216", "0.4806841", "0.48057544", "0.4803048", "0.47937667", "0.47914732", "0.47881564", "0.47868046", "0.4781923", "0.4781209" ]
0.7474042
0
DeleteSegmentListener indicates an expected call of DeleteSegmentListener
func (mr *MockLoadBalanceMockRecorder) DeleteSegmentListener(region, listener interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSegmentListener", reflect.TypeOf((*MockLoadBalance)(nil).DeleteSegmentListener), region, listener) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockLoadBalance) DeleteSegmentListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteSegmentListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (client WorkloadNetworksClient) DeleteSegmentResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func TestDeleteCleanerOneSegment(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Bytes = 100\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\texpected := []*segment{createSegment(t, dir, 0, 100)}\n\tactual, err := cleaner.Clean(expected)\n\trequire.NoError(t, err)\n\trequire.Equal(t, expected, actual)\n}", "func TestDeleteCleanerNoSegments(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Bytes = 100\n\tcleaner := newDeleteCleaner(opts)\n\tsegments, err := cleaner.Clean(nil)\n\trequire.NoError(t, err)\n\trequire.Nil(t, segments)\n}", "func (m *SGController) onSgDeleted(sg *v1alpha1.Statefulguardian) {\n\tglog.Infof(\"Cluster %s deleted\", sg.Name)\n\texecCont:= NewExecController(sg)\n execCont.ClusterQuit(m.ctx)\n\tmetrics.IncEventCounter(sgsDeletedCount)\n\tmetrics.DecEventGauge(sgsTotalCount)\n\tglog.Infof(\"Delete statefulset\")\n\tm.statefulSetControl.DeleteStatefulSet(sg)\n}", "func (client WorkloadNetworksClient) DeleteSegmentSender(req *http.Request) (future WorkloadNetworksDeleteSegmentFuture, err error) {\n\tvar resp *http.Response\n\tresp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))\n\tif err != nil {\n\t\treturn\n\t}\n\tvar azf azure.Future\n\tazf, err = azure.NewFutureFromResponse(resp)\n\tfuture.FutureAPI = &azf\n\tfuture.Result = 
func(client WorkloadNetworksClient) (ar autorest.Response, err error) {\n\t\tvar done bool\n\t\tdone, err = future.DoneWithContext(context.Background(), client)\n\t\tif err != nil {\n\t\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksDeleteSegmentFuture\", \"Result\", future.Response(), \"Polling failure\")\n\t\t\treturn\n\t\t}\n\t\tif !done {\n\t\t\terr = azure.NewAsyncOpIncompleteError(\"avs.WorkloadNetworksDeleteSegmentFuture\")\n\t\t\treturn\n\t\t}\n\t\tar.Response = future.Response()\n\t\treturn\n\t}\n\treturn\n}", "func (sdk *Sdk) DeleteSegment(segmentID string) (string, error) {\n\tsdkC := sdk.cms\n\tdeleteSegment := fmt.Sprintf(\"/triggers/%s\", segmentID)\n\n\treturn sdkC.rq.Delete(deleteSegment, nil)\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureSegmentListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureSegmentListener\", reflect.TypeOf((*MockLoadBalance)(nil).EnsureSegmentListener), region, listener)\n}", "func (o *Segment) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tif o == nil {\n\t\treturn 0, errors.New(\"boiler: no Segment provided for delete\")\n\t}\n\n\tif err := o.doBeforeDeleteHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\n\targs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), segmentPrimaryKeyMapping)\n\tsql := \"DELETE FROM \\\"segment\\\" WHERE \\\"id\\\"=$1\"\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, args...)\n\t}\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"boiler: unable to delete from segment\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"boiler: failed to get rows affected by delete for segment\")\n\t}\n\n\tif err := o.doAfterDeleteHooks(ctx, exec); err != nil 
{\n\t\treturn 0, err\n\t}\n\n\treturn rowsAff, nil\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteListener(region, listener interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteListener\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteListener), region, listener)\n}", "func (kcp *KCP) delSegment(seg *segment) {\n\tif seg.data.Len() > 0 {\n\t\tkcp.pool.Put(seg.data)\n\t\tseg.data = nil\n\t}\n}", "func (client WorkloadNetworksClient) DeleteSegmentPreparer(ctx context.Context, resourceGroupName string, privateCloudName string, segmentID string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"privateCloudName\": autorest.Encode(\"path\", privateCloudName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"segmentId\": autorest.Encode(\"path\", segmentID),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2020-07-17-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsDelete(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/workloadNetworks/default/segments/{segmentId}\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (s *SegmentService) Delete(memberID int, item Segment) error {\n\n\tdata := struct {\n\t\tSegment `json:\"segment\"`\n\t}{item}\n\n\tif item.ID < 1 {\n\t\treturn errors.New(\"Delete Segment requires a segment to have an ID already\")\n\t}\n\n\treq, err := s.client.newRequest(\"DELETE\", fmt.Sprintf(\"segment/%d\", memberID), data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = s.client.do(req, nil)\n\treturn err\n}", "func 
(client WorkloadNetworksClient) DeleteSegment(ctx context.Context, resourceGroupName string, privateCloudName string, segmentID string) (result WorkloadNetworksDeleteSegmentFuture, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/WorkloadNetworksClient.DeleteSegment\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response() != nil {\n\t\t\t\tsc = result.Response().StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: client.SubscriptionID,\n\t\t\tConstraints: []validation.Constraint{{Target: \"client.SubscriptionID\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 90, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.Pattern, Rule: `^[-\\w\\._\\(\\)]+$`, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"avs.WorkloadNetworksClient\", \"DeleteSegment\", err.Error())\n\t}\n\n\treq, err := client.DeleteSegmentPreparer(ctx, resourceGroupName, privateCloudName, segmentID)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"DeleteSegment\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresult, err = client.DeleteSegmentSender(req)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"DeleteSegment\", nil, \"Failure sending request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func (_m *MockSegmentManager) Remove(segmentID int64, scope querypb.DataScope) {\n\t_m.Called(segmentID, scope)\n}", "func (a *AdminApiService) DeleteTargetSegment(ctx _context.Context, id string) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = 
_nethttp.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/target-segment/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, 
localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (m *MockLoadBalance) EnsureSegmentListener(region string, listener *v1.Listener) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureSegmentListener\", region, listener)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestDeleteCleanerMessagesKeepActiveSegment(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Messages = 5\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\tsegs := []*segment{\n\t\tcreateSegment(t, dir, 0, 128),\n\t\tcreateSegment(t, dir, 10, 128),\n\t}\n\toffset 
:= int64(0)\n\tfor _, seg := range segs {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\twriteToSegment(t, seg, offset, []byte(\"blah\"))\n\t\t\toffset++\n\t\t}\n\t}\n\n\tactual, err := cleaner.Clean(segs)\n\trequire.NoError(t, err)\n\trequire.Len(t, actual, 1)\n\trequire.Equal(t, int64(10), actual[0].BaseOffset)\n}", "func (l *Listener) delete(rOpts *ReconcileOptions) error {\n\tin := elbv2.DeleteListenerInput{\n\t\tListenerArn: l.CurrentListener.ListenerArn,\n\t}\n\n\tif err := awsutil.ALBsvc.RemoveListener(in); err != nil {\n\t\trOpts.Eventf(api.EventTypeWarning, \"ERROR\", \"Error deleting %v listener: %s\", *l.CurrentListener.Port, err.Error())\n\t\tl.logger.Errorf(\"Failed Listener deletion. ARN: %s: %s\",\n\t\t\t*l.CurrentListener.ListenerArn, err.Error())\n\t\treturn err\n\t}\n\n\tl.deleted = true\n\treturn nil\n}", "func TestDeleteCleanerNoRetentionSet(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\texpected := []*segment{createSegment(t, dir, 0, 100)}\n\tactual, err := cleaner.Clean(expected)\n\trequire.NoError(t, err)\n\trequire.Equal(t, expected, actual)\n}", "func (sc *SecretController) saDeleted(obj interface{}) {\n\tacct := obj.(*v1.ServiceAccount)\n\tsc.deleteSecret(acct.GetName(), acct.GetNamespace())\n\tsc.monitoring.ServiceAccountDeletion.Inc()\n}", "func (ctrler CtrlDefReactor) OnDistributedServiceCardDelete(obj *DistributedServiceCard) error {\n\tlog.Info(\"OnDistributedServiceCardDelete is not implemented\")\n\treturn nil\n}", "func (ctrler CtrlDefReactor) OnConfigurationSnapshotDelete(obj *ConfigurationSnapshot) error {\n\tlog.Info(\"OnConfigurationSnapshotDelete is not implemented\")\n\treturn nil\n}", "func (p *Prometheus) ObserveDurationResourceDeleteEventProcessedSuccess(handler string, start time.Time) {\n\td := p.getDuration(start)\n\tp.processedSucDuration.WithLabelValues(handler, delEventType).Observe(d.Seconds())\n}", 
"func (m *MockLoadBalance) DeleteListener(region string, listener *v1.Listener) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteListener\", region, listener)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (ctrler CtrlDefReactor) OnDSCProfileDelete(obj *DSCProfile) error {\n\tlog.Info(\"OnDSCProfileDelete is not implemented\")\n\treturn nil\n}", "func (mr *MockLoadBalanceMockRecorder) DeleteMultiListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteMultiListeners\", reflect.TypeOf((*MockLoadBalance)(nil).DeleteMultiListeners), region, lbID, listeners)\n}", "func TestSegmentDummy(t *testing.T) {\n\ttype tcase struct {\n\t\tline geom.Line\n\t}\n\n\tfn := func(t *testing.T, tc tcase) {\n\t\ts := NewSegment(tc.line)\n\t\tif s.GetStart().Equals(tc.line[0]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[0], s.GetStart())\n\t\t}\n\t\tif s.GetEnd().Equals(tc.line[1]) == false {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line[1], s.GetEnd())\n\t\t}\n\t\tif s.GetLineSegment() != tc.line {\n\t\t\tt.Errorf(\"error, expected %v got %v\", tc.line, s.GetLineSegment())\n\t\t}\n\t}\n\ttestcases := []tcase{\n\t\t{\n\t\t\tline: geom.Line{{1, 2}, {3, 4}},\n\t\t},\n\t}\n\n\tfor i, tc := range testcases {\n\t\ttc := tc\n\t\tt.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) { fn(t, tc) })\n\t}\n}", "func (ctrler CtrlDefReactor) OnSnapshotRestoreDelete(obj *SnapshotRestore) error {\n\tlog.Info(\"OnSnapshotRestoreDelete is not implemented\")\n\treturn nil\n}", "func (mr *MockListenerMockRecorder) Delete(listenerKey, checksum interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Delete\", reflect.TypeOf((*MockListener)(nil).Delete), listenerKey, checksum)\n}", "func (mr *MockProviderMockRecorder) OnServiceDelete(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"OnServiceDelete\", reflect.TypeOf((*MockProvider)(nil).OnServiceDelete), arg0)\n}", "func (c *Controller) OnDelete(del common.Cluster) {\n\tblog.Infof(\"cluster %+v delete\", del)\n\tif _, ok := c.reconcilerMap[del.ClusterID]; ok {\n\t\tblog.Infof(\"delete del reconciler for %+v\", del)\n\t\t// call cancel function\n\t\tc.cancelFuncMap[del.ClusterID]()\n\t\tdelete(c.cancelFuncMap, del.ClusterID)\n\t\tdelete(c.reconcilerMap, del.ClusterID)\n\t} else {\n\t\tblog.Infof(\"no reconciler for cluster %+v, need to delete\", del)\n\t}\n}", "func TestDeleteCleanerMessagesCompacted(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Messages = 10\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\t// Write segment with gaps in the offsets to emulate compaction.\n\tseg1 := createSegment(t, dir, 0, 1024)\n\twriteToSegment(t, seg1, 2, []byte(\"blah\"))\n\twriteToSegment(t, seg1, 4, []byte(\"blah\"))\n\twriteToSegment(t, seg1, 12, []byte(\"blah\"))\n\n\tseg2 := createSegment(t, dir, 13, 1024)\n\twriteToSegment(t, seg2, 13, []byte(\"blah\"))\n\twriteToSegment(t, seg2, 14, []byte(\"blah\"))\n\twriteToSegment(t, seg2, 15, []byte(\"blah\"))\n\n\tsegs := []*segment{seg1, seg2}\n\tactual, err := cleaner.Clean(segs)\n\n\trequire.NoError(t, err)\n\trequire.Len(t, actual, 2)\n\n\t// Ensure no messages were actually deleted.\n\tss := newSegmentScanner(actual[0])\n\t_, entry, err := ss.Scan()\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(2), entry.Offset)\n\t_, entry, err = ss.Scan()\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(4), entry.Offset)\n\t_, entry, err = ss.Scan()\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(12), entry.Offset)\n\t_, _, err = ss.Scan()\n\trequire.Error(t, err)\n\n\tss = newSegmentScanner(actual[1])\n\t_, entry, err = ss.Scan()\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(13), entry.Offset)\n\t_, entry, 
err = ss.Scan()\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(14), entry.Offset)\n\t_, entry, err = ss.Scan()\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(15), entry.Offset)\n\t_, _, err = ss.Scan()\n\trequire.Error(t, err)\n}", "func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) (err error) {\n\tdefer func() { c.resetOnConnError(err) }()\n\n\tsftpClient, err := c.Init(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, pos := range a {\n\t\tfilename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot determine wal segment path: %w\", err)\n\t\t}\n\n\t\tif err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) {\n\t\t\treturn fmt.Errorf(\"cannot delete wal segment %q: %w\", filename, err)\n\t\t}\n\t\tinternal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, \"DELETE\").Inc()\n\t}\n\n\treturn nil\n}", "func (s) TestSuccessCaseDeletedRoute(t *testing.T) {\n\trh, fakeClient, ch := setupTests()\n\n\trh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true})\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\t// Will start two watches.\n\tif err := waitForFuncWithNames(ctx, fakeClient.WaitForWatchRouteConfig, route1, route2); err != nil {\n\t\tt.Fatalf(\"Error while waiting for names: %v\", err)\n\t}\n\n\t// Update the RDSHandler with route names which deletes a route name to\n\t// watch. 
This should trigger the RDSHandler to cancel the watch for the\n\t// deleted route name to watch.\n\trh.updateRouteNamesToWatch(map[string]bool{route1: true})\n\t// This should delete the watch for route2.\n\trouteNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsClient.CancelRDS failed with error %v\", err)\n\t}\n\tif routeNameDeleted != route2 {\n\t\tt.Fatalf(\"xdsClient.CancelRDS called for route %v, want %v\", routeNameDeleted, route2)\n\t}\n\n\trdsUpdate := xdsresource.RouteConfigUpdate{}\n\t// Invoke callback with the xds client with a certain route update. Due to\n\t// this route update updating every route name that rds handler handles,\n\t// this should write to the update channel to send to the listener.\n\tfakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil)\n\trhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate}\n\tselect {\n\tcase rhu := <-ch:\n\t\tif diff := cmp.Diff(rhu.updates, rhuWant); diff != \"\" {\n\t\t\tt.Fatalf(\"got unexpected route update, diff (-got, +want): %v\", diff)\n\t\t}\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Timed out waiting for update from update channel.\")\n\t}\n\n\trh.close()\n\trouteNameDeleted, err = fakeClient.WaitForCancelRouteConfigWatch(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"xdsClient.CancelRDS failed with error: %v\", err)\n\t}\n\tif routeNameDeleted != route1 {\n\t\tt.Fatalf(\"xdsClient.CancelRDS called for route %v, want %v\", routeNameDeleted, route1)\n\t}\n}", "func (mr *MockIpsecServerMockRecorder) IpsecCbDelete(arg0, arg1 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IpsecCbDelete\", reflect.TypeOf((*MockIpsecServer)(nil).IpsecCbDelete), arg0, arg1)\n}", "func Delete(c *golangsdk.ServiceClient, id string) (r DeleteResult) {\n\turl := resourceURL(c, id)\n\t//fmt.Printf(\"Delete listener url: %s.\\n\", url)\n\t_, r.Err = c.Delete(url, &golangsdk.RequestOpts{\n\t\tOkCodes: 
[]int{204},\n\t})\n\treturn\n}", "func (e *ObservableEditableBuffer) deleted(q0, q1 OffsetTuple) {\n\te.treatasclean = false\n\tfor observer := range e.observers {\n\t\tobserver.Deleted(q0, q1)\n\t}\n}", "func (ctrler CtrlDefReactor) OnClusterDelete(obj *Cluster) error {\n\tlog.Info(\"OnClusterDelete is not implemented\")\n\treturn nil\n}", "func (p *Prometheus) IncResourceDeleteEventProcessedSuccess(handler string) {\n\tp.processedSuc.WithLabelValues(handler, delEventType).Inc()\n}", "func (d *Delegate) BeforeJobDeleted(spec job.Job) {}", "func (o *Segment) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range segmentBeforeDeleteHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Segment) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range segmentAfterDeleteHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestDeleteCleanerMessages(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Messages = 10\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\tsegs := make([]*segment, 20)\n\tfor i := 0; i < 20; i++ {\n\t\tsegs[i] = createSegment(t, dir, int64(i), 20)\n\t\twriteToSegment(t, segs[i], int64(i), []byte(\"blah\"))\n\t}\n\tactual, err := cleaner.Clean(segs)\n\trequire.NoError(t, err)\n\trequire.Len(t, actual, 10)\n\tfor i := 0; i < 10; i++ {\n\t\trequire.Equal(t, int64(i+10), actual[i].BaseOffset)\n\t}\n}", "func (mr *MockLoadBalanceMockRecorder) EnsureMultiSegmentListeners(region, lbID, listeners interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"EnsureMultiSegmentListeners\", 
reflect.TypeOf((*MockLoadBalance)(nil).EnsureMultiSegmentListeners), region, lbID, listeners)\n}", "func (_m *Callbacks) SubscriptionDeleted(id *fftypes.UUID) {\n\t_m.Called(id)\n}", "func (o SegmentSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tif len(o) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tif len(segmentBeforeDeleteHooks) != 0 {\n\t\tfor _, obj := range o {\n\t\t\tif err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t}\n\n\tvar args []interface{}\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), segmentPrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := \"DELETE FROM \\\"segment\\\" WHERE \" +\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, segmentPrimaryKeyColumns, len(o))\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, args)\n\t}\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"boiler: unable to delete all from segment slice\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"boiler: failed to get rows affected by deleteall for segment\")\n\t}\n\n\tif len(segmentAfterDeleteHooks) != 0 {\n\t\tfor _, obj := range o {\n\t\t\tif err := obj.doAfterDeleteHooks(ctx, exec); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn rowsAff, nil\n}", "func HandleLoadbalancerDeletionSuccessfully(t *testing.T) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"DELETE\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\n\t\tw.WriteHeader(http.StatusNoContent)\n\t})\n}", "func (c *grafana) MarkDeleted() {}", "func (s *MetalLBSpeaker) OnDeleteService(svc 
*slim_corev1.Service) error {\n\tif s.shutDown() {\n\t\treturn ErrShutDown\n\t}\n\tvar (\n\t\tsvcID = k8s.ParseServiceID(svc)\n\t\tl = log.WithFields(logrus.Fields{\n\t\t\t\"component\": \"MetalLBSpeaker.OnDeleteService\",\n\t\t\t\"service-id\": svcID,\n\t\t})\n\t\tmeta = fence.Meta{}\n\t)\n\n\ts.Lock()\n\tdelete(s.services, svcID)\n\ts.Unlock()\n\n\tif err := meta.FromObjectMeta(&svc.ObjectMeta); err != nil {\n\t\tl.WithError(err).Error(\"failed to parse event metadata\")\n\t}\n\n\tl.Debug(\"adding event to queue\")\n\t// Passing nil as the service will force the MetalLB speaker to withdraw\n\t// the BGP announcement.\n\ts.queue.Add(svcEvent{\n\t\tMeta: meta,\n\t\top: Delete,\n\t\tid: svcID,\n\t\tsvc: nil,\n\t\teps: nil,\n\t})\n\treturn nil\n}", "func (f Factory) TestGetSegmentNotFound(t *testing.T) {\n\tprocess := \"test\"\n\tfakeLinkHash, _ := types.NewBytes32FromString(\"0000000000000000000000000000000000000000000000000000000000000000\")\n\tsegment, err := f.Client.GetSegment(process, fakeLinkHash)\n\tassert.EqualError(t, err, \"Not Found\")\n\tassert.Nil(t, segment)\n}", "func TestPodDeletionEvent(t *testing.T) {\n\tf := func(path cmp.Path) bool {\n\t\tswitch path.String() {\n\t\t// These fields change at runtime, so ignore it\n\t\tcase \"LastTimestamp\", \"FirstTimestamp\", \"ObjectMeta.Name\":\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tt.Run(\"emitPodDeletionEvent\", func(t *testing.T) {\n\t\tcontroller := &NoExecuteTaintManager{}\n\t\trecorder := testutil.NewFakeRecorder()\n\t\tcontroller.recorder = recorder\n\t\tcontroller.emitPodDeletionEvent(types.NamespacedName{\n\t\t\tName: \"test\",\n\t\t\tNamespace: \"test\",\n\t\t})\n\t\twant := []*v1.Event{\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t},\n\t\t\t\tInvolvedObject: v1.ObjectReference{\n\t\t\t\t\tKind: \"Pod\",\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t\tReason: 
\"TaintManagerEviction\",\n\t\t\t\tType: \"Normal\",\n\t\t\t\tCount: 1,\n\t\t\t\tMessage: \"Marking for deletion Pod test/test\",\n\t\t\t\tSource: v1.EventSource{Component: \"nodeControllerTest\"},\n\t\t\t},\n\t\t}\n\t\tif diff := cmp.Diff(want, recorder.Events, cmp.FilterPath(f, cmp.Ignore())); len(diff) > 0 {\n\t\t\tt.Errorf(\"emitPodDeletionEvent() returned data (-want,+got):\\n%s\", diff)\n\t\t}\n\t})\n\n\tt.Run(\"emitCancelPodDeletionEvent\", func(t *testing.T) {\n\t\tcontroller := &NoExecuteTaintManager{}\n\t\trecorder := testutil.NewFakeRecorder()\n\t\tcontroller.recorder = recorder\n\t\tcontroller.emitCancelPodDeletionEvent(types.NamespacedName{\n\t\t\tName: \"test\",\n\t\t\tNamespace: \"test\",\n\t\t})\n\t\twant := []*v1.Event{\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t},\n\t\t\t\tInvolvedObject: v1.ObjectReference{\n\t\t\t\t\tKind: \"Pod\",\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t\tReason: \"TaintManagerEviction\",\n\t\t\t\tType: \"Normal\",\n\t\t\t\tCount: 1,\n\t\t\t\tMessage: \"Cancelling deletion of Pod test/test\",\n\t\t\t\tSource: v1.EventSource{Component: \"nodeControllerTest\"},\n\t\t\t},\n\t\t}\n\t\tif diff := cmp.Diff(want, recorder.Events, cmp.FilterPath(f, cmp.Ignore())); len(diff) > 0 {\n\t\t\tt.Errorf(\"emitPodDeletionEvent() returned data (-want,+got):\\n%s\", diff)\n\t\t}\n\t})\n}", "func (c ClientFake) UpdateSegment(name, campaignID, segmentID string) (Segment, error) {\n\treturn Segment{}, nil\n}", "func (rh *ruleHandler) internalDelete(name string) {\n\t// deletes relevant discoverer delegate\n\tif delegate, exists := rh.d.delegates[name]; exists {\n\t\tdelegate.handler.DeleteMissing(nil)\n\t\tdelete(rh.d.delegates, name)\n\t}\n}", "func (d Delegate) BeforeJobDeleted(spec job.Job) {\n}", "func TestDeleteCleanerBytes(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Bytes 
= 100\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\tsegs := make([]*segment, 5)\n\tfor i := 0; i < 5; i++ {\n\t\tsegs[i] = createSegment(t, dir, int64(i), 20)\n\t\twriteToSegment(t, segs[i], int64(i), []byte(\"blah\"))\n\t}\n\tactual, err := cleaner.Clean(segs)\n\trequire.NoError(t, err)\n\trequire.Len(t, actual, 2)\n\trequire.Equal(t, int64(3), actual[0].BaseOffset)\n\trequire.Equal(t, int64(4), actual[1].BaseOffset)\n}", "func (mr *MockTenantServiceDeleteDaoMockRecorder) DeleteTenantServicesDelete(record interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteTenantServicesDelete\", reflect.TypeOf((*MockTenantServiceDeleteDao)(nil).DeleteTenantServicesDelete), record)\n}", "func (f Factory) TestGetSegmentOK(t *testing.T) {\n\tprocess := \"test\"\n\tparent, _ := f.Client.CreateMap(process, nil, \"test\")\n\n\tsegment, err := f.Client.GetSegment(process, parent.GetLinkHash())\n\tassert.NoError(t, err)\n\tassert.NotNil(t, segment)\n}", "func (m *VirtualEventsEventsItemSessionsItemVirtualAppointmentRequestBuilder) Delete(ctx context.Context, requestConfiguration *VirtualEventsEventsItemSessionsItemVirtualAppointmentRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (mr *MockProviderMockRecorder) OnEndpointsDelete(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"OnEndpointsDelete\", reflect.TypeOf((*MockProvider)(nil).OnEndpointsDelete), arg0)\n}", "func assertDeleted(t *testing.T, cl client.Client, thing client.Object) {\n\tt.Helper()\n\tif err := cl.Delete(context.TODO(), thing); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn\n\t\t}\n\t\tt.Fatalf(\"Failed to delete %s: %v\", thing.GetName(), err)\n\t} else {\n\t\tt.Logf(\"Deleted %s\", thing.GetName())\n\t}\n}", "func (s *scheduler) onFlushSegDone(e sched.Event) {\n\tevent := e.(*flushSegEvent)\n\tif err := e.GetError(); err != nil {\n\t\t// s.opts.EventListener.BackgroundErrorCB(err)\n\t\tevent.Segment.Unref()\n\t\treturn\n\t}\n\tctx := &Context{Opts: s.opts}\n\tmeta := event.Segment.GetMeta()\n\ttd, err := s.tables.StrongRefTable(meta.Table.Id)\n\tif err != nil {\n\t\t// s.opts.EventListener.BackgroundErrorCB(err)\n\t\tevent.Segment.Unref()\n\t\treturn\n\t}\n\tlogutil.Infof(\" %s | Segment %d | UpgradeSegEvent | Started\", sched.EventPrefix, meta.Id)\n\tnewevent := NewUpgradeSegEvent(ctx, event.Segment, td)\n\ts.Schedule(newevent)\n}", "func (mr *MockIpsecClientMockRecorder) IpsecCbDelete(ctx, in interface{}, opts ...interface{}) *gomock.Call {\n\tvarargs := append([]interface{}{ctx, in}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IpsecCbDelete\", reflect.TypeOf((*MockIpsecClient)(nil).IpsecCbDelete), varargs...)\n}", "func (mr *MockServiceMockRecorder) DeleteServerAndStorages(ctx, r interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteServerAndStorages\", reflect.TypeOf((*MockService)(nil).DeleteServerAndStorages), ctx, r)\n}", "func (s *systemtestSuite) TestServiceAddDeleteServiceVxlan(c *C) {\n\ts.testServiceAddDeleteService(c, \"vxlan\")\n}", "func (ctrler CtrlDefReactor) OnTenantDelete(obj *Tenant) error {\n\tlog.Info(\"OnTenantDelete is not implemented\")\n\treturn nil\n}", "func (mr 
*MockInternalServerMockRecorder) TlsCbDelete(arg0, arg1 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"TlsCbDelete\", reflect.TypeOf((*MockInternalServer)(nil).TlsCbDelete), arg0, arg1)\n}", "func (mm *Manager) OnBcsServiceDelete(obj interface{}) {\n\tsvc, ok := obj.(*v2.BcsService)\n\tif !ok {\n\t\tmesosCritical.WithLabelValues(typeBcsService, eventDelete).Inc()\n\t\tblog.Errorf(\"[Criticail]MesosManager BcsService event handler get unknown type obj %v OnDelete\", obj)\n\t\treturn\n\t}\n\tkey := fmt.Sprintf(\"%s/%s\", svc.GetNamespace(), svc.GetName())\n\tblog.V(5).Infof(\"BcsService %s delete, ready to refresh\", key)\n\tmesosEvent.WithLabelValues(typeBcsService, eventDelete, statusSuccess).Inc()\n\tmm.deleteAppService(key)\n}", "func (mr *MockTenantServiceMonitorDaoMockRecorder) DeleteServiceMonitor(mo interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteServiceMonitor\", reflect.TypeOf((*MockTenantServiceMonitorDao)(nil).DeleteServiceMonitor), mo)\n}", "func (a *UserSegmentsApiService) DeleteUserSegment(ctx context.Context, projectKey string, environmentKey string, userSegmentKey string) ( *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/segments/{projectKey}/{environmentKey}/{userSegmentKey}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"projectKey\"+\"}\", fmt.Sprintf(\"%v\", projectKey), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"environmentKey\"+\"}\", fmt.Sprintf(\"%v\", environmentKey), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"userSegmentKey\"+\"}\", fmt.Sprintf(\"%v\", userSegmentKey), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := 
url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\tdefer localVarHttpResponse.Body.Close()\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tbodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)\n\t\treturn localVarHttpResponse, reportError(\"Status: %v, Body: %s\", localVarHttpResponse.Status, bodyBytes)\n\t}\n\n\treturn localVarHttpResponse, err\n}", "func (m *ManagedDevicesItemSecurityBaselineStatesSecurityBaselineStateItemRequestBuilder) Delete(ctx context.Context, requestConfiguration *ManagedDevicesItemSecurityBaselineStatesSecurityBaselineStateItemRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := 
m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func onDataSourceDelete(ctx context.Context, deletedSource string) {\n\n\t// TODO - find a way to delete datasources - surely a config watch\n\n\tlog.Logger(ctx).Info(\"Sync = Send Event Server-wide for \" + deletedSource)\n\tcl := defaults.NewClient()\n\tcl.Publish(ctx, cl.NewPublication(common.TOPIC_DATASOURCE_EVENT, &object.DataSourceEvent{\n\t\tType: object.DataSourceEvent_DELETE,\n\t\tName: deletedSource,\n\t}))\n\n}", "func (s *scheduler) onUpgradeSegDone(e sched.Event) {\n\tevent := e.(*upgradeSegEvent)\n\tdefer event.TableData.Unref()\n\tdefer event.OldSegment.Unref()\n\tif err := e.GetError(); err != nil {\n\t\ts.opts.EventListener.BackgroundErrorCB(err)\n\t\treturn\n\t}\n\tevent.Segment.Unref()\n}", "func (s *segment) Delete() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.f == nil {\n\t\treturn nil\n\t}\n\n\terr := s.close()\n\tif err != nil {\n\t\treturn err\n\t}\n\terrA := os.Remove(s.filePath)\n\tif errA != nil {\n\t\treturn errSegmentRemove(errA)\n\t}\n\n\t// do we need to do this?\n\ts.f = nil\n\ts = nil\n\n\treturn nil\n}", "func deleted(s split, save func(string)) {\n\tif s.R != \"\" {\n\t\tsave(s.L + s.R[1:])\n\t}\n}", "func (a *AllApiService) EnterpriseDeleteEnterpriseNetworkSegment(ctx _context.Context, body EnterpriseDeleteEnterpriseNetworkSegment) (DeletionConfirmation, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = 
_nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue DeletionConfirmation\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/deleteEnterpriseNetworkSegment\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar 
v DeletionConfirmation\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (mr *MockDBStorageMockRecorder) DeleteCallback(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteCallback\", reflect.TypeOf((*MockDBStorage)(nil).DeleteCallback), arg0, arg1)\n}", "func (mr *MockDBMockRecorder) DeleteSentenceFromDocumentID(documentID interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteSentenceFromDocumentID\", reflect.TypeOf((*MockDB)(nil).DeleteSentenceFromDocumentID), documentID)\n}", "func (r *ScanRequest) Delete(*cloudformationevt.Event, *runtime.Context) error {\n return nil\n}", "func testDeleteSimple(tree T, insert, delete []interface{}, exact bool, errPrfx string, t *testing.T) {\n\tx, y := randomPosition(tree.View())\n\tfor _, e := range insert {\n\t\ttree.Insert(x, y, e)\n\t}\n\texpCol := new(list.List)\nOUTER_LOOP:\n\tfor _, i := range insert {\n\t\tfor _, d := range delete {\n\t\t\tif i == d {\n\t\t\t\tcontinue OUTER_LOOP\n\t\t\t}\n\t\t}\n\t\texpCol.PushBack(i)\n\t}\n\texpDel := new(list.List)\n\tfor _, d := range delete {\n\t\texpDel.PushBack(d)\n\t}\n\tpred, deleted := makeDelClosure(delete)\n\tdelView := tree.View()\n\tif exact {\n\t\tdelView = NewViewP(x, x, y, y)\n\t}\n\ttestDelete(tree, delView, pred, deleted, expDel, t, errPrfx)\n\tfun, collected := SimpleSurvey()\n\ttestSurvey(tree, tree.View(), fun, collected, expCol, t, errPrfx)\n}", "func (s *ServerSuite) TestSrvRTMOnDelete(c *C) {\n\te1 := testutils.NewResponder(\"Hi, I'm endpoint 1\")\n\tdefer e1.Close()\n\n\tb := MakeBatch(Batch{Addr: \"localhost:11300\", Route: `Path(\"/\")`, URL: e1.URL})\n\tc.Assert(s.mux.Init(b.Snapshot()), IsNil)\n\tc.Assert(s.mux.Start(), IsNil)\n\tdefer s.mux.Stop(true)\n\n\t// When: an existing backend server is removed and added again.\n\tfor i := 0; i < 3; i++ {\n\t\tc.Assert(GETResponse(c, b.FrontendURL(\"/\")), Equals, \"Hi, I'm endpoint 1\")\n\t}\n\tc.Assert(s.mux.DeleteServer(b.SK), IsNil)\n\tc.Assert(s.mux.UpsertServer(b.BK, b.S), IsNil)\n\tfor i := 0; i < 4; i++ {\n\t\tc.Assert(GETResponse(c, b.FrontendURL(\"/\")), Equals, \"Hi, I'm endpoint 1\")\n\t}\n\n\t// Then: total count includes only metrics after the server was re-added.\n\trts, err := s.mux.ServerStats(b.SK)\n\tc.Assert(err, IsNil)\n\tc.Assert(rts.Counters.Total, Equals, int64(4))\n}", "func (s *BasePlSqlParserListener) 
ExitDeferred_segment_creation(ctx *Deferred_segment_creationContext) {\n}", "func (mr *MockEndpointsDaoMockRecorder) DeleteByServiceID(sid interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteByServiceID\", reflect.TypeOf((*MockEndpointsDao)(nil).DeleteByServiceID), sid)\n}", "func (s *BasePlSqlParserListener) EnterOn_delete_clause(ctx *On_delete_clauseContext) {}", "func (sm *stateMachine) OnDelete(key string) {\n\tif !sm.running.Load() {\n\t\tsm.logger.Warn(\"state machine is stopped\",\n\t\t\tlogger.String(\"type\", sm.stateMachineType.String()))\n\t\treturn\n\t}\n\tsm.logger.Info(\"discovery state removed\",\n\t\tlogger.String(\"type\", sm.stateMachineType.String()),\n\t\tlogger.String(\"key\", key))\n\tif sm.onDeleteFn != nil {\n\t\tsm.onDeleteFn(key)\n\t}\n}", "func TestDeleteCleanerMessagesBelowLimit(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Messages = 100\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\texpected := make([]*segment, 5)\n\tfor i := 0; i < 5; i++ {\n\t\texpected[i] = createSegment(t, dir, int64(i), 20)\n\t}\n\tactual, err := cleaner.Clean(expected)\n\trequire.NoError(t, err)\n\trequire.Equal(t, expected, actual)\n}", "func Test_DeviceService_Remove_Success(t *testing.T) {\n\th := TestHelper{}\n\trep := new(mocks.IDeviceRepository)\n\trepAuth := new(mocks.IDeviceAuthRepository)\n\ts := h.CreateTestDeviceService(rep, repAuth)\n\n\tip := \"127.0.0.1\"\n\trep.On(\"Remove\", ip).Return(nil)\n\n\terr := s.Remove(ip)\n\tassert.NoError(t, err)\n}", "func TestDeleteCleanerBytesBelowLimit(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Bytes = 50\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\texpected := make([]*segment, 5)\n\tfor i := 0; i < 5; i++ {\n\t\texpected[i] = createSegment(t, dir, int64(i), 20)\n\t}\n\tactual, 
err := cleaner.Clean(expected)\n\trequire.NoError(t, err)\n\trequire.Equal(t, expected, actual)\n}", "func (r *templateRouter) deleteServiceUnitInternal(id ServiceUnitKey, service ServiceUnit) {\n\tdelete(r.serviceUnits, id)\n\tif len(service.ServiceAliasAssociations) > 0 {\n\t\tr.stateChanged = true\n\t}\n}", "func TestAddTwoDeleteScond(t *testing.T) {\n\ttarget := teaser.New()\n\tm1id := target.Add(\"msg1\")\n\tm2id := target.Add(\"msg2\")\n\n\t{\n\t\tdeleted := target.Delete(m2id)\n\t\tassertDeleted(m2id, deleted, t)\n\t}\n\n\t{\n\t\tnextHash, m1vid := target.View()\n\t\tassertMessageId(m1id, m1vid, t)\n\t\tassertMessageHash(\"\", nextHash, t)\n\t}\n}", "func (prce PartitionReconfigurationCompletedEvent) AsServiceDeletedEvent() (*ServiceDeletedEvent, bool) {\n\treturn nil, false\n}", "func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {\n\tif r.DeleteFunc != nil {\n\t\tr.DeleteFunc(obj)\n\t}\n}", "func TestDeleteCleanerBytesMessages(t *testing.T) {\n\topts := deleteCleanerOptions{Name: \"foo\", Logger: noopLogger()}\n\topts.Retention.Messages = 15\n\topts.Retention.Bytes = 240\n\tcleaner := newDeleteCleaner(opts)\n\tdir := tempDir(t)\n\tdefer remove(t, dir)\n\n\tsegs := make([]*segment, 20)\n\tfor i := 0; i < 20; i++ {\n\t\tsegs[i] = createSegment(t, dir, int64(i), 20)\n\t\twriteToSegment(t, segs[i], int64(i), []byte(\"blah\"))\n\t}\n\tactual, err := cleaner.Clean(segs)\n\trequire.NoError(t, err)\n\trequire.Len(t, actual, 5)\n\tfor i := 0; i < 5; i++ {\n\t\trequire.Equal(t, int64(i+15), actual[i].BaseOffset)\n\t}\n}", "func (mr *MockLoadBalancerServiceIfaceMockRecorder) DeleteNetscalerLoadBalancer(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteNetscalerLoadBalancer\", reflect.TypeOf((*MockLoadBalancerServiceIface)(nil).DeleteNetscalerLoadBalancer), p)\n}", "func DeleteListener(t *testing.T, client *gophercloud.ServiceClient, lbID, listenerID string) {\n\tt.Logf(\"Attempting to 
delete listener %s\", listenerID)\n\n\tif err := listeners.Delete(client, listenerID).ExtractErr(); err != nil {\n\t\tif _, ok := err.(gophercloud.ErrDefault404); !ok {\n\t\t\tt.Fatalf(\"Unable to delete listener: %v\", err)\n\t\t}\n\t}\n\n\tif err := WaitForLoadBalancerState(client, lbID, \"ACTIVE\"); err != nil {\n\t\tt.Fatalf(\"Timed out waiting for loadbalancer to become active: %s\", err)\n\t}\n\n\tt.Logf(\"Successfully deleted listener %s\", listenerID)\n}", "func (s *SegmentChangesWrapper) RemoveFromSegment(segmentName string, keys []string) error {\n\treturn errSegmentStorageNotImplementedMethod\n}", "func (client ThreatIntelligenceIndicatorClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func (m *VirtualEventsWebinarsItemSessionsItemVirtualAppointmentRequestBuilder) Delete(ctx context.Context, requestConfiguration *VirtualEventsWebinarsItemSessionsItemVirtualAppointmentRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func handleDelete(ctx *verifierContext, status *types.VerifyImageStatus) {\n\n\tlog.Functionf(\"handleDelete(%s) refcount %d\",\n\t\tstatus.ImageSha256, status.RefCount)\n\n\tif _, err := os.Stat(status.FileLocation); err == nil 
{\n\t\tlog.Functionf(\"handleDelete removing %s\",\n\t\t\tstatus.FileLocation)\n\t\tif err := os.RemoveAll(status.FileLocation); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\tlog.Warnf(\"handleDelete: Unable to delete %s: %s\",\n\t\t\tstatus.FileLocation, err)\n\t}\n\n\tunpublishVerifyImageStatus(ctx, status)\n\tlog.Functionf(\"handleDelete done for %s\", status.ImageSha256)\n}", "func (p *fullEndpoint) delete(expectedOldValue *Endpoint) {\n\tatomic.CompareAndSwapPointer(&p.endpoint, unsafe.Pointer(expectedOldValue), nil)\n}" ]
[ "0.6918156", "0.66918373", "0.6287821", "0.6228136", "0.6007048", "0.59755933", "0.5909671", "0.584487", "0.57518405", "0.57057583", "0.5639084", "0.55902565", "0.55884504", "0.5515108", "0.5483161", "0.5406128", "0.53928345", "0.5342651", "0.522433", "0.5224313", "0.5223785", "0.5219264", "0.52087927", "0.5206821", "0.5203784", "0.520203", "0.51817936", "0.51735914", "0.51707417", "0.51616734", "0.5151431", "0.50967556", "0.50795555", "0.506069", "0.5044177", "0.5027104", "0.50248784", "0.5018463", "0.5012947", "0.5009623", "0.5004187", "0.5004061", "0.5002843", "0.49894488", "0.49841377", "0.4981045", "0.49808782", "0.49778894", "0.49749488", "0.4967992", "0.4956399", "0.49544355", "0.495192", "0.4946528", "0.49343523", "0.49328452", "0.49191537", "0.4911995", "0.4911437", "0.49079993", "0.4907786", "0.49066633", "0.49063006", "0.4905725", "0.4903314", "0.49015167", "0.48995045", "0.4893969", "0.48884192", "0.4871115", "0.48588204", "0.4855929", "0.48470765", "0.48440075", "0.48370722", "0.48353913", "0.4833173", "0.48287216", "0.48253116", "0.48211488", "0.48195207", "0.48179314", "0.48172763", "0.4813547", "0.4796854", "0.4791599", "0.47903097", "0.47898686", "0.47791904", "0.47753608", "0.4774209", "0.47740823", "0.47665882", "0.4761923", "0.47600013", "0.47585285", "0.47564507", "0.47530794", "0.47502843", "0.47427756" ]
0.6824238
1
NewMockValidater creates a new mock instance
func NewMockValidater(ctrl *gomock.Controller) *MockValidater { mock := &MockValidater{ctrl: ctrl} mock.recorder = &MockValidaterMockRecorder{mock} return mock }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewMock(t *testing.T) *MockT { return &MockT{t: t} }", "func NewMock(now time.Time) *Mock {\n\treturn &Mock{\n\t\tnow: now,\n\t\tmockTimers: &timerHeap{},\n\t}\n}", "func NewMock() *Mock {\n\treturn &Mock{now: time.Unix(0, 0)}\n}", "func NewMock() *Mock {\n\treturn &Mock{\n\t\tData: MockData{\n\t\t\tUptime: true,\n\t\t\tFile: true,\n\t\t\tTCPResponse: true,\n\t\t\tHTTPStatus: true,\n\t\t},\n\t}\n}", "func NewValidatorProvider(t mockConstructorTestingTNewValidatorProvider) *ValidatorProvider {\n\tmock := &ValidatorProvider{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewFormDataValidator(t mockConstructorTestingTNewFormDataValidator) *FormDataValidator {\n\tmock := &FormDataValidator{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newVersionCheckerMock(version string, tags []string) *VersionChecker {\n\n\tfixedAppVersion := fixVersion(version)\n\n\treturn &VersionChecker{\n\t\tfixedAppVersion: fixedAppVersion,\n\t\tversionSource: &versionCheckerMock{\n\t\t\ttags: tags,\n\t\t\tfixVersionStrFunc: fixVersion,\n\t\t\ttagFilterFunc: versionFilterFunc(fixedAppVersion),\n\t\t},\n\t}\n}", "func New() (*mock, error) {\n\treturn &mock{\n\t\tConfigService: ConfigService{},\n\t\tContainerService: ContainerService{},\n\t\tDistributionService: DistributionService{},\n\t\tImageService: ImageService{},\n\t\tNetworkService: NetworkService{},\n\t\tNodeService: NodeService{},\n\t\tPluginService: PluginService{},\n\t\tSecretService: SecretService{},\n\t\tServiceService: ServiceService{},\n\t\tSystemService: SystemService{},\n\t\tSwarmService: SwarmService{},\n\t\tVolumeService: VolumeService{},\n\t\tVersion: Version,\n\t}, nil\n}", "func NewMock() *Mock {\n\tc := &Mock{\n\t\tFakeIncoming: func() chan []byte {\n\t\t\treturn make(chan []byte, 2)\n\t\t},\n\t\tFakeName: func() string {\n\t\t\treturn \"TestClient\"\n\t\t},\n\t\tFakeGame: func() string {\n\t\t\treturn 
\"test\"\n\t\t},\n\t\tFakeClose: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeStopTimer: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeRoom: func() interfaces.Room {\n\t\t\treturn nil\n\t\t},\n\t\tFakeSetRoom: func(interfaces.Room) {\n\n\t\t},\n\t}\n\n\tc.FakeWritePump = func() {\n\t\tfor range c.Incoming() {\n\t\t\t// Do nothing\n\t\t}\n\t}\n\n\tc.FakeSetName = func(string) interfaces.Client {\n\t\treturn c\n\t}\n\treturn c\n}", "func New() *Mock {\n\treturn &Mock{\n\t\tm: mockMap{},\n\t\toldTransport: http.DefaultTransport,\n\t}\n}", "func (m *MockWatcherConstructor) New(arg0 Machine, arg1 string, arg2 []string, arg3, arg4, arg5 string, arg6 time.Duration, arg7 map[string]interface{}) (interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"New\", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)\n\tret0, _ := ret[0].(interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func NewFakeValidator(validationShouldPass bool) ValidatorInterface {\n\treturn &FakeValidator{validationSucceeds: validationShouldPass}\n}", "func NewFormFiller(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *FormFiller {\n\tmock := &FormFiller{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newHandler(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *handler {\n\tmock := &handler{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewMock(middleware []Middleware) OrganizationService {\n\tvar svc OrganizationService = NewBasicOrganizationServiceServiceMock()\n\tfor _, m := range middleware {\n\t\tsvc = m(svc)\n\t}\n\treturn svc\n}", "func newMockKvCapabilityVerifier(t mockConstructorTestingTnewMockKvCapabilityVerifier) *mockKvCapabilityVerifier {\n\tmock := &mockKvCapabilityVerifier{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewRequester(t mockConstructorTestingTNewRequester) 
*Requester {\n\tmock := &Requester{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newValidator(svc Service) (Service, error) {\n\treturn validator{next: svc}, nil\n}", "func makeValidValidatorWrapper() ValidatorWrapper {\n\tv := makeValidValidator()\n\tds := Delegations{\n\t\tNewDelegation(v.Address, new(big.Int).Set(v.MinSelfDelegation)),\n\t\tNewDelegation(common.BigToAddress(common.Big1), new(big.Int).Sub(v.MaxTotalDelegation, v.MinSelfDelegation)),\n\t}\n\treturn ValidatorWrapper{\n\t\tValidator: v,\n\t\tDelegations: ds,\n\t}\n}", "func NewMock() Cache {\n\treturn &mock{}\n}", "func NewMock() *MockMetrics {\n\treturn &MockMetrics{}\n}", "func NewObjectVersioner(t mockConstructorTestingTNewObjectVersioner) *ObjectVersioner {\n\tmock := &ObjectVersioner{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewMock() *Mock {\n\treturn &Mock{VolumesMock: &VolumesServiceMock{}}\n}", "func NewMock(path string, logger log.FieldLogger) Listener {\n\treturn &mock{\n\t\tpath: path,\n\t\tlogger: logger,\n\t}\n}", "func NewMockObject(uid, name, ns string, res api.Resource) api.Object {\n\treturn NewObject(uuid.NewFromString(uid), name, ns, res)\n}", "func NewMockInterface(t mockConstructorTestingTNewMockInterface) *MockInterface {\n\tmock := &MockInterface{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewMocklibrarian(t mockConstructorTestingTNewMocklibrarian) *Mocklibrarian {\n\tmock := &Mocklibrarian{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (s *KeeperTestSuite) TestHandleNewValidator() {\n\tctx := s.ctx\n\n\taddrDels := simtestutil.AddTestAddrsIncremental(s.bankKeeper, s.stakingKeeper, ctx, 1, s.stakingKeeper.TokensFromConsensusPower(ctx, 0))\n\tvalAddrs := simtestutil.ConvertAddrsToValAddrs(addrDels)\n\tpks := 
simtestutil.CreateTestPubKeys(1)\n\taddr, val := valAddrs[0], pks[0]\n\ttstaking := stakingtestutil.NewHelper(s.T(), ctx, s.stakingKeeper)\n\tctx = ctx.WithBlockHeight(s.slashingKeeper.SignedBlocksWindow(ctx) + 1)\n\n\t// Validator created\n\tamt := tstaking.CreateValidatorWithValPower(addr, val, 100, true)\n\n\tstaking.EndBlocker(ctx, s.stakingKeeper)\n\ts.Require().Equal(\n\t\ts.bankKeeper.GetAllBalances(ctx, sdk.AccAddress(addr)),\n\t\tsdk.NewCoins(sdk.NewCoin(s.stakingKeeper.GetParams(ctx).BondDenom, InitTokens.Sub(amt))),\n\t)\n\ts.Require().Equal(amt, s.stakingKeeper.Validator(ctx, addr).GetBondedTokens())\n\n\t// Now a validator, for two blocks\n\ts.slashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, true)\n\tctx = ctx.WithBlockHeight(s.slashingKeeper.SignedBlocksWindow(ctx) + 2)\n\ts.slashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\n\tinfo, found := s.slashingKeeper.GetValidatorSigningInfo(ctx, sdk.ConsAddress(val.Address()))\n\ts.Require().True(found)\n\ts.Require().Equal(s.slashingKeeper.SignedBlocksWindow(ctx)+1, info.StartHeight)\n\ts.Require().Equal(int64(2), info.IndexOffset)\n\ts.Require().Equal(int64(1), info.MissedBlocksCounter)\n\ts.Require().Equal(time.Unix(0, 0).UTC(), info.JailedUntil)\n\n\t// validator should be bonded still, should not have been jailed or slashed\n\tvalidator, _ := s.stakingKeeper.GetValidatorByConsAddr(ctx, sdk.GetConsAddress(val))\n\ts.Require().Equal(stakingtypes.Bonded, validator.GetStatus())\n\tbondPool := s.stakingKeeper.GetBondedPool(ctx)\n\texpTokens := s.stakingKeeper.TokensFromConsensusPower(ctx, 100)\n\t// adding genesis validator tokens\n\texpTokens = expTokens.Add(s.stakingKeeper.TokensFromConsensusPower(ctx, 1))\n\ts.Require().True(expTokens.Equal(s.bankKeeper.GetBalance(ctx, bondPool.GetAddress(), s.stakingKeeper.BondDenom(ctx)).Amount))\n}", "func NewMockValidator(ctrl *gomock.Controller) *MockValidator {\n\tmock := &MockValidator{ctrl: ctrl}\n\tmock.recorder = 
&MockValidatorMockRecorder{mock}\n\treturn mock\n}", "func NewMock(path string, nodes uint, replicas uint, vbuckets uint, specs ...BucketSpec) (m *Mock, err error) {\n\tvar lsn *net.TCPListener\n\tchAccept := make(chan bool)\n\tm = &Mock{}\n\n\tdefer func() {\n\t\tclose(chAccept)\n\t\tif lsn != nil {\n\t\t\tif err := lsn.Close(); err != nil {\n\t\t\t\tlog.Printf(\"Failed to close listener: %v\", err)\n\t\t\t}\n\t\t}\n\t\texc := recover()\n\n\t\tif exc == nil {\n\t\t\t// No errors, everything is OK\n\t\t\treturn\n\t\t}\n\n\t\t// Close mock on error, destroying resources\n\t\tm.Close()\n\t\tif mExc, ok := exc.(mockError); !ok {\n\t\t\tpanic(mExc)\n\t\t} else {\n\t\t\tm = nil\n\t\t\terr = mExc\n\t\t}\n\t}()\n\n\tif lsn, err = net.ListenTCP(\"tcp\", &net.TCPAddr{Port: 0}); err != nil {\n\t\tthrowMockError(\"Couldn't set up listening socket\", err)\n\t}\n\t_, ctlPort, err := net.SplitHostPort(lsn.Addr().String())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to split host and port: %v\", err)\n\t}\n\tlog.Printf(\"Listening for control connection at %s\\n\", ctlPort)\n\n\tgo func() {\n\t\tvar err error\n\n\t\tdefer func() {\n\t\t\tchAccept <- false\n\t\t}()\n\t\tif m.conn, err = lsn.Accept(); err != nil {\n\t\t\tthrowMockError(\"Couldn't accept incoming control connection from mock\", err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\tif len(specs) == 0 {\n\t\tspecs = []BucketSpec{{Name: \"default\", Type: BCouchbase}}\n\t}\n\n\toptions := []string{\n\t\t\"-jar\", path, \"--harakiri-monitor\", \"localhost:\" + ctlPort, \"--port\", \"0\",\n\t\t\"--replicas\", strconv.Itoa(int(replicas)),\n\t\t\"--vbuckets\", strconv.Itoa(int(vbuckets)),\n\t\t\"--nodes\", strconv.Itoa(int(nodes)),\n\t\t\"--buckets\", m.buildSpecStrings(specs),\n\t}\n\n\tlog.Printf(\"Invoking java %s\", strings.Join(options, \" \"))\n\tm.cmd = exec.Command(\"java\", options...)\n\n\tm.cmd.Stdout = os.Stdout\n\tm.cmd.Stderr = os.Stderr\n\n\tif err = m.cmd.Start(); err != nil {\n\t\tm.cmd = 
nil\n\t\tthrowMockError(\"Couldn't start command\", err)\n\t}\n\n\tselect {\n\tcase <-chAccept:\n\t\tbreak\n\n\tcase <-time.After(mockInitTimeout):\n\t\tthrowMockError(\"Timed out waiting for initialization\", errors.New(\"timeout\"))\n\t}\n\n\tm.rw = bufio.NewReadWriter(bufio.NewReader(m.conn), bufio.NewWriter(m.conn))\n\n\t// Read the port buffer, which is delimited by a NUL byte\n\tif portBytes, err := m.rw.ReadBytes(0); err != nil {\n\t\tthrowMockError(\"Couldn't get port information\", err)\n\t} else {\n\t\tportBytes = portBytes[:len(portBytes)-1]\n\t\tif entryPort, err := strconv.Atoi(string(portBytes)); err != nil {\n\t\t\tthrowMockError(\"Incorrectly formatted port from mock\", err)\n\t\t} else {\n\t\t\tm.EntryPort = uint16(entryPort)\n\t\t}\n\t}\n\n\tlog.Printf(\"Mock HTTP port at %d\\n\", m.EntryPort)\n\treturn\n}", "func (m *MockIModel) New(model interface{}) interfaces.IModel {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"New\", model)\n\tret0, _ := ret[0].(interfaces.IModel)\n\treturn ret0\n}", "func NewMockVerifier(ctrl *gomock.Controller) *MockVerifier {\n\tmock := &MockVerifier{ctrl: ctrl}\n\tmock.recorder = &MockVerifierMockRecorder{mock}\n\treturn mock\n}", "func NewModifierMock(t minimock.Tester) *ModifierMock {\n\tm := &ModifierMock{t: t}\n\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.SetMock = mModifierMockSet{mock: m}\n\n\treturn m\n}", "func makeValidValidator() Validator {\n\tcr := validCommissionRates\n\tc := Commission{cr, big.NewInt(300)}\n\td := Description{\n\t\tName: \"Wayne\",\n\t\tIdentity: \"wen\",\n\t\tWebsite: \"harmony.one.wen\",\n\t\tDetails: \"best\",\n\t}\n\tv := Validator{\n\t\tAddress: validatorAddr,\n\t\tSlotPubKeys: []shard.BLSPublicKey{blsPubSigPairs[0].pub},\n\t\tLastEpochInCommittee: big.NewInt(20),\n\t\tMinSelfDelegation: tenK,\n\t\tMaxTotalDelegation: twelveK,\n\t\tStatus: effective.Active,\n\t\tCommission: c,\n\t\tDescription: d,\n\t\tCreationHeight: 
big.NewInt(12306),\n\t}\n\treturn v\n}", "func NewSibling(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *Sibling {\n\tmock := &Sibling{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newMockSubscriber() mockSubscriber {\n\treturn mockSubscriber{}\n}", "func NewMatchFunc(t mockConstructorTestingTNewMatchFunc) *MatchFunc {\n\tmock := &MatchFunc{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (o *FakeObject) New(args ...interface{}) Object { return o.Invoke(args) }", "func newRemoveFunc(t mockConstructorTestingTnewRemoveFunc) *removeFunc {\n\tmock := &removeFunc{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func TestHandleNewValidator(t *testing.T) {\n\tapp := simapp.Setup(false)\n\tctx := app.BaseApp.NewContext(false, tmproto.Header{})\n\n\taddrDels := simapp.AddTestAddrsIncremental(app, ctx, 1, sdk.TokensFromConsensusPower(200, sdk.DefaultPowerReduction))\n\tvalAddrs := simapp.ConvertAddrsToValAddrs(addrDels)\n\tpks := simapp.CreateTestPubKeys(1)\n\taddr, val := valAddrs[0], pks[0]\n\ttstaking := teststaking.NewHelper(t, ctx, app.CustomStakingKeeper, app.CustomGovKeeper)\n\tctx = ctx.WithBlockHeight(1)\n\n\t// Validator created\n\ttstaking.CreateValidator(addr, val, true)\n\n\tstaking.EndBlocker(ctx, app.CustomStakingKeeper)\n\n\t// Now a validator, for two blocks\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, true)\n\tctx = ctx.WithBlockHeight(2)\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\n\tinfo, found := app.CustomSlashingKeeper.GetValidatorSigningInfo(ctx, sdk.ConsAddress(val.Address()))\n\trequire.True(t, found)\n\trequire.Equal(t, int64(1), info.StartHeight)\n\trequire.Equal(t, int64(1), info.MischanceConfidence)\n\trequire.Equal(t, int64(0), info.Mischance)\n\trequire.Equal(t, int64(1), 
info.MissedBlocksCounter)\n\trequire.Equal(t, int64(1), info.ProducedBlocksCounter)\n\trequire.Equal(t, time.Unix(0, 0).UTC(), info.InactiveUntil)\n\n\t// validator should be active still, should not have been inactivated\n\tvalidator, _ := app.CustomStakingKeeper.GetValidatorByConsAddr(ctx, sdk.GetConsAddress(val))\n\trequire.Equal(t, stakingtypes.Active, validator.GetStatus())\n}", "func newMockedEvaluator(cnvtr converter, tknzr tokenizer) *Evaluator {\n\tresult := &Evaluator{}\n\tresult.cnvtr = cnvtr\n\tresult.tknzr = tknzr\n\tresult.functions = make(map[string]functions.Function)\n\tresult.operators = make(map[string]operators.Operator)\n\tresult.stack = make([]interface{}, 0)\n\treturn result\n}", "func TestMockValidity(t *testing.T) {\n\tnr := 50\n\t_, hlp := agreement.WireAgreement(nr)\n\thash, _ := crypto.RandEntropy(32)\n\thandler := agreement.NewHandler(hlp.Keys[0], *hlp.P)\n\n\tfor i := 0; i < nr; i++ {\n\t\ta := message.MockAgreement(hash, 1, 3, hlp.Keys, hlp.P, i)\n\t\tif !assert.NoError(t, handler.Verify(a)) {\n\t\t\tt.FailNow()\n\t\t}\n\t}\n}", "func (m *MockLogic) Validator() core.ValidatorLogic {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Validator\")\n\tret0, _ := ret[0].(core.ValidatorLogic)\n\treturn ret0\n}", "func (m *MockAtomicLogic) Validator() core.ValidatorLogic {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Validator\")\n\tret0, _ := ret[0].(core.ValidatorLogic)\n\treturn ret0\n}", "func NewMock() Client {\n\treturn &mockClient{}\n}", "func NewMockFilter(ok bool) MockFilter {\r\n\treturn MockFilter{\r\n\t\tok: ok,\r\n\t}\r\n}", "func NewMockStaker(ctrl *gomock.Controller) *MockStaker {\n\tmock := &MockStaker{ctrl: ctrl}\n\tmock.recorder = &MockStakerMockRecorder{mock}\n\treturn mock\n}", "func NewMockChecker(ctrl *gomock.Controller) *MockChecker {\n\tmock := &MockChecker{ctrl: ctrl}\n\tmock.recorder = &MockCheckerMockRecorder{mock}\n\treturn mock\n}", "func NewMockAuth(t mockConstructorTestingTNewMockAuth) *MockAuth {\n\tmock := 
&MockAuth{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewMockWithHooksFrom(i WithHooks) *MockWithHooks {\n\treturn &MockWithHooks{\n\t\tPostHandleFunc: &WithHooksPostHandleFunc{\n\t\t\tdefaultHook: i.PostHandle,\n\t\t},\n\t\tPreHandleFunc: &WithHooksPreHandleFunc{\n\t\t\tdefaultHook: i.PreHandle,\n\t\t},\n\t}\n}", "func NewMockRecurringTaskRequirements(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockRecurringTaskRequirements {\n\tmock := &MockRecurringTaskRequirements{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewConsenterVerifier(t mockConstructorTestingTNewConsenterVerifier) *ConsenterVerifier {\n\tmock := &ConsenterVerifier{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func New() *Validator {\n\treturn &Validator{}\n}", "func NewhttpClientMock(valid bool) *HTTPClientMock {\n\treturn &HTTPClientMock{\n\t\tapiKeyPublic: \"apiKeyPublic\",\n\t\tapiKeyPrivate: \"apiKeyPrivate\",\n\t\tclient: http.DefaultClient,\n\t\tvalidCreds: valid,\n\t\tfx: fixtures.New(),\n\t\tCallFunc: func() (int, int, error) {\n\t\t\tif valid == true {\n\t\t\t\treturn 1, 1, nil\n\t\t\t}\n\t\t\treturn 0, 0, errors.New(\"Unexpected error: Unexpected server response code: 401: EOF\")\n\t\t},\n\t\tSendMailV31Func: func(req *http.Request) (*http.Response, error) {\n\t\t\treturn nil, errors.New(\"mock send mail function not implemented yet\")\n\t\t},\n\t}\n}", "func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := &Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := &Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := 
&Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := &Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func New() *Validator {\n\treturn &Validator{loadedMap:make(map[string]bool)}\n}", "func NewGatewayMock(t minimock.Tester) *GatewayMock {\n\tm := &GatewayMock{t: t}\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.AutherMock = mGatewayMockAuther{mock: m}\n\n\tm.BeforeRunMock = mGatewayMockBeforeRun{mock: m}\n\tm.BeforeRunMock.callArgs = []*GatewayMockBeforeRunParams{}\n\n\tm.BootstrapperMock = mGatewayMockBootstrapper{mock: m}\n\n\tm.EphemeralModeMock = mGatewayMockEphemeralMode{mock: m}\n\tm.EphemeralModeMock.callArgs = []*GatewayMockEphemeralModeParams{}\n\n\tm.GetStateMock = mGatewayMockGetState{mock: m}\n\n\tm.NewGatewayMock = mGatewayMockNewGateway{mock: m}\n\tm.NewGatewayMock.callArgs = []*GatewayMockNewGatewayParams{}\n\n\tm.OnConsensusFinishedMock = mGatewayMockOnConsensusFinished{mock: m}\n\tm.OnConsensusFinishedMock.callArgs = []*GatewayMockOnConsensusFinishedParams{}\n\n\tm.OnPulseFromConsensusMock = mGatewayMockOnPulseFromConsensus{mock: m}\n\tm.OnPulseFromConsensusMock.callArgs = []*GatewayMockOnPulseFromConsensusParams{}\n\n\tm.OnPulseFromPulsarMock = mGatewayMockOnPulseFromPulsar{mock: m}\n\tm.OnPulseFromPulsarMock.callArgs = []*GatewayMockOnPulseFromPulsarParams{}\n\n\tm.RunMock = mGatewayMockRun{mock: m}\n\tm.RunMock.callArgs = []*GatewayMockRunParams{}\n\n\tm.UpdateStateMock = mGatewayMockUpdateState{mock: m}\n\tm.UpdateStateMock.callArgs = []*GatewayMockUpdateStateParams{}\n\n\treturn m\n}", "func NewMock(serverHost string) (*MockClient, error) {\n\treturn &MockClient{}, nil\n}", "func CreateMock(method interface{}, url interface{}, headers interface{}, body interface{}) *go_mock_yourself_http.Mock 
{\n\tmockRequest := new(go_mock_yourself_http.Request)\n\n\tif method != nil {\n\t\tmockRequest.SetMethod(method)\n\t}\n\n\tif url != nil {\n\t\tmockRequest.SetUrl(url)\n\t}\n\n\tif body != nil {\n\t\tmockRequest.SetBody(body)\n\t}\n\n\tif headers != nil {\n\t\tmockRequest.SetHeaders(headers)\n\t}\n\n\tmockResponse := new(go_mock_yourself_http.Response)\n\tmockResponse.SetStatusCode(222)\n\tmockResponse.SetBody(\"i'm a cute loving mock, almost as cute as mumi, bichi and rasti\")\n\n\tmock, _ := go_mock_yourself_http.NewMock(\"my lovely testing mock\", mockRequest, mockResponse)\n\treturn mock\n}", "func newRunner(output string, err error) *MockRunner {\n\tm := &MockRunner{}\n\tm.On(\"Run\", mock.Anything).Return([]byte(output), err)\n\treturn m\n}", "func New(opts ...Option) staking.Contract {\n\tbs := &stakingContractMock{}\n\n\tfor _, o := range opts {\n\t\to(bs)\n\t}\n\n\treturn bs\n}", "func NewMockValidatorKeeper(ctrl *gomock.Controller) *MockValidatorKeeper {\n\tmock := &MockValidatorKeeper{ctrl: ctrl}\n\tmock.recorder = &MockValidatorKeeperMockRecorder{mock}\n\treturn mock\n}", "func New() *Validator {\n\treturn &Validator{Errors: make(map[string]string)}\n}", "func (m *MockCreator) New() (go_reporter.Reporter, error) {\n\tret := m.ctrl.Call(m, \"New\")\n\tret0, _ := ret[0].(go_reporter.Reporter)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s AlwaysPanicStakingMock) Validator(sdk.Context, sdk.ValAddress) stakingtypes.ValidatorI {\n\tpanic(\"unexpected call\")\n}", "func NewMockrequester(ctrl *gomock.Controller) *Mockrequester {\n\tmock := &Mockrequester{ctrl: ctrl}\n\tmock.recorder = &MockrequesterMockRecorder{mock}\n\treturn mock\n}", "func NewMockSupport() *MockSupport {\n\treturn &MockSupport{\n\t\tPublisher: NewBlockPublisher(),\n\t}\n}", "func NewMockmakeRequester(ctrl *gomock.Controller) *MockmakeRequester {\n\tmock := &MockmakeRequester{ctrl: ctrl}\n\tmock.recorder = &MockmakeRequesterMockRecorder{mock}\n\treturn mock\n}", "func 
NewRequester4(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *Requester4 {\n\tmock := &Requester4{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewMockWatcher(ctrl *gomock.Controller) *MockWatcher {\n\tmock := &MockWatcher{ctrl: ctrl}\n\tmock.recorder = &MockWatcherMockRecorder{mock}\n\treturn mock\n}", "func NewMockWatcher(ctrl *gomock.Controller) *MockWatcher {\n\tmock := &MockWatcher{ctrl: ctrl}\n\tmock.recorder = &MockWatcherMockRecorder{mock}\n\treturn mock\n}", "func NewMockWatcher(ctrl *gomock.Controller) *MockWatcher {\n\tmock := &MockWatcher{ctrl: ctrl}\n\tmock.recorder = &MockWatcherMockRecorder{mock}\n\treturn mock\n}", "func NewMockStore(t mockConstructorTestingTNewMockStore) *MockStore {\n\tmock := &MockStore{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newMock(deps mockDependencies, t testing.TB) (Component, error) {\n\tbackupConfig := config.NewConfig(\"\", \"\", strings.NewReplacer())\n\tbackupConfig.CopyConfig(config.Datadog)\n\n\tconfig.Datadog.CopyConfig(config.NewConfig(\"mock\", \"XXXX\", strings.NewReplacer()))\n\n\tconfig.SetFeatures(t, deps.Params.Features...)\n\n\t// call InitConfig to set defaults.\n\tconfig.InitConfig(config.Datadog)\n\tc := &cfg{\n\t\tConfig: config.Datadog,\n\t}\n\n\tif !deps.Params.SetupConfig {\n\n\t\tif deps.Params.ConfFilePath != \"\" {\n\t\t\tconfig.Datadog.SetConfigType(\"yaml\")\n\t\t\terr := config.Datadog.ReadConfig(strings.NewReader(deps.Params.ConfFilePath))\n\t\t\tif err != nil {\n\t\t\t\t// The YAML was invalid, fail initialization of the mock config.\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\twarnings, _ := setupConfig(deps)\n\t\tc.warnings = warnings\n\t}\n\n\t// Overrides are explicit and will take precedence over any other\n\t// setting\n\tfor k, v := range deps.Params.Overrides {\n\t\tconfig.Datadog.Set(k, v)\n\t}\n\n\t// swap the existing config back at the end 
of the test.\n\tt.Cleanup(func() { config.Datadog.CopyConfig(backupConfig) })\n\n\treturn c, nil\n}", "func NewClientMock(t minimock.Tester) *ClientMock {\n\tm := &ClientMock{t: t}\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.ActivatePrototypeMock = mClientMockActivatePrototype{mock: m}\n\tm.ActivatePrototypeMock.callArgs = []*ClientMockActivatePrototypeParams{}\n\n\tm.DeployCodeMock = mClientMockDeployCode{mock: m}\n\tm.DeployCodeMock.callArgs = []*ClientMockDeployCodeParams{}\n\n\tm.GetAbandonedRequestMock = mClientMockGetAbandonedRequest{mock: m}\n\tm.GetAbandonedRequestMock.callArgs = []*ClientMockGetAbandonedRequestParams{}\n\n\tm.GetCodeMock = mClientMockGetCode{mock: m}\n\tm.GetCodeMock.callArgs = []*ClientMockGetCodeParams{}\n\n\tm.GetObjectMock = mClientMockGetObject{mock: m}\n\tm.GetObjectMock.callArgs = []*ClientMockGetObjectParams{}\n\n\tm.GetPendingsMock = mClientMockGetPendings{mock: m}\n\tm.GetPendingsMock.callArgs = []*ClientMockGetPendingsParams{}\n\n\tm.HasPendingsMock = mClientMockHasPendings{mock: m}\n\tm.HasPendingsMock.callArgs = []*ClientMockHasPendingsParams{}\n\n\tm.InjectCodeDescriptorMock = mClientMockInjectCodeDescriptor{mock: m}\n\tm.InjectCodeDescriptorMock.callArgs = []*ClientMockInjectCodeDescriptorParams{}\n\n\tm.InjectFinishMock = mClientMockInjectFinish{mock: m}\n\n\tm.InjectObjectDescriptorMock = mClientMockInjectObjectDescriptor{mock: m}\n\tm.InjectObjectDescriptorMock.callArgs = []*ClientMockInjectObjectDescriptorParams{}\n\n\tm.RegisterIncomingRequestMock = mClientMockRegisterIncomingRequest{mock: m}\n\tm.RegisterIncomingRequestMock.callArgs = []*ClientMockRegisterIncomingRequestParams{}\n\n\tm.RegisterOutgoingRequestMock = mClientMockRegisterOutgoingRequest{mock: m}\n\tm.RegisterOutgoingRequestMock.callArgs = []*ClientMockRegisterOutgoingRequestParams{}\n\n\tm.RegisterResultMock = mClientMockRegisterResult{mock: m}\n\tm.RegisterResultMock.callArgs = 
[]*ClientMockRegisterResultParams{}\n\n\tm.StateMock = mClientMockState{mock: m}\n\n\treturn m\n}", "func NewMockLocker(ctrl *gomock.Controller) *MockLocker {\n\tmock := &MockLocker{ctrl: ctrl}\n\tmock.recorder = &MockLockerMockRecorder{mock}\n\treturn mock\n}", "func NewMockLocker(ctrl *gomock.Controller) *MockLocker {\n\tmock := &MockLocker{ctrl: ctrl}\n\tmock.recorder = &MockLockerMockRecorder{mock}\n\treturn mock\n}", "func New(h webhook.Hook) *Validator {\n\treturn &Validator{h: h}\n}", "func NewMockValidatorLogic(ctrl *gomock.Controller) *MockValidatorLogic {\n\tmock := &MockValidatorLogic{ctrl: ctrl}\n\tmock.recorder = &MockValidatorLogicMockRecorder{mock}\n\treturn mock\n}", "func NewMockUi() *MockUi {\n\tm := new(MockUi)\n\tm.once.Do(m.init)\n\treturn m\n}", "func NewForge(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *Forge {\n\tmock := &Forge{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewMockGenerator(name string, memo string, results []*Result, err error) Generator {\n\treturn &MockGenerator{name: name, memo: memo, results: results, err: err}\n}", "func (m *MockNotifierProvider) New(arg0 upgraded.State, arg1 *cert.Info, arg2 config.Schema) upgraded.Notifier {\n\tret := m.ctrl.Call(m, \"New\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(upgraded.Notifier)\n\treturn ret0\n}", "func NewMockSyncValidator(ctrl *gomock.Controller) *MockSyncValidator {\n\tmock := &MockSyncValidator{ctrl: ctrl}\n\tmock.recorder = &MockSyncValidatorMockRecorder{mock}\n\treturn mock\n}", "func NewManager(t mockConstructorTestingTNewManager) *Manager {\n\tmock := &Manager{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewMock(response string) *Operator {\n\treturn &Operator{cli: client.NewMock(response)}\n}", "func NewBlank(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *Blank {\n\tmock := &Blank{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { 
mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client {\n\treturn &http.Client{\n\t\tTransport: transportFunc(doer),\n\t}\n}", "func newMockTenantLimits(limits map[string]*Limits) *mockTenantLimits {\n\treturn &mockTenantLimits{\n\t\tlimits: limits,\n\t}\n}", "func NewMockFormatter(ctrl *gomock.Controller) *MockFormatter {\n\tmock := &MockFormatter{ctrl: ctrl}\n\tmock.recorder = &MockFormatterMockRecorder{mock}\n\treturn mock\n}", "func NewLogPollerWrapper(t mockConstructorTestingTNewLogPollerWrapper) *LogPollerWrapper {\n\tmock := &LogPollerWrapper{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewSynchronizable(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *Synchronizable {\n\tmock := &Synchronizable{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewMyReader(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MyReader {\n\tmock := &MyReader{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewMockPipelineFrom(i iface.Pipeline) *MockPipeline {\n\treturn &MockPipeline{\n\t\tAddFunc: &PipelineAddFunc{\n\t\t\tdefaultHook: i.Add,\n\t\t},\n\t\tRunFunc: &PipelineRunFunc{\n\t\t\tdefaultHook: i.Run,\n\t\t},\n\t}\n}", "func NewShifterMock(t minimock.Tester) *ShifterMock {\n\tm := &ShifterMock{t: t}\n\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.ShiftMock = mShifterMockShift{mock: m}\n\n\treturn m\n}", "func (m *MockIVerification) CreateVerification(v *Verification) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateVerification\", v)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func InitMockCalculator(d int, err error) {\n\tvar mock mockCalculator\n\n\tif err == nil {\n\t\tmock.distance = d\n\t} else {\n\t\tmock.err = &err\n\t}\n\n\tcalc = 
&mock\n}" ]
[ "0.6273712", "0.5970394", "0.5969597", "0.59486234", "0.59387463", "0.59225535", "0.59168476", "0.58693707", "0.5769528", "0.57305455", "0.57235", "0.5701721", "0.5605328", "0.56008047", "0.5596261", "0.5586228", "0.55857927", "0.54954785", "0.5493031", "0.54787886", "0.54775804", "0.54721403", "0.5464557", "0.5447273", "0.54268473", "0.5422708", "0.54136306", "0.5402575", "0.53500134", "0.5289533", "0.5288124", "0.5266565", "0.52633256", "0.52607054", "0.52512914", "0.52406734", "0.5231544", "0.52223575", "0.5222158", "0.5211515", "0.52104086", "0.5209677", "0.5205635", "0.518763", "0.5184335", "0.51771265", "0.5176741", "0.5175034", "0.5174554", "0.51728356", "0.51723176", "0.5152423", "0.51479524", "0.514186", "0.5138621", "0.5138621", "0.5138621", "0.5138621", "0.5132639", "0.5128261", "0.5125022", "0.5106695", "0.5103071", "0.5097648", "0.50908494", "0.5087887", "0.5086806", "0.5086615", "0.5063432", "0.50628674", "0.50589764", "0.50587445", "0.5058097", "0.5058097", "0.5058097", "0.5055212", "0.5050463", "0.5048661", "0.50439304", "0.50439304", "0.5039149", "0.50386125", "0.50320303", "0.5027127", "0.5025633", "0.50237733", "0.5019176", "0.50101006", "0.50065196", "0.5003962", "0.5002064", "0.5001242", "0.49992874", "0.49985245", "0.49955744", "0.49932826", "0.49851117", "0.49741554", "0.49731538", "0.49726647" ]
0.6302369
0
EXPECT returns an object that allows the caller to indicate expected use
func (m *MockValidater) EXPECT() *MockValidaterMockRecorder { return m.recorder }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mmGetObject *mClientMockGetObject) Expect(ctx context.Context, head insolar.Reference) *mClientMockGetObject {\n\tif mmGetObject.mock.funcGetObject != nil {\n\t\tmmGetObject.mock.t.Fatalf(\"ClientMock.GetObject mock is already set by Set\")\n\t}\n\n\tif mmGetObject.defaultExpectation == nil {\n\t\tmmGetObject.defaultExpectation = &ClientMockGetObjectExpectation{}\n\t}\n\n\tmmGetObject.defaultExpectation.params = &ClientMockGetObjectParams{ctx, head}\n\tfor _, e := range mmGetObject.expectations {\n\t\tif minimock.Equal(e.params, mmGetObject.defaultExpectation.params) {\n\t\t\tmmGetObject.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetObject.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetObject\n}", "func (r Requester) Assert(actual, expected interface{}) Requester {\n\t//r.actualResponse = actual\n\t//r.expectedResponse = expected\n\treturn r\n}", "func (r *Request) Expect(t *testing.T) *Response {\n\tr.apiTest.t = t\n\treturn r.apiTest.response\n}", "func (m *MockNotary) Notarize(arg0 string) (map[string]interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Notarize\", arg0)\n\tret0, _ := ret[0].(map[string]interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (tc TestCases) expect() {\n\tfmt.Println(cnt)\n\tcnt++\n\tif !reflect.DeepEqual(tc.resp, tc.respExp) {\n\t\ttc.t.Error(fmt.Sprintf(\"\\nRequested: \", tc.req, \"\\nExpected: \", tc.respExp, \"\\nFound: \", tc.resp))\n\t}\n}", "func (r *Request) Expect(t TestingT) *Response {\n\tr.apiTest.t = t\n\treturn r.apiTest.response\n}", "func Expect(t cbtest.T, actual interface{}, matcher matcher.Matcher, labelAndArgs ...interface{}) {\n\tt.Helper()\n\tres := ExpectE(t, actual, matcher, labelAndArgs...)\n\tif !res {\n\t\tt.FailNow()\n\t}\n}", "func (m *MockisObject_Obj) EXPECT() *MockisObject_ObjMockRecorder {\n\treturn m.recorder\n}", "func Expect(t *testing.T, v, m interface{}) {\n\tvt, vok := v.(Equaler)\n\tmt, mok := m.(Equaler)\n\n\tvar 
state bool\n\tif vok && mok {\n\t\tstate = vt.Equal(mt)\n\t} else {\n\t\tstate = reflect.DeepEqual(v, m)\n\t}\n\n\tif state {\n\t\tflux.FatalFailed(t, \"Value %+v and %+v are not a match\", v, m)\n\t\treturn\n\t}\n\tflux.LogPassed(t, \"Value %+v and %+v are a match\", v, m)\n}", "func (mmState *mClientMockState) Expect() *mClientMockState {\n\tif mmState.mock.funcState != nil {\n\t\tmmState.mock.t.Fatalf(\"ClientMock.State mock is already set by Set\")\n\t}\n\n\tif mmState.defaultExpectation == nil {\n\t\tmmState.defaultExpectation = &ClientMockStateExpectation{}\n\t}\n\n\treturn mmState\n}", "func (mmProvide *mContainerMockProvide) Expect(constructor interface{}) *mContainerMockProvide {\n\tif mmProvide.mock.funcProvide != nil {\n\t\tmmProvide.mock.t.Fatalf(\"ContainerMock.Provide mock is already set by Set\")\n\t}\n\n\tif mmProvide.defaultExpectation == nil {\n\t\tmmProvide.defaultExpectation = &ContainerMockProvideExpectation{}\n\t}\n\n\tmmProvide.defaultExpectation.params = &ContainerMockProvideParams{constructor}\n\tfor _, e := range mmProvide.expectations {\n\t\tif minimock.Equal(e.params, mmProvide.defaultExpectation.params) {\n\t\t\tmmProvide.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmProvide.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmProvide\n}", "func Mock() Env {\n\treturn mock.New()\n}", "func (mmGetCode *mClientMockGetCode) Expect(ctx context.Context, ref insolar.Reference) *mClientMockGetCode {\n\tif mmGetCode.mock.funcGetCode != nil {\n\t\tmmGetCode.mock.t.Fatalf(\"ClientMock.GetCode mock is already set by Set\")\n\t}\n\n\tif mmGetCode.defaultExpectation == nil {\n\t\tmmGetCode.defaultExpectation = &ClientMockGetCodeExpectation{}\n\t}\n\n\tmmGetCode.defaultExpectation.params = &ClientMockGetCodeParams{ctx, ref}\n\tfor _, e := range mmGetCode.expectations {\n\t\tif minimock.Equal(e.params, mmGetCode.defaultExpectation.params) {\n\t\t\tmmGetCode.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", 
*mmGetCode.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetCode\n}", "func expect(t *testing.T, method, url string, testieOptions ...func(*http.Request)) *testie {\n\treq, err := http.NewRequest(method, url, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, opt := range testieOptions {\n\t\topt(req)\n\t}\n\n\treturn testReq(t, req)\n}", "func (_m *MockOStream) EXPECT() *MockOStreamMockRecorder {\n\treturn _m.recorder\n}", "func (mmGetUser *mStorageMockGetUser) Expect(ctx context.Context, userID int64) *mStorageMockGetUser {\n\tif mmGetUser.mock.funcGetUser != nil {\n\t\tmmGetUser.mock.t.Fatalf(\"StorageMock.GetUser mock is already set by Set\")\n\t}\n\n\tif mmGetUser.defaultExpectation == nil {\n\t\tmmGetUser.defaultExpectation = &StorageMockGetUserExpectation{}\n\t}\n\n\tmmGetUser.defaultExpectation.params = &StorageMockGetUserParams{ctx, userID}\n\tfor _, e := range mmGetUser.expectations {\n\t\tif minimock.Equal(e.params, mmGetUser.defaultExpectation.params) {\n\t\t\tmmGetUser.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetUser.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetUser\n}", "func (mmGetObject *mClientMockGetObject) Return(o1 ObjectDescriptor, err error) *ClientMock {\n\tif mmGetObject.mock.funcGetObject != nil {\n\t\tmmGetObject.mock.t.Fatalf(\"ClientMock.GetObject mock is already set by Set\")\n\t}\n\n\tif mmGetObject.defaultExpectation == nil {\n\t\tmmGetObject.defaultExpectation = &ClientMockGetObjectExpectation{mock: mmGetObject.mock}\n\t}\n\tmmGetObject.defaultExpectation.results = &ClientMockGetObjectResults{o1, err}\n\treturn mmGetObject.mock\n}", "func (mmGather *mGathererMockGather) Expect() *mGathererMockGather {\n\tif mmGather.mock.funcGather != nil {\n\t\tmmGather.mock.t.Fatalf(\"GathererMock.Gather mock is already set by Set\")\n\t}\n\n\tif mmGather.defaultExpectation == nil {\n\t\tmmGather.defaultExpectation = &GathererMockGatherExpectation{}\n\t}\n\n\treturn mmGather\n}", "func (m 
*MockParser) EXPECT() *MockParserMockRecorder {\n\treturn m.recorder\n}", "func (m *MockParser) EXPECT() *MockParserMockRecorder {\n\treturn m.recorder\n}", "func (mmWriteTo *mDigestHolderMockWriteTo) Expect(w io.Writer) *mDigestHolderMockWriteTo {\n\tif mmWriteTo.mock.funcWriteTo != nil {\n\t\tmmWriteTo.mock.t.Fatalf(\"DigestHolderMock.WriteTo mock is already set by Set\")\n\t}\n\n\tif mmWriteTo.defaultExpectation == nil {\n\t\tmmWriteTo.defaultExpectation = &DigestHolderMockWriteToExpectation{}\n\t}\n\n\tmmWriteTo.defaultExpectation.params = &DigestHolderMockWriteToParams{w}\n\tfor _, e := range mmWriteTo.expectations {\n\t\tif minimock.Equal(e.params, mmWriteTo.defaultExpectation.params) {\n\t\t\tmmWriteTo.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmWriteTo.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmWriteTo\n}", "func (rb *RequestBuilder) EXPECT() *ResponseAsserter {\n\treq := httptest.NewRequest(rb.method, rb.path, rb.body)\n\tfor k, v := range rb.hdr {\n\t\treq.Header[k] = v\n\t}\n\n\trec := httptest.NewRecorder()\n\trb.cas.h.ServeHTTP(rec, req)\n\n\treturn &ResponseAsserter{\n\t\trec: rec,\n\t\treq: req,\n\t\tb: rb,\n\t\tfail: rb.fail.\n\t\t\tCopy().\n\t\t\tWithRequest(req).\n\t\t\tWithResponse(rec),\n\t}\n}", "func (mmGetState *mGatewayMockGetState) Expect() *mGatewayMockGetState {\n\tif mmGetState.mock.funcGetState != nil {\n\t\tmmGetState.mock.t.Fatalf(\"GatewayMock.GetState mock is already set by Set\")\n\t}\n\n\tif mmGetState.defaultExpectation == nil {\n\t\tmmGetState.defaultExpectation = &GatewayMockGetStateExpectation{}\n\t}\n\n\treturn mmGetState\n}", "func (m *mParcelMockGetSign) Expect() *mParcelMockGetSign {\n\tm.mock.GetSignFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetSignExpectation{}\n\t}\n\n\treturn m\n}", "func (mmCreateTag *mTagCreatorMockCreateTag) Expect(t1 semantic.Tag) *mTagCreatorMockCreateTag {\n\tif mmCreateTag.mock.funcCreateTag != 
nil {\n\t\tmmCreateTag.mock.t.Fatalf(\"TagCreatorMock.CreateTag mock is already set by Set\")\n\t}\n\n\tif mmCreateTag.defaultExpectation == nil {\n\t\tmmCreateTag.defaultExpectation = &TagCreatorMockCreateTagExpectation{}\n\t}\n\n\tmmCreateTag.defaultExpectation.params = &TagCreatorMockCreateTagParams{t1}\n\tfor _, e := range mmCreateTag.expectations {\n\t\tif minimock.Equal(e.params, mmCreateTag.defaultExpectation.params) {\n\t\t\tmmCreateTag.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmCreateTag.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmCreateTag\n}", "func (m *MockActorUsecase) EXPECT() *MockActorUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockGetCaller) Expect() *mParcelMockGetCaller {\n\tm.mock.GetCallerFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetCallerExpectation{}\n\t}\n\n\treturn m\n}", "func mockAlwaysRun() bool { return true }", "func (m *MockArg) EXPECT() *MockArgMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (st *SDKTester) Test(resp interface{}) {\n\tif resp == nil || st.respWant == nil {\n\t\tst.t.Logf(\"response want/got is nil, abort\\n\")\n\t\treturn\n\t}\n\n\trespMap := st.getFieldMap(resp)\n\tfor i, v := range st.respWant {\n\t\tif reflect.DeepEqual(v, 
respMap[i]) {\n\t\t\tcontinue\n\t\t}\n\t\tswitch x := respMap[i].(type) {\n\t\tcase Stringer:\n\t\t\tif !assert.Equal(st.t, v, x.String()) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\tcase map[string]interface{}:\n\t\t\tif value, ok := x[\"Value\"]; ok {\n\t\t\t\tif !assert.Equal(st.t, v, value) {\n\t\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t\t}\n\t\t\t}\n\t\tcase Inter:\n\t\t\tif !assert.Equal(st.t, v, x.Int()) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\tdefault:\n\t\t\tif !assert.Equal(st.t, v, respMap[i]) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *MockCreator) EXPECT() *MockCreatorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockCreator) EXPECT() *MockCreatorMockRecorder {\n\treturn m.recorder\n}", "func TestCallFunc_arguments(t *testing.T) {\n\n}", "func (m *mParcelMockGetSender) Expect() *mParcelMockGetSender {\n\tm.mock.GetSenderFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetSenderExpectation{}\n\t}\n\n\treturn m\n}", "func TestGetNone4A(t *testing.T) {\n}", "func expectEqual(value, expected interface{}) {\n\tif value != expected {\n\t\tfmt.Printf(\"Fehler: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t} else {\n\t\tfmt.Printf(\"OK: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t}\n}", "func (mmHasPendings *mClientMockHasPendings) Expect(ctx context.Context, object insolar.Reference) *mClientMockHasPendings {\n\tif mmHasPendings.mock.funcHasPendings != nil {\n\t\tmmHasPendings.mock.t.Fatalf(\"ClientMock.HasPendings mock is already set by Set\")\n\t}\n\n\tif mmHasPendings.defaultExpectation == nil {\n\t\tmmHasPendings.defaultExpectation = &ClientMockHasPendingsExpectation{}\n\t}\n\n\tmmHasPendings.defaultExpectation.params = &ClientMockHasPendingsParams{ctx, object}\n\tfor _, e := range 
mmHasPendings.expectations {\n\t\tif minimock.Equal(e.params, mmHasPendings.defaultExpectation.params) {\n\t\t\tmmHasPendings.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmHasPendings.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmHasPendings\n}", "func (mmGetPacketSignature *mPacketParserMockGetPacketSignature) Expect() *mPacketParserMockGetPacketSignature {\n\tif mmGetPacketSignature.mock.funcGetPacketSignature != nil {\n\t\tmmGetPacketSignature.mock.t.Fatalf(\"PacketParserMock.GetPacketSignature mock is already set by Set\")\n\t}\n\n\tif mmGetPacketSignature.defaultExpectation == nil {\n\t\tmmGetPacketSignature.defaultExpectation = &PacketParserMockGetPacketSignatureExpectation{}\n\t}\n\n\treturn mmGetPacketSignature\n}", "func Run(t testing.TB, cloud cloud.Client, src string, opts ...RunOption) {\n\n\tif cloud == nil {\n\t\tcloud = mockcloud.Client(nil)\n\t}\n\n\tvm := otto.New()\n\n\tpkg, err := godotto.Apply(context.Background(), vm, cloud)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvm.Set(\"cloud\", pkg)\n\tvm.Set(\"equals\", func(call otto.FunctionCall) otto.Value {\n\t\tvm := call.Otto\n\t\tgot, err := call.Argument(0).Export()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\twant, err := call.Argument(1).Export()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\tok, cause := deepEqual(got, want)\n\t\tif ok {\n\t\t\treturn otto.UndefinedValue()\n\t\t}\n\t\tmsg := \"assertion failed!\\n\" + cause\n\n\t\tif len(call.ArgumentList) > 2 {\n\t\t\tformat, err := call.ArgumentList[2].ToString()\n\t\t\tif err != nil {\n\t\t\t\tottoutil.Throw(vm, err.Error())\n\t\t\t}\n\t\t\tmsg += \"\\n\" + format\n\t\t}\n\t\tottoutil.Throw(vm, msg)\n\t\treturn otto.UndefinedValue()\n\t})\n\tvm.Set(\"assert\", func(call otto.FunctionCall) otto.Value {\n\t\tvm := call.Otto\n\t\tv, err := call.Argument(0).ToBoolean()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\tif v {\n\t\t\treturn 
otto.UndefinedValue()\n\t\t}\n\t\tmsg := \"assertion failed!\"\n\t\tif len(call.ArgumentList) > 1 {\n\t\t\tformat, err := call.ArgumentList[1].ToString()\n\t\t\tif err != nil {\n\t\t\t\tottoutil.Throw(vm, err.Error())\n\t\t\t}\n\t\t\tmsg += \"\\n\" + format\n\t\t}\n\t\tottoutil.Throw(vm, msg)\n\t\treturn otto.UndefinedValue()\n\t})\n\tscript, err := vm.Compile(\"\", src)\n\tif err != nil {\n\t\tt.Fatalf(\"invalid code: %v\", err)\n\t}\n\n\tfor _, opt := range opts {\n\t\tif err := opt(vm); err != nil {\n\t\t\tt.Fatalf(\"can't apply option: %v\", err)\n\t\t}\n\t}\n\n\tif _, err := vm.Run(script); err != nil {\n\t\tif oe, ok := err.(*otto.Error); ok {\n\t\t\tt.Fatal(oe.String())\n\t\t} else {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func TestSetGoodArgs(t *testing.T) {\n\tfmt.Println(\"Entering the test method for SetGoodArgs\")\n\tprovcc := new(SimpleAsset)\n\tstub := shim.NewMockStub(\"ANY_PARAM\", provcc)\n\n\t// Testing the init. It always return true. No parameters in init. \n\t\n\tcheckInit(t, stub, [][]byte{[]byte(\"init\")})\n\n\tres := stub.MockInvoke(\"1\", [][]byte{[]byte(\"set\"), []byte(\"S52fkpF2rCEArSuwqyDA9tVjawUdrkGzbNQLaa7xJfA=\"),\n\t[]byte(\"agentInfo.atype\"),[]byte(\"1.2.3.4\"),\n\t[]byte(\"agentInfo.id\"),[]byte(\"agentidentifier\"),\n\t[]byte(\"agentinfo.name\"),[]byte(\"7.8.9\"),\n\t[]byte(\"agentinfo.idp\"),[]byte(\"urn:tiani-spirit:sts\"),\n\t[]byte(\"locationInfo.id\"),[]byte(\"urn:oid:1.2.3\"),\n\t[]byte(\"locationInfo.name\"),[]byte(\"General Hospital\"),\n\t[]byte(\"locationInfo.locality\"),[]byte(\"Nashville, TN\"),\n\t[]byte(\"locationInfo.docid\"),[]byte(\"1.2.3\"),\n\t[]byte(\"action\"),[]byte(\"ex:CREATE\"),\n\t[]byte(\"date\"),[]byte(\"2018-11-10T12:15:55.028Z\")})\n\n\tif res.Status != shim.OK {\n\t\tfmt.Println(\"Invoke failed\", string(res.Message))\n\t\tt.FailNow()\n\t}\n\t\n}", "func (mmRegisterResult *mClientMockRegisterResult) Expect(ctx context.Context, request insolar.Reference, result RequestResult) 
*mClientMockRegisterResult {\n\tif mmRegisterResult.mock.funcRegisterResult != nil {\n\t\tmmRegisterResult.mock.t.Fatalf(\"ClientMock.RegisterResult mock is already set by Set\")\n\t}\n\n\tif mmRegisterResult.defaultExpectation == nil {\n\t\tmmRegisterResult.defaultExpectation = &ClientMockRegisterResultExpectation{}\n\t}\n\n\tmmRegisterResult.defaultExpectation.params = &ClientMockRegisterResultParams{ctx, request, result}\n\tfor _, e := range mmRegisterResult.expectations {\n\t\tif minimock.Equal(e.params, mmRegisterResult.defaultExpectation.params) {\n\t\t\tmmRegisterResult.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmRegisterResult.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmRegisterResult\n}", "func Mock() Cluster { return mockCluster{} }", "func (m *MockS3API) EXPECT() *MockS3APIMockRecorder {\n\treturn m.recorder\n}", "func mockedGranter(kubeutil *kube.Kube, app *v1.RadixRegistration, namespace string, serviceAccount *corev1.ServiceAccount) error {\n\treturn nil\n}", "func (mmGetPendings *mClientMockGetPendings) Expect(ctx context.Context, objectRef insolar.Reference) *mClientMockGetPendings {\n\tif mmGetPendings.mock.funcGetPendings != nil {\n\t\tmmGetPendings.mock.t.Fatalf(\"ClientMock.GetPendings mock is already set by Set\")\n\t}\n\n\tif mmGetPendings.defaultExpectation == nil {\n\t\tmmGetPendings.defaultExpectation = &ClientMockGetPendingsExpectation{}\n\t}\n\n\tmmGetPendings.defaultExpectation.params = &ClientMockGetPendingsParams{ctx, objectRef}\n\tfor _, e := range mmGetPendings.expectations {\n\t\tif minimock.Equal(e.params, mmGetPendings.defaultExpectation.params) {\n\t\t\tmmGetPendings.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetPendings.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetPendings\n}", "func (m *MockOrg) EXPECT() *MockOrgMockRecorder {\n\treturn m.recorder\n}", "func (mmGetUserLocation *mStorageMockGetUserLocation) Expect(ctx context.Context, userID int64) 
*mStorageMockGetUserLocation {\n\tif mmGetUserLocation.mock.funcGetUserLocation != nil {\n\t\tmmGetUserLocation.mock.t.Fatalf(\"StorageMock.GetUserLocation mock is already set by Set\")\n\t}\n\n\tif mmGetUserLocation.defaultExpectation == nil {\n\t\tmmGetUserLocation.defaultExpectation = &StorageMockGetUserLocationExpectation{}\n\t}\n\n\tmmGetUserLocation.defaultExpectation.params = &StorageMockGetUserLocationParams{ctx, userID}\n\tfor _, e := range mmGetUserLocation.expectations {\n\t\tif minimock.Equal(e.params, mmGetUserLocation.defaultExpectation.params) {\n\t\t\tmmGetUserLocation.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetUserLocation.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetUserLocation\n}", "func (mmCreate *mPaymentRepositoryMockCreate) Expect(ctx context.Context, from int64, to int64, amount int64) *mPaymentRepositoryMockCreate {\n\tif mmCreate.mock.funcCreate != nil {\n\t\tmmCreate.mock.t.Fatalf(\"PaymentRepositoryMock.Create mock is already set by Set\")\n\t}\n\n\tif mmCreate.defaultExpectation == nil {\n\t\tmmCreate.defaultExpectation = &PaymentRepositoryMockCreateExpectation{}\n\t}\n\n\tmmCreate.defaultExpectation.params = &PaymentRepositoryMockCreateParams{ctx, from, to, amount}\n\tfor _, e := range mmCreate.expectations {\n\t\tif minimock.Equal(e.params, mmCreate.defaultExpectation.params) {\n\t\t\tmmCreate.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmCreate.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmCreate\n}", "func (mmAuther *mGatewayMockAuther) Expect() *mGatewayMockAuther {\n\tif mmAuther.mock.funcAuther != nil {\n\t\tmmAuther.mock.t.Fatalf(\"GatewayMock.Auther mock is already set by Set\")\n\t}\n\n\tif mmAuther.defaultExpectation == nil {\n\t\tmmAuther.defaultExpectation = &GatewayMockAutherExpectation{}\n\t}\n\n\treturn mmAuther\n}", "func TestObjectsMeetReq(t *testing.T) {\n\tvar kr verifiable.StorageReader\n\tvar kw verifiable.StorageWriter\n\n\tvar m 
verifiable.MutatorService\n\n\tvar o verifiable.AuthorizationOracle\n\n\tkr = &memory.TransientStorage{}\n\tkw = &memory.TransientStorage{}\n\n\tkr = &bolt.Storage{}\n\tkw = &bolt.Storage{}\n\n\tkr = &badger.Storage{}\n\tkw = &badger.Storage{}\n\n\tm = &instant.Mutator{}\n\tm = (&batch.Mutator{}).MustCreate()\n\n\to = policy.Open\n\to = &policy.Static{}\n\n\tlog.Println(kr, kw, m, o) // \"use\" these so that go compiler will be quiet\n}", "func (mmInvoke *mContainerMockInvoke) Expect(function interface{}) *mContainerMockInvoke {\n\tif mmInvoke.mock.funcInvoke != nil {\n\t\tmmInvoke.mock.t.Fatalf(\"ContainerMock.Invoke mock is already set by Set\")\n\t}\n\n\tif mmInvoke.defaultExpectation == nil {\n\t\tmmInvoke.defaultExpectation = &ContainerMockInvokeExpectation{}\n\t}\n\n\tmmInvoke.defaultExpectation.params = &ContainerMockInvokeParams{function}\n\tfor _, e := range mmInvoke.expectations {\n\t\tif minimock.Equal(e.params, mmInvoke.defaultExpectation.params) {\n\t\t\tmmInvoke.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmInvoke.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmInvoke\n}", "func (mmGetPosition *mStoreMockGetPosition) Expect(account string, contractID string) *mStoreMockGetPosition {\n\tif mmGetPosition.mock.funcGetPosition != nil {\n\t\tmmGetPosition.mock.t.Fatalf(\"StoreMock.GetPosition mock is already set by Set\")\n\t}\n\n\tif mmGetPosition.defaultExpectation == nil {\n\t\tmmGetPosition.defaultExpectation = &StoreMockGetPositionExpectation{}\n\t}\n\n\tmmGetPosition.defaultExpectation.params = &StoreMockGetPositionParams{account, contractID}\n\tfor _, e := range mmGetPosition.expectations {\n\t\tif minimock.Equal(e.params, mmGetPosition.defaultExpectation.params) {\n\t\t\tmmGetPosition.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetPosition.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetPosition\n}", "func (mmGetAbandonedRequest *mClientMockGetAbandonedRequest) Expect(ctx context.Context, 
objectRef insolar.Reference, reqRef insolar.Reference) *mClientMockGetAbandonedRequest {\n\tif mmGetAbandonedRequest.mock.funcGetAbandonedRequest != nil {\n\t\tmmGetAbandonedRequest.mock.t.Fatalf(\"ClientMock.GetAbandonedRequest mock is already set by Set\")\n\t}\n\n\tif mmGetAbandonedRequest.defaultExpectation == nil {\n\t\tmmGetAbandonedRequest.defaultExpectation = &ClientMockGetAbandonedRequestExpectation{}\n\t}\n\n\tmmGetAbandonedRequest.defaultExpectation.params = &ClientMockGetAbandonedRequestParams{ctx, objectRef, reqRef}\n\tfor _, e := range mmGetAbandonedRequest.expectations {\n\t\tif minimock.Equal(e.params, mmGetAbandonedRequest.defaultExpectation.params) {\n\t\t\tmmGetAbandonedRequest.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetAbandonedRequest.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetAbandonedRequest\n}", "func (mmSend *mSenderMockSend) Expect(ctx context.Context, email Email) *mSenderMockSend {\n\tif mmSend.mock.funcSend != nil {\n\t\tmmSend.mock.t.Fatalf(\"SenderMock.Send mock is already set by Set\")\n\t}\n\n\tif mmSend.defaultExpectation == nil {\n\t\tmmSend.defaultExpectation = &SenderMockSendExpectation{}\n\t}\n\n\tmmSend.defaultExpectation.params = &SenderMockSendParams{ctx, email}\n\tfor _, e := range mmSend.expectations {\n\t\tif minimock.Equal(e.params, mmSend.defaultExpectation.params) {\n\t\t\tmmSend.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmSend.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmSend\n}", "func callAndVerify(msg string, client pb.GreeterClient, shouldFail bool) error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\t_, err := client.SayHello(ctx, &pb.HelloRequest{Name: msg})\n\tif want, got := shouldFail == true, err != nil; got != want {\n\t\treturn fmt.Errorf(\"want and got mismatch, want shouldFail=%v, got fail=%v, rpc error: %v\", want, got, err)\n\t}\n\treturn nil\n}", "func (m *Mockrequester) EXPECT() 
*MockrequesterMockRecorder {\n\treturn m.recorder\n}", "func expectEqual(actual interface{}, extra interface{}, explain ...interface{}) {\n\tgomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)\n}", "func (m *MockstackDescriber) EXPECT() *MockstackDescriberMockRecorder {\n\treturn m.recorder\n}", "func (req *outgoingRequest) Assert(t *testing.T, fixture *fixture) {\n\tassert.Equal(t, req.path, fixture.calledPath, \"called path not as expected\")\n\tassert.Equal(t, req.method, fixture.calledMethod, \"called path not as expected\")\n\tassert.Equal(t, req.body, fixture.requestBody, \"call body no as expected\")\n}", "func (mmVerify *mDelegationTokenFactoryMockVerify) Expect(parcel mm_insolar.Parcel) *mDelegationTokenFactoryMockVerify {\n\tif mmVerify.mock.funcVerify != nil {\n\t\tmmVerify.mock.t.Fatalf(\"DelegationTokenFactoryMock.Verify mock is already set by Set\")\n\t}\n\n\tif mmVerify.defaultExpectation == nil {\n\t\tmmVerify.defaultExpectation = &DelegationTokenFactoryMockVerifyExpectation{}\n\t}\n\n\tmmVerify.defaultExpectation.params = &DelegationTokenFactoryMockVerifyParams{parcel}\n\tfor _, e := range mmVerify.expectations {\n\t\tif minimock.Equal(e.params, mmVerify.defaultExpectation.params) {\n\t\t\tmmVerify.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmVerify.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmVerify\n}", "func (mmRead *mDigestHolderMockRead) Expect(p []byte) *mDigestHolderMockRead {\n\tif mmRead.mock.funcRead != nil {\n\t\tmmRead.mock.t.Fatalf(\"DigestHolderMock.Read mock is already set by Set\")\n\t}\n\n\tif mmRead.defaultExpectation == nil {\n\t\tmmRead.defaultExpectation = &DigestHolderMockReadExpectation{}\n\t}\n\n\tmmRead.defaultExpectation.params = &DigestHolderMockReadParams{p}\n\tfor _, e := range mmRead.expectations {\n\t\tif minimock.Equal(e.params, mmRead.defaultExpectation.params) {\n\t\t\tmmRead.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", 
*mmRead.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmRead\n}", "func (mmSend *mClientMockSend) Expect(ctx context.Context, n *Notification) *mClientMockSend {\n\tif mmSend.mock.funcSend != nil {\n\t\tmmSend.mock.t.Fatalf(\"ClientMock.Send mock is already set by Set\")\n\t}\n\n\tif mmSend.defaultExpectation == nil {\n\t\tmmSend.defaultExpectation = &ClientMockSendExpectation{}\n\t}\n\n\tmmSend.defaultExpectation.params = &ClientMockSendParams{ctx, n}\n\tfor _, e := range mmSend.expectations {\n\t\tif minimock.Equal(e.params, mmSend.defaultExpectation.params) {\n\t\t\tmmSend.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmSend.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmSend\n}", "func (mmAsByteString *mDigestHolderMockAsByteString) Expect() *mDigestHolderMockAsByteString {\n\tif mmAsByteString.mock.funcAsByteString != nil {\n\t\tmmAsByteString.mock.t.Fatalf(\"DigestHolderMock.AsByteString mock is already set by Set\")\n\t}\n\n\tif mmAsByteString.defaultExpectation == nil {\n\t\tmmAsByteString.defaultExpectation = &DigestHolderMockAsByteStringExpectation{}\n\t}\n\n\treturn mmAsByteString\n}", "func Expect(msg string) error {\n\tif msg != \"\" {\n\t\treturn errors.New(msg)\n\t} else {\n\t\treturn nil\n\t}\n}", "func (mmEncrypt *mRingMockEncrypt) Expect(t1 secrets.Text) *mRingMockEncrypt {\n\tif mmEncrypt.mock.funcEncrypt != nil {\n\t\tmmEncrypt.mock.t.Fatalf(\"RingMock.Encrypt mock is already set by Set\")\n\t}\n\n\tif mmEncrypt.defaultExpectation == nil {\n\t\tmmEncrypt.defaultExpectation = &RingMockEncryptExpectation{}\n\t}\n\n\tmmEncrypt.defaultExpectation.params = &RingMockEncryptParams{t1}\n\tfor _, e := range mmEncrypt.expectations {\n\t\tif minimock.Equal(e.params, mmEncrypt.defaultExpectation.params) {\n\t\t\tmmEncrypt.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmEncrypt.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmEncrypt\n}", "func (mmBootstrapper *mGatewayMockBootstrapper) Expect() 
*mGatewayMockBootstrapper {\n\tif mmBootstrapper.mock.funcBootstrapper != nil {\n\t\tmmBootstrapper.mock.t.Fatalf(\"GatewayMock.Bootstrapper mock is already set by Set\")\n\t}\n\n\tif mmBootstrapper.defaultExpectation == nil {\n\t\tmmBootstrapper.defaultExpectation = &GatewayMockBootstrapperExpectation{}\n\t}\n\n\treturn mmBootstrapper\n}", "func (m *MockNotary) EXPECT() *MockNotaryMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockSetSender) Expect(p insolar.Reference) *mParcelMockSetSender {\n\tm.mock.SetSenderFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockSetSenderExpectation{}\n\t}\n\tm.mainExpectation.input = &ParcelMockSetSenderInput{p}\n\treturn m\n}", "func (mmGetPacketType *mPacketParserMockGetPacketType) Expect() *mPacketParserMockGetPacketType {\n\tif mmGetPacketType.mock.funcGetPacketType != nil {\n\t\tmmGetPacketType.mock.t.Fatalf(\"PacketParserMock.GetPacketType mock is already set by Set\")\n\t}\n\n\tif mmGetPacketType.defaultExpectation == nil {\n\t\tmmGetPacketType.defaultExpectation = &PacketParserMockGetPacketTypeExpectation{}\n\t}\n\n\treturn mmGetPacketType\n}", "func (mmParsePacketBody *mPacketParserMockParsePacketBody) Expect() *mPacketParserMockParsePacketBody {\n\tif mmParsePacketBody.mock.funcParsePacketBody != nil {\n\t\tmmParsePacketBody.mock.t.Fatalf(\"PacketParserMock.ParsePacketBody mock is already set by Set\")\n\t}\n\n\tif mmParsePacketBody.defaultExpectation == nil {\n\t\tmmParsePacketBody.defaultExpectation = &PacketParserMockParsePacketBodyExpectation{}\n\t}\n\n\treturn mmParsePacketBody\n}", "func (mmAsBytes *mDigestHolderMockAsBytes) Expect() *mDigestHolderMockAsBytes {\n\tif mmAsBytes.mock.funcAsBytes != nil {\n\t\tmmAsBytes.mock.t.Fatalf(\"DigestHolderMock.AsBytes mock is already set by Set\")\n\t}\n\n\tif mmAsBytes.defaultExpectation == nil {\n\t\tmmAsBytes.defaultExpectation = &DigestHolderMockAsBytesExpectation{}\n\t}\n\n\treturn mmAsBytes\n}", 
"func (m *MockArticleLogic) EXPECT() *MockArticleLogicMockRecorder {\n\treturn m.recorder\n}", "func (mmKey *mIteratorMockKey) Expect() *mIteratorMockKey {\n\tif mmKey.mock.funcKey != nil {\n\t\tmmKey.mock.t.Fatalf(\"IteratorMock.Key mock is already set by Set\")\n\t}\n\n\tif mmKey.defaultExpectation == nil {\n\t\tmmKey.defaultExpectation = &IteratorMockKeyExpectation{}\n\t}\n\n\treturn mmKey\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *mOutboundMockCanAccept) Expect(p Inbound) *mOutboundMockCanAccept {\n\tm.mock.CanAcceptFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &OutboundMockCanAcceptExpectation{}\n\t}\n\tm.mainExpectation.input = &OutboundMockCanAcceptInput{p}\n\treturn m\n}", "func (m *MockLoaderFactory) EXPECT() *MockLoaderFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockPKG) EXPECT() *MockPKGMockRecorder {\n\treturn m.recorder\n}", "func (m *MockbucketDescriber) EXPECT() *MockbucketDescriberMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockType) Expect() *mParcelMockType {\n\tm.mock.TypeFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockTypeExpectation{}\n\t}\n\n\treturn m\n}", "func (m *MockKeystore) EXPECT() *MockKeystoreMockRecorder {\n\treturn m.recorder\n}", "func (m *MockKeystore) EXPECT() *MockKeystoreMockRecorder {\n\treturn m.recorder\n}", "func (mmExchange *mMDNSClientMockExchange) Expect(msg *mdns.Msg, address string) *mMDNSClientMockExchange {\n\tif mmExchange.mock.funcExchange != nil {\n\t\tmmExchange.mock.t.Fatalf(\"MDNSClientMock.Exchange mock is already set by Set\")\n\t}\n\n\tif mmExchange.defaultExpectation == nil {\n\t\tmmExchange.defaultExpectation = 
&MDNSClientMockExchangeExpectation{}\n\t}\n\n\tmmExchange.defaultExpectation.params = &MDNSClientMockExchangeParams{msg, address}\n\tfor _, e := range mmExchange.expectations {\n\t\tif minimock.Equal(e.params, mmExchange.defaultExpectation.params) {\n\t\t\tmmExchange.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmExchange.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmExchange\n}", "func (m *MockStream) EXPECT() *MockStreamMockRecorder {\n\treturn m.recorder\n}", "func (c Chkr) Expect(v validator, args ...interface{}) {\n\tif c.runTest(v, args...) {\n\t\tc.Fail()\n\t}\n}", "func (mmClone *mStorageMockClone) Expect(ctx context.Context, from insolar.PulseNumber, to insolar.PulseNumber, keepActual bool) *mStorageMockClone {\n\tif mmClone.mock.funcClone != nil {\n\t\tmmClone.mock.t.Fatalf(\"StorageMock.Clone mock is already set by Set\")\n\t}\n\n\tif mmClone.defaultExpectation == nil {\n\t\tmmClone.defaultExpectation = &StorageMockCloneExpectation{}\n\t}\n\n\tmmClone.defaultExpectation.params = &StorageMockCloneParams{ctx, from, to, keepActual}\n\tfor _, e := range mmClone.expectations {\n\t\tif minimock.Equal(e.params, mmClone.defaultExpectation.params) {\n\t\t\tmmClone.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmClone.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmClone\n}", "func (m *MockCodeGenerator) EXPECT() *MockCodeGeneratorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockNodeAttestor) EXPECT() *MockNodeAttestorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockNodeAttestor) EXPECT() *MockNodeAttestorMockRecorder {\n\treturn m.recorder\n}", "func (_m *MockIStream) EXPECT() *MockIStreamMockRecorder {\n\treturn _m.recorder\n}", "func (m *mOutboundMockGetEndpointType) Expect() *mOutboundMockGetEndpointType {\n\tm.mock.GetEndpointTypeFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &OutboundMockGetEndpointTypeExpectation{}\n\t}\n\n\treturn m\n}", 
"func (m *MockAZInfoProvider) EXPECT() *MockAZInfoProviderMockRecorder {\n\treturn m.recorder\n}" ]
[ "0.58157563", "0.5714918", "0.5672776", "0.5639812", "0.56273276", "0.5573085", "0.5567367", "0.5529613", "0.55066866", "0.5486919", "0.54729885", "0.54647803", "0.5460882", "0.54414886", "0.5440682", "0.5405729", "0.54035264", "0.53890616", "0.53831995", "0.53831995", "0.5369224", "0.53682834", "0.5358863", "0.5340405", "0.5338385", "0.5327707", "0.53230935", "0.53132576", "0.5307127", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.53035146", "0.5295391", "0.5295391", "0.5291368", "0.52822006", "0.52821374", "0.52767164", "0.5273333", "0.5273239", "0.5265769", "0.52593946", "0.52572596", "0.5256972", "0.52545565", "0.5249454", "0.52421427", "0.52410823", "0.5238541", "0.52360845", "0.5235068", "0.5227199", "0.5227038", "0.52227145", "0.52144563", "0.5212412", "0.52120364", "0.5211835", "0.5211705", "0.5208191", "0.5194654", "0.5190334", "0.51877177", "0.5187148", "0.5185659", "0.51827794", "0.51817787", "0.5175451", "0.51730126", "0.5169131", "0.5167294", "0.5162394", "0.51599216", "0.51597583", "0.5159494", "0.51442164", "0.51442164", "0.51442164", "0.5143891", "0.51437116", "0.51395434", "0.51341194", "0.5133995", "0.51337904", "0.51337904", "0.51298875", "0.5129523", "0.5128482", "0.5123544", "0.51224196", "0.51162475", "0.51162475", "0.51148367", "0.51146877", "0.51091874" ]
0.0
-1
IsIngressValid mocks base method
func (m *MockValidater) IsIngressValid(ingress *v1.Ingress) (bool, string) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "IsIngressValid", ingress) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(string) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestCurrencyInstructedAmountValid(t *testing.T) {\n\tcia := mockCurrencyInstructedAmount()\n\tcia.Amount = \"1-0\"\n\n\terr := cia.Validate()\n\n\trequire.EqualError(t, err, fieldError(\"Amount\", ErrNonAmount, cia.Amount).Error())\n}", "func (m MigProfile) AssertValid() error {\n\tmatch, err := regexp.MatchString(`^[0-9]+g\\.[0-9]+gb$`, string(m))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error running regex: %v\", err)\n\t}\n\tif match {\n\t\treturn nil\n\t}\n\n\tmatch, err = regexp.MatchString(`^[0-9]+c\\.[0-9]+g\\.[0-9]+gb$`, string(m))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error running regex: %v\", err)\n\t}\n\tif match {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"no match for format %%dc.%%dg.%%dgb or %%dg.%%dgb\")\n}", "func (m *MockAPI) IsInstallable(arg0 *models.Host) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsInstallable\", arg0)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *MockRepo) IsValid() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsValid\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestCheckValidCurve(t *testing.T) {\n\tfmt.Println(CheckValidCurve())\n}", "func (m *MockHasher) IsValid(arg0, arg1 string) bool {\n\tret := m.ctrl.Call(m, \"IsValid\", arg0, arg1)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestPositiveValidation(t *testing.T){\n\tlog.Printf(\"Astrix goes for Peace - Pass good responses\")\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tconn, err := grpc.DialContext(ctx,\"bufnet\", grpc.WithDialer(bufDialer),grpc.WithInsecure())\n\tif err != nil {\n\t\tt.Fatalf(\"did not connect: %v\", err)\n\t}\n\tdefer conn.Close()\n\tc := pb.NewSearchServiceClient(conn)\n\t\tr, err := c.Search(ctx, &pb.SearchRequest{Query: \"Protocol Buffer\",EmailId: \"[email protected]\"})\n\tif err != nil {\n\t\tt.Fatalf(\"could not execute search: %v\", err)\n\t}\n\t//Lets validate the respose\n\tres := r.Validate()\n\tif res != nil 
{\n\t\tt.Fatalf(\"Response validation failed: %v\", err)\n\t}\n\tlog.Printf(\"Greeting: %s\", r.SearchResponse)\n}", "func TestValidateIATBHOriginatorStatusCode(t *testing.T) {\n\ttestValidateIATBHOriginatorStatusCode(t)\n}", "func (m *MockAPIConfigFromFlags) Validate() error {\n\tret := m.ctrl.Call(m, \"Validate\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestValid(t *testing.T) {\n\tinput := \"abcdefg\"\n\toutput := Invalid(input)\n\n\tif output != false {\n\t\tt.Error(\"Valid test failed\")\n\t}\n}", "func (m *MockLoadBalance) IsNamespaced() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsNamespaced\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func validateEgressGateway(ctx context.Context, cli client.Client, egw *operatorv1.EgressGateway) error {\n\tnativeIP := operatorv1.NativeIPDisabled\n\tif egw.Spec.AWS != nil && egw.Spec.AWS.NativeIP != nil {\n\t\tnativeIP = *egw.Spec.AWS.NativeIP\n\t}\n\n\t// Validate IPPools specified.\n\t// If name is specified, check IPPool exists.\n\t// If CIDR is specified, check if CIDR matches with any IPPool.\n\t// If Aws.NativeIP is enabled, check if the IPPool is backed by aws-subnet ID.\n\tif len(egw.Spec.IPPools) == 0 {\n\t\treturn fmt.Errorf(\"At least one IPPool must be specified\")\n\t}\n\n\tfor _, ippool := range egw.Spec.IPPools {\n\t\terr := validateIPPool(ctx, cli, ippool, nativeIP)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, externalNetwork := range egw.Spec.ExternalNetworks {\n\t\terr := validateExternalNetwork(ctx, cli, externalNetwork)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Check if ElasticIPs are specified only if NativeIP is enabled.\n\tif egw.Spec.AWS != nil {\n\t\tif len(egw.Spec.AWS.ElasticIPs) > 0 && (*egw.Spec.AWS.NativeIP == operatorv1.NativeIPDisabled) {\n\t\t\treturn fmt.Errorf(\"NativeIP must be enabled when elastic IPs are used\")\n\t\t}\n\t}\n\n\t// Check if neither ICMPProbe nor HTTPProbe is configured.\n\tif 
egw.Spec.EgressGatewayFailureDetection != nil {\n\t\tif egw.Spec.EgressGatewayFailureDetection.ICMPProbe == nil &&\n\t\t\tegw.Spec.EgressGatewayFailureDetection.HTTPProbe == nil {\n\t\t\treturn fmt.Errorf(\"Either ICMP or HTTP probe must be configured\")\n\t\t}\n\t\t// Check if ICMP and HTTP probe timeout is greater than interval.\n\t\tif egw.Spec.EgressGatewayFailureDetection.ICMPProbe != nil {\n\t\t\tif *egw.Spec.EgressGatewayFailureDetection.ICMPProbe.TimeoutSeconds <\n\t\t\t\t*egw.Spec.EgressGatewayFailureDetection.ICMPProbe.IntervalSeconds {\n\t\t\t\treturn fmt.Errorf(\"ICMP probe timeout must be greater than interval\")\n\t\t\t}\n\t\t}\n\t\tif egw.Spec.EgressGatewayFailureDetection.HTTPProbe != nil {\n\t\t\tif *egw.Spec.EgressGatewayFailureDetection.HTTPProbe.TimeoutSeconds <\n\t\t\t\t*egw.Spec.EgressGatewayFailureDetection.HTTPProbe.IntervalSeconds {\n\t\t\t\treturn fmt.Errorf(\"HTTP probe timeout must be greater than interval\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func IsMockInvalid(cc ContractCall) bool {\n\treturn false\n}", "func TestIntegratedServiceManager_ValidateSpec(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tspec integratedservices.IntegratedServiceSpec\n\t\tchecker func(err error) bool\n\t}{\n\t\t{\n\t\t\tname: \"initial test case\",\n\t\t\tspec: integratedservices.IntegratedServiceSpec{\n\t\t\t\t\"customAnchore\": obj{\n\t\t\t\t\t\"enabled\": true,\n\t\t\t\t\t\"url\": \"anchore.example.com\", // mandatory\n\t\t\t\t\t\"secretId\": \"mysecretid\", // mandatory\n\t\t\t\t},\n\t\t\t\t\"policy\": obj{\n\t\t\t\t\t\"policyId\": \"myPolicyID, select, from backend\",\n\t\t\t\t},\n\t\t\t\t\"releaseWhiteList\": []obj{ // optional\n\t\t\t\t\t{\n\t\t\t\t\t\t\"name\": \"name of release 1\", // mandatory\n\t\t\t\t\t\t\"reason\": \"reason of whitelisting\", // mandatory\n\t\t\t\t\t\t\"regexp\": \"whitelisted-[0-1]{2}.[a-z]{2,3}-releases\", // optional\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"name\": \"name of release 
2\",\n\t\t\t\t\t\t\"reason\": \"reason of whitelisting\",\n\t\t\t\t\t\t\"regexp\": \"whitelisted-[0-1]{2}.[a-z]{2,3}-releases\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"webhookConfig\": obj{\n\t\t\t\t\t\"enabled\": true, //\n\t\t\t\t\t\"selector\": \"include or exclude\", // mandatory\n\t\t\t\t\t\"namespaces\": []string{\"default\", \"test\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tchecker: func(err error) bool {\n\t\t\t\treturn false\n\t\t\t},\n\t\t},\n\t\t// todo add more test fixtures here\n\t}\n\n\tctx := context.Background()\n\tintegratedServiceManager := MakeIntegratedServiceManager(nil, Config{})\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\terr := integratedServiceManager.ValidateSpec(ctx, test.spec)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"test failed with errors: %v\", err)\n\t\t\t}\n\t\t})\n\t}\n}", "func testValidateIATBHOriginatorStatusCode(t testing.TB) {\n\tbh := mockIATBatchHeaderFF()\n\tbh.OriginatorStatusCode = 7\n\terr := bh.Validate()\n\tif !base.Match(err, ErrOrigStatusCode) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestBaseGradedBlock_Valid(t *testing.T) {\n\tt.Run(\"V1\", func(t *testing.T) {\n\t\ttestBaseGradedBlock_valid(t, 1)\n\t})\n\tt.Run(\"V2\", func(t *testing.T) {\n\t\ttestBaseGradedBlock_valid(t, 2)\n\t})\n\tt.Run(\"V3\", func(t *testing.T) {\n\t\ttestBaseGradedBlock_valid(t, 3)\n\t})\n\tt.Run(\"V4\", func(t *testing.T) {\n\t\ttestBaseGradedBlock_valid(t, 4)\n\t})\n\tt.Run(\"V5\", func(t *testing.T) {\n\t\ttestBaseGradedBlock_valid(t, 5)\n\t})\n}", "func (m *MockNetworkDescriber) IsIPv6Enabled() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsIPv6Enabled\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *MockClusterScoper) IsIPv6Enabled() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsIPv6Enabled\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *MockIDistributedEnforcer) EnforceEx(arg0 ...interface{}) (bool, []string, error) {\n\tm.ctrl.T.Helper()\n\tvarargs 
:= []interface{}{}\n\tfor _, a := range arg0 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"EnforceEx\", varargs...)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].([]string)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (m *MockClientChooser) AllowInsecure(path string) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AllowInsecure\", path)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func validIngress() *extensions.Ingress {\n\treturn newIngress(map[string]utils.FakeIngressRuleValueMap{\n\t\t\"foo.bar.com\": testPathMap,\n\t})\n}", "func Test_QualityOperation_IsValidTrue(t *testing.T) {\n\timg := MakeMockMutableImage()\n\top := &QualityOperation{\n\t\tImage: &img,\n\t\tNewQuality: 90,\n\t}\n\n\tassert.Equal(t, true, op.IsValid())\n}", "func TestAdjustmentAmountValid(t *testing.T) {\n\tadj := mockAdjustment()\n\tadj.RemittanceAmount.Amount = \"X,\"\n\n\terr := adj.Validate()\n\n\trequire.EqualError(t, err, fieldError(\"Amount\", ErrNonAmount, adj.RemittanceAmount.Amount).Error())\n}", "func (m *MockDynamicNode) Bounds() zounds.Rectangle {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Bounds\")\n\tret0, _ := ret[0].(zounds.Rectangle)\n\treturn ret0\n}", "func TestTestComand_Validate(t *testing.T) {\n\tdur, _ := time.ParseDuration(\"2s\")\n\tctx, _ := context.WithTimeout(context.Background(), dur)\n\n\tcom := base.NewTestCommand(\"test\")\n\n\tprops := com.Properties()\n\tif validProp, err := props.Get(base_test.PROPERTY_ID_OPERATIONVALID); err != nil {\n\t\tt.Error(\"TestCommand Properties() did not provide a ValidProperty\")\n\t} else {\n\t\tvalidProp.Set(false)\n\t\tres := com.Validate(props)\n\t\tselect {\n\t\tcase <-res.Finished():\n\t\t\tif res.Success() {\n\t\t\t\tt.Error(\"TestCommand thinks it is valid when it shouldn't be\")\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tt.Error(\"TestCommand Validate timed out: \", ctx.Err().Error())\n\t\t}\n\n\t\tvalidProp.Set(true)\n\t\tres = 
com.Validate(props)\n\t\tselect {\n\t\tcase <-res.Finished():\n\t\t\tif !res.Success() {\n\t\t\t\tt.Error(\"TestCommand thinks it is invald when it shouldn't be\")\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tt.Error(\"TestCommand Validate timed out: \", ctx.Err().Error())\n\t\t}\n\t}\n}", "func (m *MockValidater) CheckNoConflictsInIngress(ingress *v1.Ingress) (bool, string) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CheckNoConflictsInIngress\", ingress)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(string)\n\treturn ret0, ret1\n}", "func (m *MockisAclSelector_SegSelector) isAclSelector_SegSelector() {\n\tm.ctrl.Call(m, \"isAclSelector_SegSelector\")\n}", "func TestValidatePending(t *testing.T) {\n\tsender, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tencodedSenderPublicKey, _ := publickey.Encode(&sender.PublicKey)\n\tsenderPKH := hashing.New(encodedSenderPublicKey)\n\trecipient, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tencodedRecipientPublicKey, _ := publickey.Encode(&recipient.PublicKey)\n\trecipientPKH := hashing.New(encodedRecipientPublicKey)\n\n\tzeroValueContract, _ := contracts.New(1, sender, recipientPKH, 0, 1)\n\tzeroValueContract.Sign(sender)\n\n\tnilSenderContract, _ := contracts.New(1, nil, senderPKH, 500, 1)\n\n\tsenderRecipContract, _ := contracts.New(1, sender, senderPKH, 500, 1)\n\tsenderRecipContract.Sign(sender)\n\n\tinvalidSignatureContract, _ := contracts.New(1, sender, recipientPKH, 500, 1)\n\tinvalidSignatureContract.Sign(recipient)\n\n\tinsufficentFundsContract, _ := contracts.New(1, sender, recipientPKH, 2000000, 1)\n\tinsufficentFundsContract.Sign(sender)\n\n\tinvalidNonceContract, _ := contracts.New(1, sender, recipientPKH, 20, 0)\n\tinvalidNonceContract.Sign(sender)\n\n\tinvalidNonceContract2, _ := contracts.New(1, sender, recipientPKH, 20, 2)\n\tinvalidNonceContract2.Sign(sender)\n\n\t// Start: pBalance = 100, pNonce = 0\n\tvalidFirstContract, _ := contracts.New(1, sender, recipientPKH, 50, 
1)\n\tvalidFirstContract.Sign(sender)\n\n\t// pBalance = 50, pNonce = 1\n\tkeyNotInTable, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tencodedSenderPublicKey, err := publickey.Encode(&keyNotInTable.PublicKey)\n\tif err != nil {\n\t\tt.Errorf(\"failure to encode Sender Public Key: %v\", err)\n\t}\n\tkeyNotInTablePKH := hashing.New(encodedSenderPublicKey)\n\n\tInvalidBalanceContract, _ := contracts.New(1, sender, keyNotInTablePKH, 51, 2)\n\tInvalidBalanceContract.Sign(sender)\n\n\tInvalidNonceContract, _ := contracts.New(1, sender, keyNotInTablePKH, 20, 3)\n\tInvalidNonceContract.Sign(sender)\n\n\tValidSecondContract, _ := contracts.New(1, sender, keyNotInTablePKH, 50, 2)\n\tValidSecondContract.Sign(sender)\n\n\ttests := []struct {\n\t\tname string\n\t\tc *contracts.Contract\n\t\tpBalance uint64\n\t\tpNonce uint64\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"Zero value\",\n\t\t\tc: zeroValueContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Nil sender\",\n\t\t\tc: nilSenderContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Sender == Recipient\",\n\t\t\tc: senderRecipContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Invalid signature\",\n\t\t\tc: invalidSignatureContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Insufficient funds\",\n\t\t\tc: insufficentFundsContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Invalid nonce\",\n\t\t\tc: invalidNonceContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Invalid nonce 2\",\n\t\t\tc: invalidNonceContract2,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Totally valid\",\n\t\t\tc: validFirstContract,\n\t\t\tpBalance: 100,\n\t\t\tpNonce: 0,\n\t\t\twantErr: 
false,\n\t\t},\n\t\t{\n\t\t\tname: \"Invalid balance\",\n\t\t\tc: InvalidBalanceContract,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Invalid state nonce\",\n\t\t\tc: InvalidNonceContract,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Totally valid 2\",\n\t\t\tc: ValidSecondContract,\n\t\t\twantErr: false,\n\t\t},\n\t}\n\n\tvar updatedBal uint64\n\tvar updatedNonce uint64\n\tfor i, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif i > 7 {\n\t\t\t\ttt.pBalance = updatedBal\n\t\t\t\ttt.pNonce = updatedNonce\n\t\t\t}\n\n\t\t\tvar err error\n\t\t\terr = ValidatePending(tt.c, &tt.pBalance, &tt.pNonce)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"ValidatePending() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tupdatedBal = tt.pBalance\n\t\t\tupdatedNonce = tt.pNonce\n\t\t})\n\t}\n}", "func (s *k8sStore) ingressIsValid(ing *networkingv1.Ingress) bool {\n\tvar endpointKey string\n\tif ing.Spec.DefaultBackend != nil { // stream\n\t\tendpointKey = fmt.Sprintf(\"%s/%s\", ing.Namespace, ing.Spec.DefaultBackend.Service.Name)\n\t} else { // http\n\tLoop:\n\t\tfor _, rule := range ing.Spec.Rules {\n\t\t\tfor _, path := range rule.IngressRuleValue.HTTP.Paths {\n\t\t\t\tendpointKey = fmt.Sprintf(\"%s/%s\", ing.Namespace, path.Backend.Service.Name)\n\t\t\t\tif endpointKey != \"\" {\n\t\t\t\t\tbreak Loop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\titem, exists, err := s.listers.Endpoint.GetByKey(endpointKey)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Can not get endpoint by key(%s): %v\", endpointKey, err)\n\t\treturn false\n\t}\n\tif !exists {\n\t\tlogrus.Debugf(\"Endpoint %s does not exist.\", endpointKey)\n\t\treturn false\n\t}\n\tendpoint, ok := item.(*corev1.Endpoints)\n\tif !ok {\n\t\tlogrus.Errorf(\"Cant not convert %v to %v\", reflect.TypeOf(item), reflect.TypeOf(endpoint))\n\t\treturn false\n\t}\n\tif len(endpoint.Subsets) == 0 {\n\t\tlogrus.Debugf(\"Endpoints(%s) is empty, ignore it\", endpointKey)\n\t\treturn 
false\n\t}\n\tif !hasReadyAddresses(endpoint) {\n\t\tlogrus.Debugf(\"Endpoints(%s) is empty, ignore it\", endpointKey)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (m *MockisCryptoAsymApiReqSetupPrivateKeyEx_Key) isCryptoAsymApiReqSetupPrivateKeyEx_Key() {\n\tm.ctrl.Call(m, \"isCryptoAsymApiReqSetupPrivateKeyEx_Key\")\n}", "func (m *MockisKey_KeyInfo) isKey_KeyInfo() {\n\tm.ctrl.Call(m, \"isKey_KeyInfo\")\n}", "func (m *MockAssets) ValidStateName(path string) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidStateName\", path)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestValidDoesNotPanic(t *testing.T) {\n\tvar s1, s2 GKArray\n\tvar q float64\n\tnTests := 100\n\tfuzzer := fuzz.New()\n\tfor i := 0; i < nTests; i++ {\n\t\tfuzzer.Fuzz(&s1)\n\t\tfuzzer.Fuzz(&s2)\n\t\tfuzzer.Fuzz(&q)\n\t\ts1 = makeValid(s1)\n\t\ts2 = makeValid(s2)\n\t\tassert.True(t, isValid(s1))\n\t\tassert.True(t, isValid(s2))\n\t\tassert.NotPanics(t, func() { s1.Quantile(q); s1.Merge(&s2) })\n\t}\n}", "func (m *MockisCryptoApiRequest_CryptoApiReq) isCryptoApiRequest_CryptoApiReq() {\n\tm.ctrl.Call(m, \"isCryptoApiRequest_CryptoApiReq\")\n}", "func mockedGranter(kubeutil *kube.Kube, app *v1.RadixRegistration, namespace string, serviceAccount *corev1.ServiceAccount) error {\n\treturn nil\n}", "func (mr *MockValidaterMockRecorder) IsIngressValid(ingress interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IsIngressValid\", reflect.TypeOf((*MockValidater)(nil).IsIngressValid), ingress)\n}", "func assertInvariants(t *testing.T, msg string,\n\tpOrig Pool, cOrig Candidates, pMod Pool, cMods Candidates, tokens int64) {\n\n\t// total tokens conserved\n\trequire.Equal(t,\n\t\tpOrig.UnbondedPool+pOrig.BondedPool,\n\t\tpMod.UnbondedPool+pMod.BondedPool+tokens,\n\t\t\"Tokens not conserved - msg: %v\\n, pOrig.BondedShares: %v, pOrig.UnbondedShares: %v, pMod.BondedShares: %v, pMod.UnbondedShares: %v, pOrig.UnbondedPool: %v, 
pOrig.BondedPool: %v, pMod.UnbondedPool: %v, pMod.BondedPool: %v, tokens: %v\\n\",\n\t\tmsg,\n\t\tpOrig.BondedShares, pOrig.UnbondedShares,\n\t\tpMod.BondedShares, pMod.UnbondedShares,\n\t\tpOrig.UnbondedPool, pOrig.BondedPool,\n\t\tpMod.UnbondedPool, pMod.BondedPool, tokens)\n\n\t// nonnegative bonded shares\n\trequire.False(t, pMod.BondedShares.LT(sdk.ZeroRat()),\n\t\t\"Negative bonded shares - msg: %v\\npOrig: %#v\\npMod: %#v\\ntokens: %v\\n\",\n\t\tmsg, pOrig, pMod, tokens)\n\n\t// nonnegative unbonded shares\n\trequire.False(t, pMod.UnbondedShares.LT(sdk.ZeroRat()),\n\t\t\"Negative unbonded shares - msg: %v\\npOrig: %#v\\npMod: %#v\\ntokens: %v\\n\",\n\t\tmsg, pOrig, pMod, tokens)\n\n\t// nonnegative bonded ex rate\n\trequire.False(t, pMod.bondedShareExRate().LT(sdk.ZeroRat()),\n\t\t\"Applying operation \\\"%s\\\" resulted in negative bondedShareExRate: %d\",\n\t\tmsg, pMod.bondedShareExRate().Evaluate())\n\n\t// nonnegative unbonded ex rate\n\trequire.False(t, pMod.unbondedShareExRate().LT(sdk.ZeroRat()),\n\t\t\"Applying operation \\\"%s\\\" resulted in negative unbondedShareExRate: %d\",\n\t\tmsg, pMod.unbondedShareExRate().Evaluate())\n\n\tfor _, cMod := range cMods {\n\n\t\t// nonnegative ex rate\n\t\trequire.False(t, cMod.delegatorShareExRate().LT(sdk.ZeroRat()),\n\t\t\t\"Applying operation \\\"%s\\\" resulted in negative candidate.delegatorShareExRate(): %v (candidate.Address: %s)\",\n\t\t\tmsg,\n\t\t\tcMod.delegatorShareExRate(),\n\t\t\tcMod.Address,\n\t\t)\n\n\t\t// nonnegative assets\n\t\trequire.False(t, cMod.Assets.LT(sdk.ZeroRat()),\n\t\t\t\"Applying operation \\\"%s\\\" resulted in negative candidate.Assets: %v (candidate.Liabilities: %v, candidate.delegatorShareExRate: %v, candidate.Address: %s)\",\n\t\t\tmsg,\n\t\t\tcMod.Assets,\n\t\t\tcMod.Liabilities,\n\t\t\tcMod.delegatorShareExRate(),\n\t\t\tcMod.Address,\n\t\t)\n\n\t\t// nonnegative liabilities\n\t\trequire.False(t, cMod.Liabilities.LT(sdk.ZeroRat()),\n\t\t\t\"Applying operation \\\"%s\\\" 
resulted in negative candidate.Liabilities: %v (candidate.Assets: %v, candidate.delegatorShareExRate: %v, candidate.Address: %s)\",\n\t\t\tmsg,\n\t\t\tcMod.Liabilities,\n\t\t\tcMod.Assets,\n\t\t\tcMod.delegatorShareExRate(),\n\t\t\tcMod.Address,\n\t\t)\n\n\t}\n\n}", "func (m *MockCacheOptions) Validate() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Validate\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestIsValid(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tlevel Level\n\t\texpect bool\n\t}{\n\t\t{name: \"Panic Level\", level: Panic, expect: true},\n\t\t{name: \"Fatal Level\", level: Fatal, expect: true},\n\t\t{name: \"Error Level\", level: Error, expect: true},\n\t\t{name: \"Overflow Level\", level: overflowLevelValue + 1, expect: false},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tif got := test.level.IsValid(); got != test.expect {\n\t\t\t\tt.Errorf(\"IsValid = %v; want %v\", got, test.expect)\n\t\t\t}\n\t\t})\n\t}\n}", "func (m *MockStaticNode) Bounds() zounds.Rectangle {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Bounds\")\n\tret0, _ := ret[0].(zounds.Rectangle)\n\treturn ret0\n}", "func (m *MockNetworkDescriber) IsVnetManaged() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsVnetManaged\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestValidateIATBHOriginatorIdentification(t *testing.T) {\n\ttestValidateIATBHOriginatorIdentification(t)\n}", "func Test_QualityOperation_IsValidFalse(t *testing.T) {\n\timg := MakeMockMutableImage()\n\top := &QualityOperation{\n\t\tImage: &img,\n\t\tNewQuality: -12,\n\t}\n\n\tassert.Equal(t, false, op.IsValid())\n\n\top2 := &QualityOperation{\n\t\tImage: &img,\n\t\tNewQuality: 500,\n\t}\n\n\tassert.Equal(t, false, op2.IsValid())\n}", "func (m *MockIAssistant) GslbIngressExposedIPs(gslb *v1beta1.Gslb) ([]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GslbIngressExposedIPs\", gslb)\n\tret0, _ := 
ret[0].([]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClusterScoper) IsVnetManaged() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsVnetManaged\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *MockKernelData) IsObjectAffine(obj client.Object) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsObjectAffine\", obj)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestGridValidation(t *testing.T) {\n\t// Check that a config that should be good produces no errors.\n\t{\n\t\tconfig := validGridSearchConfig()\n\t\tassert.NilError(t, check.Validate(config))\n\t}\n\n\t// Check that too many trials triggers an error.\n\t{\n\t\tconfig := validGridSearchConfig()\n\t\tconfig.Hyperparameters[\"log\"].LogHyperparameter.Count = intP(MaxAllowedTrials)\n\t\tconfig.Hyperparameters[\"int\"].IntHyperparameter.Count = intP(2)\n\t\tassert.ErrorContains(t, check.Validate(config), \"number of trials\")\n\t}\n\n\t// Check that counts for int hyperparameters are clamped properly.\n\t{\n\t\tconfig := validGridSearchConfig()\n\t\tconfig.Hyperparameters[\"log\"].LogHyperparameter.Count = intP(1)\n\t\tconfig.Hyperparameters[\"int\"].IntHyperparameter.Count = intP(100000)\n\t\tassert.NilError(t, check.Validate(config))\n\t}\n\n\t// Check that a missing count triggers an error.\n\t{\n\t\tconfig := validGridSearchConfig()\n\t\tconfig.Hyperparameters[\"log\"].LogHyperparameter.Count = nil\n\t\tassert.ErrorContains(t, check.Validate(config), \"must specify counts for grid search: log\")\n\t}\n}", "func TestCreditDebitIndicatorValid(t *testing.T) {\n\tadj := mockAdjustment()\n\tadj.CreditDebitIndicator = \"ZZZZ\"\n\n\terr := adj.Validate()\n\n\trequire.EqualError(t, err, fieldError(\"CreditDebitIndicator\", ErrCreditDebitIndicator, adj.CreditDebitIndicator).Error())\n}", "func TestRetryRequired(t *testing.T) {\n\tcheck := assert.New(t)\n\tretryRequired := checkRetryRequired(http.StatusServiceUnavailable)\n\tcheck.Equal(retryRequired, 
true)\n}", "func (m *MockSecurityGroupServiceIface) NewAuthorizeSecurityGroupEgressParams() *AuthorizeSecurityGroupEgressParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewAuthorizeSecurityGroupEgressParams\")\n\tret0, _ := ret[0].(*AuthorizeSecurityGroupEgressParams)\n\treturn ret0\n}", "func TestGxClient(t *testing.T) {\n\tserverConfig := &diameter.DiameterServerConfig{DiameterServerConnConfig: diameter.DiameterServerConnConfig{\n\t\tAddr: \"127.0.0.1:3898\",\n\t\tProtocol: \"tcp\"},\n\t}\n\tclientConfig := getClientConfig()\n\tstartServer(clientConfig, serverConfig)\n\tgxClient := gx.NewGxClient(\n\t\tclientConfig,\n\t\tserverConfig,\n\t\tgetMockReAuthHandler(),\n\t\tnil,\n\t)\n\n\t// send init\n\tccrInit := &gx.CreditControlRequest{\n\t\tSessionID: \"1\",\n\t\tType: credit_control.CRTInit,\n\t\tIMSI: testIMSI1,\n\t\tRequestNumber: 0,\n\t\tIPAddr: \"192.168.1.1\",\n\t\tSpgwIPV4: \"10.10.10.10\",\n\t}\n\tdone := make(chan interface{}, 1000)\n\n\tassert.NoError(t, gxClient.SendCreditControlRequest(serverConfig, done, ccrInit))\n\tanswer := gx.GetAnswer(done)\n\tassert.Equal(t, ccrInit.SessionID, answer.SessionID)\n\tassert.Equal(t, ccrInit.RequestNumber, answer.RequestNumber)\n\tassert.Equal(t, 5, len(answer.RuleInstallAVP))\n\n\tvar ruleNames []string\n\tvar ruleBaseNames []string\n\tvar ruleDefinitions []*gx.RuleDefinition\n\tfor _, installRule := range answer.RuleInstallAVP {\n\t\truleNames = append(ruleNames, installRule.RuleNames...)\n\t\truleBaseNames = append(ruleBaseNames, installRule.RuleBaseNames...)\n\t\truleDefinitions = append(ruleDefinitions, installRule.RuleDefinitions...)\n\t}\n\tassert.ElementsMatch(t, imsi1Rules, ruleNames)\n\tassert.ElementsMatch(t, imsi1BaseRules, ruleBaseNames)\n\tassert.Equal(t, 1, len(ruleDefinitions))\n\tassert.Equal(t, \"dynrule1\", ruleDefinitions[0].RuleName)\n\tassert.Equal(t, \"mkey\", *ruleDefinitions[0].MonitoringKey)\n\tassert.Equal(t, uint32(128000), *ruleDefinitions[0].Qos.MaxReqBwUL)\n\tassert.Equal(t, 
uint32(128000), *ruleDefinitions[0].Qos.MaxReqBwDL)\n\tif ruleDefinitions[0].Qos.GbrUL != nil {\n\t\tassert.Equal(t, uint32(64000), *ruleDefinitions[0].Qos.GbrUL)\n\t}\n\tif ruleDefinitions[0].Qos.GbrDL != nil {\n\t\tassert.Equal(t, uint32(64000), *ruleDefinitions[0].Qos.GbrDL)\n\t}\n\tif ruleDefinitions[0].Qos.Qci != nil {\n\t\tassert.Equal(t, int32(8), int32(*ruleDefinitions[0].Qos.Qci))\n\t}\n\n\t// send terminate\n\tccrTerminate := &gx.CreditControlRequest{\n\t\tSessionID: \"1\",\n\t\tType: credit_control.CRTTerminate,\n\t\tIMSI: testIMSI1,\n\t\tRequestNumber: 0,\n\t\tIPAddr: \"192.168.1.1\",\n\t}\n\tassert.NoError(t, gxClient.SendCreditControlRequest(serverConfig, done, ccrTerminate))\n\tterminate := gx.GetAnswer(done)\n\tassert.Equal(t, ccrTerminate.SessionID, terminate.SessionID)\n\tassert.Equal(t, ccrTerminate.RequestNumber, terminate.RequestNumber)\n\tassert.Empty(t, terminate.RuleInstallAVP)\n\n\t// send init\n\tccrInit = &gx.CreditControlRequest{\n\t\tSessionID: \"2\",\n\t\tType: credit_control.CRTInit,\n\t\tIMSI: testIMSI3,\n\t\tRequestNumber: 0,\n\t\tIPAddr: \"192.168.1.1\",\n\t\tSpgwIPV4: \"10.10.10.10\",\n\t}\n\tassert.NoError(t, gxClient.SendCreditControlRequest(serverConfig, done, ccrInit))\n\tanswer = gx.GetAnswer(done)\n\tassert.Equal(t, ccrInit.SessionID, answer.SessionID)\n\tassert.Equal(t, ccrInit.RequestNumber, answer.RequestNumber)\n\tassert.Equal(t, 5, len(answer.RuleInstallAVP))\n\n\truleNames = []string{}\n\truleBaseNames = []string{}\n\truleDefinitions = []*gx.RuleDefinition{}\n\tfor _, installRule := range answer.RuleInstallAVP {\n\t\truleNames = append(ruleNames, installRule.RuleNames...)\n\t\truleBaseNames = append(ruleBaseNames, installRule.RuleBaseNames...)\n\t\truleDefinitions = append(ruleDefinitions, installRule.RuleDefinitions...)\n\t}\n\tassert.ElementsMatch(t, imsi3Rules, ruleNames)\n\tassert.ElementsMatch(t, imsi3BaseRules, ruleBaseNames)\n\tassert.Equal(t, 1, len(ruleDefinitions))\n\tassert.Equal(t, \"dynrule3\", 
ruleDefinitions[0].RuleName)\n\tassert.Equal(t, \"mkey3\", *ruleDefinitions[0].MonitoringKey)\n\tassert.Equal(t, uint32(1), ruleDefinitions[0].RedirectInformation.RedirectSupport)\n\tassert.Equal(t, uint32(2), ruleDefinitions[0].RedirectInformation.RedirectAddressType)\n\tassert.Equal(t, \"http://www.example.com/\", ruleDefinitions[0].RedirectInformation.RedirectServerAddress)\n\n\t// send terminate\n\tccrTerminate = &gx.CreditControlRequest{\n\t\tSessionID: \"2\",\n\t\tType: credit_control.CRTTerminate,\n\t\tIMSI: testIMSI3,\n\t\tRequestNumber: 0,\n\t\tIPAddr: \"192.168.1.1\",\n\t}\n\tassert.NoError(t, gxClient.SendCreditControlRequest(serverConfig, done, ccrTerminate))\n\tterminate = gx.GetAnswer(done)\n\tassert.Equal(t, ccrTerminate.SessionID, terminate.SessionID)\n\tassert.Equal(t, ccrTerminate.RequestNumber, terminate.RequestNumber)\n\tassert.Empty(t, terminate.RuleInstallAVP)\n\n\t// Connection Disabling should cause CCR to fail\n\tgxClient.DisableConnections(10 * time.Second)\n\tassert.Error(t, gxClient.SendCreditControlRequest(serverConfig, done, ccrInit))\n\n\t// CCR Success after Enabling\n\tgxClient.EnableConnections()\n\tassert.NoError(t, gxClient.SendCreditControlRequest(serverConfig, done, ccrInit))\n\n\thwaddr, err := net.ParseMAC(\"00:00:5e:00:53:01\")\n\tassert.NoError(t, err)\n\tipv6addr := gx.Ipv6PrefixFromMAC(hwaddr)\n\tassert.Equal(t, ipv6addr[:6], []byte{0, 0x80, 0xfd, 0xfa, 0xce, 0xb0})\n\tassert.NotEqual(t, ipv6addr[6:10], []byte{0x0c, 0xab, 0xcd, 0xef})\n\tassert.Equal(t, ipv6addr[10:], []byte{0x2, 0x0, 0x5e, 0xff, 0xfe, 0x0, 0x53, 0x1})\n\n}", "func TestValidateIATBHServiceClassCode(t *testing.T) {\n\ttestValidateIATBHServiceClassCode(t)\n}", "func AssertEksSubnetRequired(obj EksSubnet) error {\n\treturn nil\n}", "func testValidateIATBHStandardEntryClassCode(t testing.TB) {\n\tbh := mockIATBatchHeaderFF()\n\tbh.StandardEntryClassCode = \"ABC\"\n\terr := bh.Validate()\n\tif !base.Match(err, ErrSECCode) {\n\t\tt.Errorf(\"%T: %s\", err, 
err)\n\t}\n}", "func (b *BaseImpl) ShouldCheckBound() bool {\n\treturn true\n}", "func Test_ApplyOperation_IsValidTrue(t *testing.T) {\n\timg := MakeMockMutableImage()\n\top := &ApplyOperation{\n\t\tImage: &img,\n\t}\n\tassert.Equal(t, true, op.IsValid())\n}", "func (m *MockapprunnerDescriber) IsPrivate() (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsPrivate\")\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestValues_IsValueValidAgainstRange(t *testing.T) {\n\tvar err error\n\n\t// We did not simulate these formats in full specs\n\terr = IsValueValidAgainstRange(float32(123.45), \"number\", \"float32\", \"prefix\", \"path\")\n\tassert.NoError(t, err)\n\n\terr = IsValueValidAgainstRange(float64(123.45), \"number\", \"float32\", \"prefix\", \"path\")\n\tassert.NoError(t, err)\n\n\terr = IsValueValidAgainstRange(int64(123), \"number\", \"float\", \"prefix\", \"path\")\n\tassert.NoError(t, err)\n\n\terr = IsValueValidAgainstRange(int64(123), \"integer\", \"\", \"prefix\", \"path\")\n\tassert.NoError(t, err)\n\n\terr = IsValueValidAgainstRange(int64(123), \"integer\", \"int64\", \"prefix\", \"path\")\n\tassert.NoError(t, err)\n\n\terr = IsValueValidAgainstRange(int64(123), \"integer\", \"uint64\", \"prefix\", \"path\")\n\tassert.NoError(t, err)\n\n\t// Error case (do not occur in normal course of a validation)\n\terr = IsValueValidAgainstRange(float64(math.MaxFloat64), \"integer\", \"\", \"prefix\", \"path\")\n\tif assert.Error(t, err) {\n\t\tassert.Contains(t, err.Error(), \"must be of type integer (default format)\")\n\t}\n\n\t// Checking a few limits\n\terr = IsValueValidAgainstRange(\"123\", \"number\", \"\", \"prefix\", \"path\")\n\tif assert.Error(t, err) {\n\t\tassert.Contains(t, err.Error(), \"called with invalid (non numeric) val type\")\n\t}\n\n\terr = IsValueValidAgainstRange(int64(2147483647), \"integer\", \"int32\", \"prefix\", \"path\")\n\tassert.NoError(t, err)\n\n\terr = 
IsValueValidAgainstRange(int64(2147483647), \"integer\", \"uint32\", \"prefix\", \"path\")\n\tassert.NoError(t, err)\n}", "func Test_validateBinarySearchTree(t *testing.T) {\n\n}", "func (m *MockIDistributedEnforcer) HasPolicy(arg0 ...interface{}) bool {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range arg0 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"HasPolicy\", varargs...)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (checker *CheckerType) NeedsExpectedValue() bool {\n return true\n}", "func (ab *AbTest) AbTestIn(buvid string) (ok bool) {\n\tration := crc32.ChecksumIEEE([]byte(buvid)) % 100\n\tif ration < uint32(ab.Threshold) {\n\t\tok = true\n\t}\n\treturn\n}", "func TestIsAllowed(t *testing.T) {\n\tdummyProxy := &proxy{\n\t\tvalidHostsSupplier: func() []string {\n\t\t\treturn []string{\"test1.com\", \"test2.io\", \"test3.org\"}\n\t\t},\n\t}\n\n\tassert.Equal(t, false, dummyProxy.isAllowed(\"\"))\n\tassert.Equal(t, false, dummyProxy.isAllowed(\"test1.org\"))\n\tassert.Equal(t, false, dummyProxy.isAllowed(\"test4.com\"))\n\tassert.Equal(t, true, dummyProxy.isAllowed(\"test2.io\"))\n\n\tdummyProxy = &proxy{\n\t\tvalidHostsSupplier: func() []string {\n\t\t\treturn []string{\"*test1.com\", \"test2.io\", \"test3.org\"}\n\t\t},\n\t}\n\n\tassert.Equal(t, true, dummyProxy.isAllowed(\"123test1.com\"))\n\tassert.Equal(t, false, dummyProxy.isAllowed(\"123test1.io\"))\n\n\tdummyProxy = &proxy{\n\t\tvalidHostsSupplier: func() []string {\n\t\t\treturn []string{\"foo.%.alpha.com\", \"test2.io\", \"test3.org\"}\n\t\t},\n\t}\n\n\tassert.Equal(t, false, dummyProxy.isAllowed(\"123test1.com\"))\n\tassert.Equal(t, true, dummyProxy.isAllowed(\"foo.bar.alpha.com\"))\n\tassert.Equal(t, false, dummyProxy.isAllowed(\"foo.bar.baz.alpha.com\"))\n}", "func (m *MockUnsafeLinkServiceServer) mustEmbedUnimplementedLinkServiceServer() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"mustEmbedUnimplementedLinkServiceServer\")\n}", "func 
testValidateIATBHServiceClassCode(t testing.TB) {\n\tbh := mockIATBatchHeaderFF()\n\tbh.ServiceClassCode = 999\n\terr := bh.Validate()\n\tif !base.Match(err, ErrServiceClass) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func (m *MockMempool) EnableTxsAvailable() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"EnableTxsAvailable\")\n}", "func (m *MockMappedResource) HasAws() bool {\n\tret := m.ctrl.Call(m, \"HasAws\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *MockIDistributedEnforcer) Enforce(arg0 ...interface{}) (bool, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range arg0 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Enforce\", varargs...)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClusterScoper) AvailabilitySetEnabled() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AvailabilitySetEnabled\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestIsEnReservation(t *testing.T) {\n\ttests := struct {\n\t\tscheduleId int64\n\t}{128}\n\n\t_, err := ss.IsEnReservation(ctx, tests.scheduleId)\n\tif err != nil {\n\t\t//t.Errorf(\"IsEnReservation(%d) fail, err %s, result %+v\", tests.scheduleId, err, r)\n\t\treturn\n\t}\n}", "func (m *MockIDistributedEnforcer) HasGroupingPolicy(arg0 ...interface{}) bool {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range arg0 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"HasGroupingPolicy\", varargs...)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *MockRateLimiter) AllowN(now time.Time, numToken int) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AllowN\", now, numToken)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestPaymentMethodValid(t *testing.T) {\n\tpm := NewFIPaymentMethodToBeneficiary()\n\tpm.PaymentMethod = \"\"\n\n\terr := pm.Validate()\n\n\trequire.EqualError(t, err, fieldError(\"PaymentMethod\", ErrFieldInclusion, 
pm.PaymentMethod).Error())\n}", "func (m *MockMovableNode) Bounds() zounds.Rectangle {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Bounds\")\n\tret0, _ := ret[0].(zounds.Rectangle)\n\treturn ret0\n}", "func test_checkIfWorthReclaimingGas(t *testing.T) {\n\tworthIt, amountToReclaim, err := eth_gateway.EthWrapper.CheckIfWorthReclaimingGas(ethAddress01, eth_gateway.GasLimitETHSend)\n\n\tif worthIt {\n\t\tt.Logf(\"Should try to reclaim gas: %v\\n\", \"true\")\n\t} else {\n\t\tt.Logf(\"Should try to reclaim gas: %v\\n\", \"false\")\n\t}\n\n\tt.Logf(\"Will attempt to reclaim this much: %v\\n\", amountToReclaim.String())\n\n\tif err != nil {\n\t\tt.Fatalf(\"Received an error: %v\\n\", err.Error())\n\t}\n}", "func (m *MockTaskTable) HasSubtasksInStates(arg0 string, arg1, arg2 int64, arg3 ...interface{}) (bool, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1, arg2}\n\tfor _, a := range arg3 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"HasSubtasksInStates\", varargs...)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockManagedClusterScoper) AvailabilitySetEnabled() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AvailabilitySetEnabled\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *MockIDistributedEnforcer) EnforceExWithMatcher(arg0 string, arg1 ...interface{}) (bool, []string, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0}\n\tfor _, a := range arg1 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"EnforceExWithMatcher\", varargs...)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].([]string)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (m *MockRateLimiter) Allow() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Allow\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func registerValid(reg asm.Register) error {\n\tif reg > asm.R9 {\n\t\treturn errors.Errorf(\"invalid register %v\", 
reg)\n\t}\n\n\treturn nil\n}", "func (s *RateLimitSuite) TestSpecIsOK(c *C) {\n\tc.Assert(plugin.NewRegistry().AddSpec(GetSpec()), IsNil)\n}", "func (s *RateLimitSuite) TestSpecIsOK(c *C) {\n\tc.Assert(plugin.NewRegistry().AddSpec(GetSpec()), IsNil)\n}", "func (m *MockCandidatePropertyGetter) IsPublic() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsPublic\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (i identity) hgtValid() bool {\n\tif len(i.HGT) < 3 {\n\t\treturn false\n\t}\n\tswitch i.HGT[len(i.HGT)-2:] {\n\tcase \"cm\":\n\t\tn, err := strconv.Atoi(i.HGT[:len(i.HGT)-2])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn 150 <= n && n <= 193\n\tcase \"in\":\n\t\tn, err := strconv.Atoi(i.HGT[:len(i.HGT)-2])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn 59 <= n && n <= 76\n\t}\n\treturn false\n}", "func (d *devicelib) AssertValidMigProfileFormat(profile string) error {\n\t_, _, _, _, err := parseMigProfile(profile)\n\treturn err\n}", "func TestValidateState(t *testing.T) {\n\tclientset, err := k8sutils.MustGetClientset()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tconfig := k8sutils.MustGetRestConfig(t)\n\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)\n\tdefer cancel()\n\n\tvalidator, err := validate.CreateValidator(ctx, clientset, config, namespace, *cniType, *restartCase, *osType)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := validator.Validate(ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (m *MockSecurityGroupServiceIface) AuthorizeSecurityGroupEgress(p *AuthorizeSecurityGroupEgressParams) (*AuthorizeSecurityGroupEgressResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AuthorizeSecurityGroupEgress\", p)\n\tret0, _ := ret[0].(*AuthorizeSecurityGroupEgressResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestDelegatorProxyValidatorShares7Steps(t *testing.T) {\n\n}", "func (ctl *Ctl) SpecIsValid() (bool, error) {\n\treturn true, nil\n}", 
"func (ctl *Ctl) SpecIsValid() (bool, error) {\n\treturn true, nil\n}", "func ShouldNotBeIn(actual interface{}, expected ...interface{}) error {\n\tif err := atLeast(1, expected); err != nil {\n\t\treturn err\n\t}\n\n\tif err := ShouldBeIn(actual, expected...); err != nil {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"expected '%v' not in %v but it was\", actual, expected)\n}", "func (m *MockClusterDescriber) AvailabilitySetEnabled() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AvailabilitySetEnabled\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (s *HelpersS) TestCheckerInterface(c *gocheck.C) {\n testHelperSuccess(c, \"Check(1, Equals, 1)\", true, func() interface{} {\n return c.Check(1, gocheck.Equals, 1)\n })\n}", "func TestValidateContractPerms(t *testing.T) {\n\ttests := []struct {\n\t\tcRef string\n\t\tcidRoles string\n\t\tallow bool\n\t\tmsg string\n\t}{\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tcidRoles: \"user\",\n\t\t\tallow: false,\n\t\t\tmsg: \"Should not allow\",\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateWallet,\n\t\t\tcidRoles: \"user\",\n\t\t\tallow: true,\n\t\t\tmsg: \"Should allow\",\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tcidRoles: \"admin\",\n\t\t\tallow: true,\n\t\t\tmsg: \"Should allow\",\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateWallet,\n\t\t\tcidRoles: \"admin\",\n\t\t\tallow: false,\n\t\t\tmsg: \"Should not allow\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Logf(\"%v %v to invoke %v contract\", tt.msg, tt.cidRoles, tt.cRef)\n\n\t\tappAuth := simpleSetup(t, tt.cidRoles)\n\t\terr := appAuth.ValidateContractPerms(tt.cRef)\n\n\t\tif !tt.allow {\n\t\t\tassert.Error(t, err)\n\t\t} else {\n\t\t\tassert.NoError(t, err)\n\t\t}\n\t}\n}", "func (m *MockMappedResource) IsMapped() bool {\n\tret := m.ctrl.Call(m, \"IsMapped\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *MockUnsafePdfServiceServer) mustEmbedUnimplementedPdfServiceServer() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, 
\"mustEmbedUnimplementedPdfServiceServer\")\n}", "func validateCreateTestExonerationRequest(req *pb.CreateTestExonerationRequest, requireInvocation bool) error {\n\tif requireInvocation || req.Invocation != \"\" {\n\t\tif err := pbutil.ValidateInvocationName(req.Invocation); err != nil {\n\t\t\treturn errors.Annotate(err, \"invocation\").Err()\n\t\t}\n\t}\n\n\tex := req.GetTestExoneration()\n\tif err := pbutil.ValidateTestID(ex.GetTestId()); err != nil {\n\t\treturn errors.Annotate(err, \"test_exoneration: test_id\").Err()\n\t}\n\tif err := pbutil.ValidateVariant(ex.GetVariant()); err != nil {\n\t\treturn errors.Annotate(err, \"test_exoneration: variant\").Err()\n\t}\n\n\thasVariant := len(ex.GetVariant().GetDef()) != 0\n\thasVariantHash := ex.VariantHash != \"\"\n\tif hasVariant && hasVariantHash {\n\t\tcomputedHash := pbutil.VariantHash(ex.GetVariant())\n\t\tif computedHash != ex.VariantHash {\n\t\t\treturn errors.Reason(\"computed and supplied variant hash don't match\").Err()\n\t\t}\n\t}\n\n\tif err := pbutil.ValidateRequestID(req.RequestId); err != nil {\n\t\treturn errors.Annotate(err, \"request_id\").Err()\n\t}\n\n\tif ex.ExplanationHtml == \"\" {\n\t\treturn errors.Reason(\"test_exoneration: explanation_html: unspecified\").Err()\n\t}\n\tif ex.Reason == pb.ExonerationReason_EXONERATION_REASON_UNSPECIFIED {\n\t\treturn errors.Reason(\"test_exoneration: reason: unspecified\").Err()\n\t}\n\treturn nil\n}", "func TestAdjustmentCurrencyCodeValid(t *testing.T) {\n\tadj := mockAdjustment()\n\tadj.RemittanceAmount.CurrencyCode = \"XZP\"\n\n\terr := adj.Validate()\n\n\trequire.EqualError(t, err, fieldError(\"CurrencyCode\", ErrNonCurrencyCode, adj.RemittanceAmount.CurrencyCode).Error())\n}", "func AssertGeneralSuccessRequired(obj GeneralSuccess) error {\n\treturn nil\n}", "func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) {\n\trequire := require.New(t)\n\tclk := mockable.Clock{}\n\tctx := snow.DefaultContextTest()\n\tsigners := 
[][]*secp256k1.PrivateKey{preFundedKeys}\n\n\tvar (\n\t\tstx *Tx\n\t\taddSubnetValidatorTx *AddSubnetValidatorTx\n\t\terr error\n\t)\n\n\t// Case : signed tx is nil\n\terr = stx.SyntacticVerify(ctx)\n\trequire.ErrorIs(err, ErrNilSignedTx)\n\n\t// Case : unsigned tx is nil\n\terr = addSubnetValidatorTx.SyntacticVerify(ctx)\n\trequire.ErrorIs(err, ErrNilTx)\n\n\tvalidatorWeight := uint64(2022)\n\tsubnetID := ids.ID{'s', 'u', 'b', 'n', 'e', 't', 'I', 'D'}\n\tinputs := []*avax.TransferableInput{{\n\t\tUTXOID: avax.UTXOID{\n\t\t\tTxID: ids.ID{'t', 'x', 'I', 'D'},\n\t\t\tOutputIndex: 2,\n\t\t},\n\t\tAsset: avax.Asset{ID: ids.ID{'a', 's', 's', 'e', 't'}},\n\t\tIn: &secp256k1fx.TransferInput{\n\t\t\tAmt: uint64(5678),\n\t\t\tInput: secp256k1fx.Input{SigIndices: []uint32{0}},\n\t\t},\n\t}}\n\toutputs := []*avax.TransferableOutput{{\n\t\tAsset: avax.Asset{ID: ids.ID{'a', 's', 's', 'e', 't'}},\n\t\tOut: &secp256k1fx.TransferOutput{\n\t\t\tAmt: uint64(1234),\n\t\t\tOutputOwners: secp256k1fx.OutputOwners{\n\t\t\t\tThreshold: 1,\n\t\t\t\tAddrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()},\n\t\t\t},\n\t\t},\n\t}}\n\tsubnetAuth := &secp256k1fx.Input{\n\t\tSigIndices: []uint32{0, 1},\n\t}\n\taddSubnetValidatorTx = &AddSubnetValidatorTx{\n\t\tBaseTx: BaseTx{BaseTx: avax.BaseTx{\n\t\t\tNetworkID: ctx.NetworkID,\n\t\t\tBlockchainID: ctx.ChainID,\n\t\t\tIns: inputs,\n\t\t\tOuts: outputs,\n\t\t\tMemo: []byte{1, 2, 3, 4, 5, 6, 7, 8},\n\t\t}},\n\t\tSubnetValidator: SubnetValidator{\n\t\t\tValidator: Validator{\n\t\t\t\tNodeID: ctx.NodeID,\n\t\t\t\tStart: uint64(clk.Time().Unix()),\n\t\t\t\tEnd: uint64(clk.Time().Add(time.Hour).Unix()),\n\t\t\t\tWght: validatorWeight,\n\t\t\t},\n\t\t\tSubnet: subnetID,\n\t\t},\n\t\tSubnetAuth: subnetAuth,\n\t}\n\n\t// Case: valid tx\n\tstx, err = NewSigned(addSubnetValidatorTx, Codec, signers)\n\trequire.NoError(err)\n\trequire.NoError(stx.SyntacticVerify(ctx))\n\n\t// Case: Wrong network ID\n\taddSubnetValidatorTx.SyntacticallyVerified = 
false\n\taddSubnetValidatorTx.NetworkID++\n\tstx, err = NewSigned(addSubnetValidatorTx, Codec, signers)\n\trequire.NoError(err)\n\terr = stx.SyntacticVerify(ctx)\n\trequire.ErrorIs(err, avax.ErrWrongNetworkID)\n\taddSubnetValidatorTx.NetworkID--\n\n\t// Case: Specifies primary network SubnetID\n\taddSubnetValidatorTx.SyntacticallyVerified = false\n\taddSubnetValidatorTx.Subnet = ids.Empty\n\tstx, err = NewSigned(addSubnetValidatorTx, Codec, signers)\n\trequire.NoError(err)\n\terr = stx.SyntacticVerify(ctx)\n\trequire.ErrorIs(err, errAddPrimaryNetworkValidator)\n\taddSubnetValidatorTx.Subnet = subnetID\n\n\t// Case: No weight\n\taddSubnetValidatorTx.SyntacticallyVerified = false\n\taddSubnetValidatorTx.Wght = 0\n\tstx, err = NewSigned(addSubnetValidatorTx, Codec, signers)\n\trequire.NoError(err)\n\terr = stx.SyntacticVerify(ctx)\n\trequire.ErrorIs(err, ErrWeightTooSmall)\n\taddSubnetValidatorTx.Wght = validatorWeight\n\n\t// Case: Subnet auth indices not unique\n\taddSubnetValidatorTx.SyntacticallyVerified = false\n\tinput := addSubnetValidatorTx.SubnetAuth.(*secp256k1fx.Input)\n\toldInput := *input\n\tinput.SigIndices[0] = input.SigIndices[1]\n\tstx, err = NewSigned(addSubnetValidatorTx, Codec, signers)\n\trequire.NoError(err)\n\terr = stx.SyntacticVerify(ctx)\n\trequire.ErrorIs(err, secp256k1fx.ErrInputIndicesNotSortedUnique)\n\t*input = oldInput\n\n\t// Case: adding to Primary Network\n\taddSubnetValidatorTx.SyntacticallyVerified = false\n\taddSubnetValidatorTx.Subnet = constants.PrimaryNetworkID\n\tstx, err = NewSigned(addSubnetValidatorTx, Codec, signers)\n\trequire.NoError(err)\n\terr = stx.SyntacticVerify(ctx)\n\trequire.ErrorIs(err, errAddPrimaryNetworkValidator)\n}" ]
[ "0.5781869", "0.55637103", "0.5547249", "0.5535955", "0.54065716", "0.5282466", "0.5256136", "0.52264816", "0.52080566", "0.51688087", "0.5165131", "0.515119", "0.51441544", "0.51045865", "0.50919193", "0.50727695", "0.5067553", "0.5067023", "0.50631016", "0.5046651", "0.5031744", "0.50280446", "0.5013043", "0.5006258", "0.49929523", "0.49872828", "0.49770844", "0.4974381", "0.49715698", "0.49715438", "0.49578178", "0.49532938", "0.49502218", "0.49457398", "0.49337897", "0.4925424", "0.48962188", "0.48928088", "0.48856565", "0.48711842", "0.48572496", "0.4854828", "0.48397845", "0.4834684", "0.4822901", "0.4813677", "0.48123562", "0.48073444", "0.4797569", "0.47948086", "0.4792957", "0.47891572", "0.4779684", "0.47779247", "0.47720486", "0.47662282", "0.47553113", "0.4755269", "0.47468296", "0.47450304", "0.4742991", "0.47414935", "0.4739217", "0.47240007", "0.47212556", "0.47173402", "0.4716894", "0.47149166", "0.4709888", "0.46995813", "0.46946788", "0.4688516", "0.46878275", "0.46814257", "0.46801755", "0.4661147", "0.46595994", "0.46585387", "0.4653603", "0.46528608", "0.46494713", "0.46494713", "0.46492007", "0.46477854", "0.46466404", "0.46461362", "0.4641911", "0.46414804", "0.46377766", "0.46377766", "0.4633351", "0.46311113", "0.46279502", "0.46265692", "0.46204448", "0.46200538", "0.46142402", "0.4612312", "0.46099955", "0.46093786" ]
0.56673485
1
IsIngressValid indicates an expected call of IsIngressValid
func (mr *MockValidaterMockRecorder) IsIngressValid(ingress interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIngressValid", reflect.TypeOf((*MockValidater)(nil).IsIngressValid), ingress) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *k8sStore) ingressIsValid(ing *networkingv1.Ingress) bool {\n\tvar endpointKey string\n\tif ing.Spec.DefaultBackend != nil { // stream\n\t\tendpointKey = fmt.Sprintf(\"%s/%s\", ing.Namespace, ing.Spec.DefaultBackend.Service.Name)\n\t} else { // http\n\tLoop:\n\t\tfor _, rule := range ing.Spec.Rules {\n\t\t\tfor _, path := range rule.IngressRuleValue.HTTP.Paths {\n\t\t\t\tendpointKey = fmt.Sprintf(\"%s/%s\", ing.Namespace, path.Backend.Service.Name)\n\t\t\t\tif endpointKey != \"\" {\n\t\t\t\t\tbreak Loop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\titem, exists, err := s.listers.Endpoint.GetByKey(endpointKey)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Can not get endpoint by key(%s): %v\", endpointKey, err)\n\t\treturn false\n\t}\n\tif !exists {\n\t\tlogrus.Debugf(\"Endpoint %s does not exist.\", endpointKey)\n\t\treturn false\n\t}\n\tendpoint, ok := item.(*corev1.Endpoints)\n\tif !ok {\n\t\tlogrus.Errorf(\"Cant not convert %v to %v\", reflect.TypeOf(item), reflect.TypeOf(endpoint))\n\t\treturn false\n\t}\n\tif len(endpoint.Subsets) == 0 {\n\t\tlogrus.Debugf(\"Endpoints(%s) is empty, ignore it\", endpointKey)\n\t\treturn false\n\t}\n\tif !hasReadyAddresses(endpoint) {\n\t\tlogrus.Debugf(\"Endpoints(%s) is empty, ignore it\", endpointKey)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (m *MockValidater) IsIngressValid(ingress *v1.Ingress) (bool, string) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsIngressValid\", ingress)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(string)\n\treturn ret0, ret1\n}", "func validIngress() *extensions.Ingress {\n\treturn newIngress(map[string]utils.FakeIngressRuleValueMap{\n\t\t\"foo.bar.com\": testPathMap,\n\t})\n}", "func (r *Network) Ingress() pulumi.BoolOutput {\n\treturn (pulumi.BoolOutput)(r.s.State[\"ingress\"])\n}", "func IsIngressAvailable(ingress *networkingv1.Ingress) bool {\n\t// Ingress is ready if it has at least one endpoint\n\tendpoints := ingress.Status.LoadBalancer.Ingress\n\treturn len(endpoints) > 
0\n}", "func (k Key) IsIngress() bool {\n\treturn k.TrafficDirection == trafficdirection.Ingress.Uint8()\n}", "func IsIngressAvailable(ingress *extensionsv1beta1.Ingress) bool {\n\t// Ingress is ready if it has at least one endpoint\n\tendpoints := ingress.Status.LoadBalancer.Ingress\n\treturn len(endpoints) > 0\n}", "func (na *cnmNetworkAllocator) IsVIPOnIngressNetwork(vip *api.Endpoint_VirtualIP) bool {\n\tif vip == nil {\n\t\treturn false\n\t}\n\n\tlocalNet := na.getNetwork(vip.NetworkID)\n\tif localNet != nil && localNet.nw != nil {\n\t\treturn networkallocator.IsIngressNetwork(localNet.nw)\n\t}\n\treturn false\n}", "func Validate(ingress *networkingv1.Ingress) error {\n\tif supportsTLS(ingress) && containsWildcard(ingress.Spec.TLS[0].Hosts[0]) {\n\t\treturn errors.Errorf(\"ingress TLS host %q contains wildcards\", ingress.Spec.TLS[0].Hosts[0])\n\t}\n\n\tif len(ingress.Spec.Rules) == 0 {\n\t\treturn errors.New(\"ingress does not have any rules\")\n\t}\n\n\tif containsWildcard(ingress.Spec.Rules[0].Host) {\n\t\treturn errors.Errorf(\"ingress host %q contains wildcards\", ingress.Spec.Rules[0].Host)\n\t}\n\n\treturn nil\n}", "func IsAdmitted(route *routev1.Route) bool {\n\tfor _, ingress := range route.Status.Ingress {\n\t\tif isIngressAdmitted(ingress) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (k Key) IsEgress() bool {\n\treturn k.TrafficDirection == trafficdirection.Egress.Uint8()\n}", "func checkIfSecurityGroupAllowsEgressForIPandPort(securityGroupFrom types.SecurityGroup, securityGroupToID string, port int32, ipDestination net.IP) *Check {\n\tlog.Debugf(\"Checking security group egress - %s\\n\", *securityGroupFrom.GroupId)\n\tfor _, egress := range securityGroupFrom.IpPermissionsEgress {\n\t\tif port >= egress.FromPort && port <= egress.ToPort {\n\t\t\tlog.Debugf(\"found port opening %s\", toStringIPPermission(egress))\n\t\t\tif len(egress.Ipv6Ranges) > 0 {\n\t\t\t\treturn &Check{\n\t\t\t\t\tIsPassing: false,\n\t\t\t\t\tReason: \"IPV6 is 
not supported yet\",\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(egress.PrefixListIds) > 0 {\n\t\t\t\treturn &Check{\n\t\t\t\t\tIsPassing: false,\n\t\t\t\t\tReason: \"PrefixListIds are not supported yet\",\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// User ids cover sestinations like security group\n\t\t\tlog.Debugf(\"user ids %d\", len(egress.UserIdGroupPairs))\n\t\t\tif len(egress.UserIdGroupPairs) > 0 {\n\t\t\t\tfor _, userIDGroup := range egress.UserIdGroupPairs {\n\t\t\t\t\tlog.Debugf(\"group id %s\", *userIDGroup.GroupId)\n\t\t\t\t\t// check if this group id is security group\n\t\t\t\t\tif strings.HasPrefix(*userIDGroup.GroupId, \"sg-\") {\n\t\t\t\t\t\tif strings.EqualFold(*userIDGroup.GroupId, securityGroupToID) {\n\t\t\t\t\t\t\treturn &Check{\n\t\t\t\t\t\t\t\tIsPassing: true,\n\t\t\t\t\t\t\t\tReason: fmt.Sprintf(\"found outbound rule pointing tu security group - %s\", *userIDGroup.GroupId),\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn &Check{\n\t\t\t\t\t\t\tIsPassing: false,\n\t\t\t\t\t\t\tReason: fmt.Sprintf(\"this destination is not supported yet - userIDGroup %s\", *userIDGroup.GroupId),\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// IP ranges cover only hardcoded cidr values\n\t\t\tlog.Debugf(\"ipranges ipv4 %d\", len(egress.IpRanges))\n\t\t\tfor _, ipRange := range egress.IpRanges {\n\t\t\t\tlog.Debugf(\"Checking if security group with '%s' can handle '%s'\", *ipRange.CidrIp, ipDestination)\n\t\t\t\t_, cidr, err := net.ParseCIDR(*ipRange.CidrIp)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"%s\", err)\n\t\t\t\t}\n\n\t\t\t\tif cidr.Contains(ipDestination) {\n\t\t\t\t\treturn &Check{\n\t\t\t\t\t\tIsPassing: true,\n\t\t\t\t\t\tReason: fmt.Sprintf(\"found outbound rule pointing at ipv4 cidr range %s\", *ipRange.CidrIp),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &Check{\n\t\tIsPassing: false,\n\t\tReason: \"source outbound security group is not allowing this traffic\",\n\t}\n}", "func IsIPNetValid(nw *net.IPNet) bool 
{\n\treturn nw.String() != \"0.0.0.0/0\"\n}", "func ValidateIngress(ingress *extensions.Ingress) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMeta(&ingress.ObjectMeta, true, ValidateIngressName, field.NewPath(\"metadata\"))\n\tallErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath(\"spec\"))...)\n\treturn allErrs\n}", "func validateEgressGateway(ctx context.Context, cli client.Client, egw *operatorv1.EgressGateway) error {\n\tnativeIP := operatorv1.NativeIPDisabled\n\tif egw.Spec.AWS != nil && egw.Spec.AWS.NativeIP != nil {\n\t\tnativeIP = *egw.Spec.AWS.NativeIP\n\t}\n\n\t// Validate IPPools specified.\n\t// If name is specified, check IPPool exists.\n\t// If CIDR is specified, check if CIDR matches with any IPPool.\n\t// If Aws.NativeIP is enabled, check if the IPPool is backed by aws-subnet ID.\n\tif len(egw.Spec.IPPools) == 0 {\n\t\treturn fmt.Errorf(\"At least one IPPool must be specified\")\n\t}\n\n\tfor _, ippool := range egw.Spec.IPPools {\n\t\terr := validateIPPool(ctx, cli, ippool, nativeIP)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, externalNetwork := range egw.Spec.ExternalNetworks {\n\t\terr := validateExternalNetwork(ctx, cli, externalNetwork)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Check if ElasticIPs are specified only if NativeIP is enabled.\n\tif egw.Spec.AWS != nil {\n\t\tif len(egw.Spec.AWS.ElasticIPs) > 0 && (*egw.Spec.AWS.NativeIP == operatorv1.NativeIPDisabled) {\n\t\t\treturn fmt.Errorf(\"NativeIP must be enabled when elastic IPs are used\")\n\t\t}\n\t}\n\n\t// Check if neither ICMPProbe nor HTTPProbe is configured.\n\tif egw.Spec.EgressGatewayFailureDetection != nil {\n\t\tif egw.Spec.EgressGatewayFailureDetection.ICMPProbe == nil &&\n\t\t\tegw.Spec.EgressGatewayFailureDetection.HTTPProbe == nil {\n\t\t\treturn fmt.Errorf(\"Either ICMP or HTTP probe must be configured\")\n\t\t}\n\t\t// Check if ICMP and HTTP probe timeout is greater than interval.\n\t\tif 
egw.Spec.EgressGatewayFailureDetection.ICMPProbe != nil {\n\t\t\tif *egw.Spec.EgressGatewayFailureDetection.ICMPProbe.TimeoutSeconds <\n\t\t\t\t*egw.Spec.EgressGatewayFailureDetection.ICMPProbe.IntervalSeconds {\n\t\t\t\treturn fmt.Errorf(\"ICMP probe timeout must be greater than interval\")\n\t\t\t}\n\t\t}\n\t\tif egw.Spec.EgressGatewayFailureDetection.HTTPProbe != nil {\n\t\t\tif *egw.Spec.EgressGatewayFailureDetection.HTTPProbe.TimeoutSeconds <\n\t\t\t\t*egw.Spec.EgressGatewayFailureDetection.HTTPProbe.IntervalSeconds {\n\t\t\t\treturn fmt.Errorf(\"HTTP probe timeout must be greater than interval\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func ValidateIngressSpec(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\t// TODO: Is a default backend mandatory?\n\tif spec.Backend != nil {\n\t\tallErrs = append(allErrs, validateIngressBackend(spec.Backend, fldPath.Child(\"backend\"))...)\n\t} else if len(spec.Rules) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, spec.Rules, \"either `backend` or `rules` must be specified\"))\n\t}\n\tif len(spec.Rules) > 0 {\n\t\tallErrs = append(allErrs, validateIngressRules(spec.Rules, fldPath.Child(\"rules\"))...)\n\t}\n\tif len(spec.TLS) > 0 {\n\t\tallErrs = append(allErrs, validateIngressTLS(spec, fldPath.Child(\"tls\"))...)\n\t}\n\treturn allErrs\n}", "func (s *k8sStore) checkIngress(ing *networkingv1.Ingress) bool {\n\ti, err := l4.NewParser(s).Parse(ing)\n\tif err != nil {\n\t\tlogrus.Warningf(\"Uxpected error with ingress: %v\", err)\n\t\treturn false\n\t}\n\n\tcfg := i.(*l4.Config)\n\tif cfg.L4Enable {\n\t\t_, err := net.Dial(\"tcp\", fmt.Sprintf(\"%s:%d\", cfg.L4Host, cfg.L4Port))\n\t\tif err == nil {\n\t\t\tlogrus.Warningf(\"%s, in Ingress(%v), is already in use.\",\n\t\t\t\tfmt.Sprintf(\"%s:%d\", cfg.L4Host, cfg.L4Port), ing)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\treturn true\n}", "func backingSvcIsValid(backing v1alpha1.BackingService) bool 
{\n\tif !reflect.DeepEqual(backing.Preconfigured, v1alpha1.PreconfiguredBackingService{}) {\n\t\treturn true\n\t} else {\n\t\treturn backing.Name != \"\" && !reflect.DeepEqual(backing.Resources, []v1alpha1.BackingServiceResource{})\n\t}\n}", "func checkIfSecurityGroupAllowsIngressForIPandPort(securityGroupTo types.SecurityGroup, securityGroupFromID string, port int32, ipFrom net.IP) *Check {\n\tlog.Debugf(\"Checking security group ingress - %s\\n\", *securityGroupTo.GroupId)\n\tfor _, ingress := range securityGroupTo.IpPermissions {\n\t\tif port >= ingress.FromPort && port <= ingress.ToPort {\n\t\t\tlog.Debugf(\"found port opening %s\", toStringIPPermission(ingress))\n\t\t\tif len(ingress.Ipv6Ranges) > 0 {\n\t\t\t\treturn &Check{\n\t\t\t\t\tIsPassing: false,\n\t\t\t\t\tReason: \"IPV6 is not supported yet\",\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(ingress.PrefixListIds) > 0 {\n\t\t\t\treturn &Check{\n\t\t\t\t\tIsPassing: false,\n\t\t\t\t\tReason: \"PrefixListIds are not supported yet\",\n\t\t\t\t}\n\t\t\t}\n\t\t\t// User ids cover sestinations like security group\n\t\t\tif len(ingress.UserIdGroupPairs) > 0 {\n\t\t\t\tlog.Debugf(\"found %d security groups matching this port\", len(ingress.UserIdGroupPairs))\n\t\t\t\tfor _, userIDGroup := range ingress.UserIdGroupPairs {\n\t\t\t\t\tlog.Debugf(\"checking if security group id %s matches\", *userIDGroup.GroupId)\n\t\t\t\t\t// check if this group id is security group\n\t\t\t\t\tif strings.HasPrefix(*userIDGroup.GroupId, \"sg-\") {\n\t\t\t\t\t\tif strings.EqualFold(*userIDGroup.GroupId, securityGroupFromID) {\n\t\t\t\t\t\t\treturn &Check{\n\t\t\t\t\t\t\t\tIsPassing: true,\n\t\t\t\t\t\t\t\tReason: fmt.Sprintf(\"found inbound rule pointing to security group - %s\", *userIDGroup.GroupId),\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn &Check{\n\t\t\t\t\t\t\tIsPassing: false,\n\t\t\t\t\t\t\tReason: fmt.Sprintf(\"this source is not supported yet - userIDGroup %s\", 
*userIDGroup.GroupId),\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// IP ranges cover only hardcoded cidr values\n\t\t\tlog.Debugf(\"ipranges ipv4 %d\", len(ingress.IpRanges))\n\t\t\tfor _, ipRange := range ingress.IpRanges {\n\t\t\t\tlog.Debugf(\"Checking if security group with '%s' can handle '%s'\", *ipRange.CidrIp, ipFrom)\n\t\t\t\t_, cidr, err := net.ParseCIDR(*ipRange.CidrIp)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"%s\", err)\n\t\t\t\t}\n\n\t\t\t\tif cidr.Contains(ipFrom) {\n\t\t\t\t\treturn &Check{\n\t\t\t\t\t\tIsPassing: true,\n\t\t\t\t\t\tReason: fmt.Sprintf(\"found inbound rule pointing at ipv4 cidr range %s\", *ipRange.CidrIp),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn &Check{\n\t\tIsPassing: false,\n\t\tReason: \"destination inbound security group is not allowing this traffic\",\n\t}\n}", "func (options *ConformanceTestOptions) ShouldTestIngressOfType(t string) bool {\n\treturn indexOf(options.TestCase.Ingress.IngressConfig.Controllers, t) > -1\n}", "func (options *ConformanceTestOptions) SkipIngress() bool {\n\treturn options.TestCase.Ingress.Skip\n}", "func (e BadRequest) IsBadRequest() {}", "func (mr *MockIAssistantMockRecorder) GslbIngressExposedIPs(gslb interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GslbIngressExposedIPs\", reflect.TypeOf((*MockIAssistant)(nil).GslbIngressExposedIPs), gslb)\n}", "func isCatchAllIngress(spec networking.IngressSpec) bool {\n\treturn spec.DefaultBackend != nil && len(spec.Rules) == 0\n}", "func (mr *MockValidaterMockRecorder) CheckNoConflictsInIngress(ingress interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CheckNoConflictsInIngress\", reflect.TypeOf((*MockValidater)(nil).CheckNoConflictsInIngress), ingress)\n}", "func (o *FiltersSecurityGroup) HasInboundRuleFromPortRanges() bool {\n\tif o != nil && o.InboundRuleFromPortRanges != nil {\n\t\treturn 
true\n\t}\n\n\treturn false\n}", "func isValidRoute(dst *net.IPNet, gw net.IP) bool {\n\tif dst == nil || gw == nil {\n\t\treturn false\n\t}\n\tif gw.IsUnspecified() {\n\t\treturn false\n\t}\n\treturn true\n}", "func isRouteHSTSAllowed(ingress *configv1.Ingress, newRoute *routeapi.Route, namespace *corev1.Namespace) error {\n\trequirements := ingress.Spec.RequiredHSTSPolicies\n\tfor _, requirement := range requirements {\n\t\t// Check if the required namespaceSelector (if any) and the domainPattern match\n\t\tif matches, err := requiredNamespaceDomainMatchesRoute(requirement, newRoute, namespace); err != nil {\n\t\t\treturn err\n\t\t} else if !matches {\n\t\t\t// If one of either the namespaceSelector or domain didn't match, we will continue to look\n\t\t\tcontinue\n\t\t}\n\n\t\trouteHSTS, err := hstsConfigFromRoute(newRoute)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If there is no annotation but there needs to be one, return error\n\t\tif routeHSTS != nil {\n\t\t\tif err = routeHSTS.meetsRequirements(requirement); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// Validation only checks the first matching required HSTS rule.\n\t\treturn nil\n\t}\n\n\t// None of the requirements matched this route's domain/namespace, it is automatically allowed\n\treturn nil\n}", "func getIngressFilteringEnabled(bc *intv1alpha1.BrokerCell) bool {\n\tif val, ok := bc.GetAnnotations()[resources.IngressFilteringEnabledAnnotationKey]; ok {\n\t\treturn val == \"true\"\n\t}\n\n\treturn false\n}", "func (m *IngressVip) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateClusterID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIP(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVerification(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o 
*FiltersSecurityGroup) HasInboundRuleIpRanges() bool {\n\tif o != nil && o.InboundRuleIpRanges != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (ctl *Ctl) SpecIsValid() (bool, error) {\n\treturn true, nil\n}", "func (ctl *Ctl) SpecIsValid() (bool, error) {\n\treturn true, nil\n}", "func (m *MockIAssistant) GslbIngressExposedIPs(gslb *v1beta1.Gslb) ([]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GslbIngressExposedIPs\", gslb)\n\tret0, _ := ret[0].([]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func Ingress(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Ingress request to simple-service successful!\")\n}", "func (m *IoK8sAPINetworkingV1IngressSpec) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDefaultBackend(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTLS(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func routeStatusAdmitted(route routev1.Route, ingressControllerName string) bool {\n\t// Iterate through the related Ingress Controllers.\n\tfor _, ingress := range route.Status.Ingress {\n\t\t// Check if the RouterName matches the name of the Ingress Controller.\n\t\tif ingress.RouterName == ingressControllerName {\n\t\t\t// Check if the Route was admitted by the Ingress Controller.\n\t\t\tfor _, cond := range ingress.Conditions {\n\t\t\t\tif cond.Type == routev1.RouteAdmitted && cond.Status == corev1.ConditionTrue {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}", "func (s *StatusSyncer) shouldTargetIngress(ingress *knetworking.Ingress) bool {\n\tvar ingressClass *knetworking.IngressClass\n\tif ingress.Spec.IngressClassName != nil {\n\t\tingressClass = 
s.ingressClasses.Get(*ingress.Spec.IngressClassName, \"\")\n\t}\n\treturn shouldProcessIngressWithClass(s.meshConfig.Mesh(), ingress, ingressClass)\n}", "func (s SubnetTemplateSpec) IsNatGatewayEnabled() bool {\n\treturn s.NatGateway.Name != \"\"\n}", "func (o *CreateInternalNetworkIPRangeBadRequest) IsSuccess() bool {\n\treturn false\n}", "func isAddressInIngress(ingress []v1.LoadBalancerIngress, address string) (int, bool) {\n\tfor i, addr := range ingress {\n\t\tif strings.Compare(addr.IP, address) == 0 {\n\t\t\treturn i, true\n\t\t}\n\t}\n\treturn 0, false\n}", "func isIPValid(ip string) (bool, int) {\n\tpip := net.ParseIP(ip)\n\tif pip.To4() == nil {\n\t\treturn false, 0\n\t}\n\tfor _, reservedIP := range gaw.ReservedIPs {\n\t\t_, subnet, err := net.ParseCIDR(reservedIP)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif subnet.Contains(pip) {\n\t\t\treturn false, -1\n\t\t}\n\t}\n\treturn true, 1\n}", "func (o *ResourceLimits) HasReservedIpsOnContract() bool {\n\tif o != nil && o.ReservedIpsOnContract != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsForbidden(ip string) bool {\n\tforbidden := map[string]bool{\n\t\t\"\": true,\n\t\t\"0.0.0.0\": true,\n\t\t\"::\": true,\n\t\t\"0:0:0:0:0:0:0:0\": true,\n\t}\n\treturn forbidden[ip]\n}", "func (o *CreateInternalNetworkIPRangeForbidden) IsSuccess() bool {\n\treturn false\n}", "func IngressEndpoint() string {\n\treturn grpcCfg.IngressEndpoint\n}", "func (rs *RouteStatus) MarkIngressNotConfigured() {\n\trouteCondSet.Manage(rs).MarkUnknown(RouteConditionIngressReady,\n\t\t\"IngressNotConfigured\", \"Ingress has not yet been reconciled.\")\n}", "func ValidateIngressStatusUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath(\"metadata\"))\n\tallErrs = append(allErrs, apivalidation.ValidateLoadBalancerStatus(&ingress.Status.LoadBalancer, field.NewPath(\"status\", 
\"loadBalancer\"))...)\n\treturn allErrs\n}", "func (gpm *GenericPoolManager) IsValid(fsvc *fscache.FuncSvc) bool {\n\tfor _, obj := range fsvc.KubernetesObjects {\n\t\tif strings.ToLower(obj.Kind) == \"pod\" {\n\t\t\tpod, err := gpm.kubernetesClient.CoreV1().Pods(obj.Namespace).Get(obj.Name, metav1.GetOptions{})\n\t\t\tif err == nil && utils.IsReadyPod(pod) {\n\t\t\t\t// Normally, the address format is http://[pod-ip]:[port], however, if the\n\t\t\t\t// Istio is enabled the address format changes to http://[svc-name]:[port].\n\t\t\t\t// So if the Istio is enabled and pod is in ready state, we return true directly;\n\t\t\t\t// Otherwise, we need to ensure that the address contains pod ip.\n\t\t\t\tif gpm.enableIstio ||\n\t\t\t\t\t(!gpm.enableIstio && strings.Contains(fsvc.Address, pod.Status.PodIP)) {\n\t\t\t\t\tgpm.logger.Debug(\"valid address\",\n\t\t\t\t\t\tzap.String(\"address\", fsvc.Address),\n\t\t\t\t\t\tzap.Any(\"function\", fsvc.Function),\n\t\t\t\t\t\tzap.String(\"executor\", string(fsvc.Executor)),\n\t\t\t\t\t)\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (p Provider) IsRequestValid(ratesRequest model.RatesRequest) (bool, error) {\n\treturn p.BaseProvider.IsRequestValid(p, ratesRequest)\n}", "func (c *configuration) Ingress(clientSet ClientSet) *Ingress {\n\tif clientSet != nil {\n\t\treturn NewIngress(clientSet)\n\t}\n\treturn nil\n\n}", "func (t *TestSpec) IsPolicyInvalid() bool {\n\tvar exclude []string\n\texclude = append(t.l3.exclude, t.l4.exclude...)\n\texclude = append(exclude, t.l7.exclude...)\n\n\tfor _, value := range exclude {\n\t\tif strings.Contains(t.String(), value) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ValidateIngressUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath(\"metadata\"))\n\tallErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, 
field.NewPath(\"spec\"))...)\n\treturn allErrs\n}", "func TestIngressNoUpdate(t *testing.T) {\n\tingrNoUpdate := &networkingv1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: \"red-ns\",\n\t\t\tName: \"testingr-noupdate\",\n\t\t},\n\t\tSpec: networkingv1.IngressSpec{\n\t\t\tDefaultBackend: &networkingv1.IngressBackend{\n\t\t\t\tService: &networkingv1.IngressServiceBackend{\n\t\t\t\t\tName: \"testsvc\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t_, err := kubeClient.NetworkingV1().Ingresses(\"red-ns\").Create(context.TODO(), ingrNoUpdate, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in adding Ingress: %v\", err)\n\t}\n\twaitAndverify(t, \"Ingress/red-ns/testingr-noupdate\")\n\n\tingrNoUpdate.Status = networkingv1.IngressStatus{\n\t\tLoadBalancer: corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{\n\t\t\t\t{\n\t\t\t\t\tIP: \"1.1.1.1\",\n\t\t\t\t\tHostname: \"testingr.avi.internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tingrNoUpdate.ResourceVersion = \"2\"\n\t_, err = kubeClient.NetworkingV1().Ingresses(\"red-ns\").Update(context.TODO(), ingrNoUpdate, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in updating Ingress: %v\", err)\n\t}\n\n\tingrNoUpdate.Status = networkingv1.IngressStatus{\n\t\tLoadBalancer: corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{\n\t\t\t\t{\n\t\t\t\t\tIP: \"1.1.1.1\",\n\t\t\t\t\tHostname: \"testingr.avi.internal\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP: \"2.3.4.5\",\n\t\t\t\t\tHostname: \"testingr2.avi.internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tingrNoUpdate.ResourceVersion = \"3\"\n\t_, err = kubeClient.NetworkingV1().Ingresses(\"red-ns\").Update(context.TODO(), ingrNoUpdate, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in updating Ingress: %v\", err)\n\t}\n\n\twaitAndverify(t, \"\")\n}", "func validateIngressBackend(backend *extensions.IngressBackend, fldPath *field.Path) field.ErrorList {\n\tallErrs := 
field.ErrorList{}\n\n\t// All backends must reference a single local service by name, and a single service port by name or number.\n\tif len(backend.ServiceName) == 0 {\n\t\treturn append(allErrs, field.Required(fldPath.Child(\"serviceName\"), \"\"))\n\t} else {\n\t\tfor _, msg := range apivalidation.ValidateServiceName(backend.ServiceName, false) {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"serviceName\"), backend.ServiceName, msg))\n\t\t}\n\t}\n\tallErrs = append(allErrs, apivalidation.ValidatePortNumOrName(backend.ServicePort, fldPath.Child(\"servicePort\"))...)\n\treturn allErrs\n}", "func isIPValid(ip string) (bool, int) {\n\tpip := net.ParseIP(ip)\n\tif pip.To4() == nil {\n\t\treturn false, 0\n\t}\n\tfor _, reservedIP := range reservedIPs {\n\t\t_, subnet, err := net.ParseCIDR(reservedIP)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif subnet.Contains(pip) {\n\t\t\treturn false, -1\n\t\t}\n\t}\n\treturn true, 1\n}", "func (o *FiltersSecurityGroup) HasOutboundRuleIpRanges() bool {\n\tif o != nil && o.OutboundRuleIpRanges != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o ArgoCDSpecGrafanaOutput) Ingress() ArgoCDSpecGrafanaIngressPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecGrafana) *ArgoCDSpecGrafanaIngress { return v.Ingress }).(ArgoCDSpecGrafanaIngressPtrOutput)\n}", "func (o *FiltersSecurityGroup) HasOutboundRuleFromPortRanges() bool {\n\tif o != nil && o.OutboundRuleFromPortRanges != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *FiltersSecurityGroup) HasInboundRuleToPortRanges() bool {\n\tif o != nil && o.InboundRuleToPortRanges != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m MigProfile) AssertValid() error {\n\tmatch, err := regexp.MatchString(`^[0-9]+g\\.[0-9]+gb$`, string(m))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error running regex: %v\", err)\n\t}\n\tif match {\n\t\treturn nil\n\t}\n\n\tmatch, err = regexp.MatchString(`^[0-9]+c\\.[0-9]+g\\.[0-9]+gb$`, string(m))\n\tif err != 
nil {\n\t\treturn fmt.Errorf(\"error running regex: %v\", err)\n\t}\n\tif match {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"no match for format %%dc.%%dg.%%dgb or %%dg.%%dgb\")\n}", "func isIngressEqual(a []v1.LoadBalancerIngress, b []v1.LoadBalancerIngress) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif _, found := isAddressInIngress(b, a[i].IP); !found {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor i := 0; i < len(b); i++ {\n\t\tif _, found := isAddressInIngress(a, b[i].IP); !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (o ArgoCDSpecServerGrpcOutput) Ingress() ArgoCDSpecServerGrpcIngressPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecServerGrpc) *ArgoCDSpecServerGrpcIngress { return v.Ingress }).(ArgoCDSpecServerGrpcIngressPtrOutput)\n}", "func (o *FiltersNet) HasIpRanges() bool {\n\tif o != nil && o.IpRanges != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsIngressTLSEnabled(ingress *extsv1beta1.Ingress) bool {\n\treturn len(ingress.Spec.TLS) > 0\n}", "func (iface *Interface) IsValid() bool {\n\tiface.lock.RLock()\n\tdefer iface.lock.RUnlock()\n\n\t// ignore tunnel interface\n\tif iface.Type == IfTypeTunnel {\n\t\treturn true\n\t}\n\n\tif len(iface.Subinterfaces) >= 0 {\n\t\tfor _, subiface := range iface.Subinterfaces {\n\t\t\tif subiface.IsValid() == false {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (a *apiServer) validateEgress(pipelineName string, egress *pps.Egress) error {\n\tif egress == nil {\n\t\treturn nil\n\t}\n\treturn pfsServer.ValidateSQLDatabaseEgress(egress.GetSqlDatabase())\n}", "func (o ArgoCDSpecGrafanaPtrOutput) Ingress() ArgoCDSpecGrafanaIngressPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecGrafana) *ArgoCDSpecGrafanaIngress {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Ingress\n\t}).(ArgoCDSpecGrafanaIngressPtrOutput)\n}", "func IsBadRequest(err error) bool {\n\ttype badRequester interface 
{\n\t\tBadRequest() bool\n\t}\n\treturn isErrorPredicate(err, func(err error) bool {\n\t\tbadrequest, ok := err.(badRequester)\n\t\treturn ok && badrequest.BadRequest()\n\t})\n}", "func (o ArgoCDSpecServerGrpcPtrOutput) Ingress() ArgoCDSpecServerGrpcIngressPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecServerGrpc) *ArgoCDSpecServerGrpcIngress {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Ingress\n\t}).(ArgoCDSpecServerGrpcIngressPtrOutput)\n}", "func (v ValidatorFunc) HasConstraints(_ ParamIn) bool {\n\treturn true\n}", "func (az *AzK8sConstraintTemplate) HasPrivilegedAccessRestriction() (*bool, error) {\n\treturn az.hasConstraint(azK8sContainerNoPrivilege)\n}", "func (b Backend) Valid(tlsPassthrough bool) error {\n\tif tlsPassthrough {\n\t\t_, _, err := asIpPort(string(b))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to parse backend %s with error: %w\", b, err)\n\t\t}\n\n\t} else {\n\t\tu, err := url.Parse(string(b))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to parse backend with error: %w\", err)\n\t\t}\n\n\t\tif u.Scheme != \"http\" {\n\t\t\treturn fmt.Errorf(\"scheme expected to be http\")\n\t\t}\n\n\t\tip := net.ParseIP(u.Hostname())\n\t\tif len(ip) == 0 || ip.IsLoopback() {\n\t\t\treturn fmt.Errorf(\"invalid ip address in backend: %s\", u.Hostname())\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *MockValidater) CheckNoConflictsInIngress(ingress *v1.Ingress) (bool, string) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CheckNoConflictsInIngress\", ingress)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(string)\n\treturn ret0, ret1\n}", "func parseEgressConfig(config *netConfigJSON, netConfig *NetConfig) error {\n\tif config.EgressConfig == nil {\n\t\treturn nil\n\t}\n\n\tif (config.EgressConfig.ListenerPort == 0 && config.EgressConfig.RedirectIP == nil) ||\n\t\t(config.EgressConfig.ListenerPort != 0 && config.EgressConfig.RedirectIP != nil) {\n\t\treturn fmt.Errorf(\"exactly one of ListenerPort and RedirectIP must be 
\" +\n\t\t\t\"specified in Egress\")\n\t}\n\tredirectMode := RedirectMode(config.EgressConfig.RedirectMode)\n\tif err := validateRedirectMode(redirectMode, config.EgressConfig); err != nil {\n\t\treturn err\n\t}\n\n\tif config.EgressConfig.ListenerPort != 0 {\n\t\t// Verify that the port is valid.\n\t\tif err := vpc.ValidatePortRange(config.EgressConfig.ListenerPort); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tredirectIP := config.EgressConfig.RedirectIP\n\tif redirectIP != nil {\n\t\tif (config.EnableIPv4 && redirectIP.IPv4 == \"\") ||\n\t\t\t(config.EnableIPv6 && redirectIP.IPv6 == \"\") {\n\t\t\treturn fmt.Errorf(\"missing required parameter: EgressConfig Redirect IP\")\n\t\t}\n\t\tif redirectIP.IPv4 != \"\" {\n\t\t\tif ip := net.ParseIP(redirectIP.IPv4); ip == nil {\n\t\t\t\treturn fmt.Errorf(\"invalid parameter: EgressConfig RedirectIP\")\n\t\t\t}\n\t\t\tnetConfig.EgressRedirectIPv4Addr = redirectIP.IPv4\n\t\t}\n\t\tif redirectIP.IPv6 != \"\" {\n\t\t\tif ip := net.ParseIP(redirectIP.IPv6); ip == nil {\n\t\t\t\treturn fmt.Errorf(\"invalid parameter: EgressConfig RedirectIP\")\n\t\t\t}\n\t\t\tnetConfig.EgressRedirectIPv6Addr = redirectIP.IPv6\n\t\t}\n\t}\n\n\t// Verify that the egress vip is a valid CIDR.\n\tegressVIPConfig := config.EgressConfig.VIP\n\tif egressVIPConfig == nil {\n\t\treturn fmt.Errorf(\"missing required parameter: EgressConfig VIP\")\n\t}\n\n\t// Verify that atleast one of the egress CIDRs are set.\n\tif egressVIPConfig.IPv4CIDR == \"\" && egressVIPConfig.IPv6CIDR == \"\" {\n\t\treturn fmt.Errorf(\"missing required parameter: EgressConfig VIP CIDR\")\n\t}\n\n\t// Verify that the CIDR is set for the respective IP version.\n\tif (config.EnableIPv4 && egressVIPConfig.IPv4CIDR == \"\") ||\n\t\t(config.EnableIPv6 && egressVIPConfig.IPv6CIDR == \"\") {\n\t\treturn fmt.Errorf(\"missing required parameter: EgressConfig VIP CIDR\")\n\t}\n\n\t// Verify the value of IPV4 CIDR.\n\tif egressVIPConfig.IPv4CIDR != \"\" {\n\t\tif ip, _, err := 
net.ParseCIDR(egressVIPConfig.IPv4CIDR); err != nil || ip.To4() == nil {\n\t\t\treturn fmt.Errorf(\"invalid parameter: EgressConfig IPv4 CIDR Address\")\n\t\t}\n\t}\n\t// Verify the value of IPV6 CIDR.\n\tif egressVIPConfig.IPv6CIDR != \"\" {\n\t\tif ip, _, err := net.ParseCIDR(egressVIPConfig.IPv6CIDR); err != nil || ip.To16() == nil {\n\t\t\treturn fmt.Errorf(\"invalid parameter: EgressConfig IPv6 CIDR Address\")\n\t\t}\n\t}\n\n\t// Populate Egress config.\n\tnetConfig.EgressRedirectMode = redirectMode\n\tnetConfig.EgressPort = config.EgressConfig.ListenerPort\n\tnetConfig.EgressIPv4CIDR = config.EgressConfig.VIP.IPv4CIDR\n\tnetConfig.EgressIPv6CIDR = config.EgressConfig.VIP.IPv6CIDR\n\n\treturn nil\n}", "func TestValid(t *testing.T) {\n\tinput := \"abcdefg\"\n\toutput := Invalid(input)\n\n\tif output != false {\n\t\tt.Error(\"Valid test failed\")\n\t}\n}", "func gatewayClassValid(gatewayClass *gatewayapi_v1alpha2.GatewayClass) bool {\n\tif gatewayClass == nil {\n\t\treturn false\n\t}\n\n\tfor _, cond := range gatewayClass.Status.Conditions {\n\t\tif cond.Type == string(gatewayapi_v1alpha2.GatewayClassConditionStatusAccepted) && cond.Status == metav1.ConditionTrue {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (v Validator) IsBonded() bool {\n\treturn v.GetStatus().Equal(sdk.Bonded)\n}", "func (v Validator) IsUnbonding() bool {\n\treturn v.GetStatus().Equal(sdk.Unbonding)\n}", "func TestValidateIATBHOriginatorStatusCode(t *testing.T) {\n\ttestValidateIATBHOriginatorStatusCode(t)\n}", "func IngressSpec() *IngressSpecApplyConfiguration {\n\treturn &IngressSpecApplyConfiguration{}\n}", "func hasChanges(old *networking.Ingress, current *networking.Ingress) bool {\n\told.Status.LoadBalancer.Ingress = current.Status.LoadBalancer.Ingress\n\told.ResourceVersion = current.ResourceVersion\n\treturn !reflect.DeepEqual(old, current)\n}", "func (b *bogusAddress) IsForNet(chainParams *chaincfg.Params) bool {\n\treturn true // why not?\n}", "func (o 
ArgoCDSpecServerOutput) Ingress() ArgoCDSpecServerIngressPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecServer) *ArgoCDSpecServerIngress { return v.Ingress }).(ArgoCDSpecServerIngressPtrOutput)\n}", "func (mr *MockSecurityGroupServiceIfaceMockRecorder) AuthorizeSecurityGroupIngress(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AuthorizeSecurityGroupIngress\", reflect.TypeOf((*MockSecurityGroupServiceIface)(nil).AuthorizeSecurityGroupIngress), p)\n}", "func IngressURI(route *routev1.Route, host string) (*url.URL, *routev1.RouteIngress, error) {\n\tscheme := \"http\"\n\tif route.Spec.TLS != nil {\n\t\tscheme = \"https\"\n\t}\n\n\tfor _, ingress := range route.Status.Ingress {\n\t\tif host == \"\" || host == ingress.Host {\n\t\t\turi := &url.URL{\n\t\t\t\tScheme: scheme,\n\t\t\t\tHost: ingress.Host,\n\t\t\t}\n\n\t\t\tfor _, condition := range ingress.Conditions {\n\t\t\t\tif condition.Type == routev1.RouteAdmitted && condition.Status == corev1.ConditionTrue {\n\t\t\t\t\treturn uri, &ingress, nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif host == ingress.Host {\n\t\t\t\treturn uri, &ingress, fmt.Errorf(\"ingress for host %s in route %s in namespace %s is not admitted\", host, route.ObjectMeta.Name, route.ObjectMeta.Namespace)\n\t\t\t}\n\t\t}\n\t}\n\n\tif host == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"no admitted ingress for route %s in namespace %s\", route.ObjectMeta.Name, route.ObjectMeta.Namespace)\n\t}\n\treturn nil, nil, fmt.Errorf(\"no ingress for host %s in route %s in namespace %s\", host, route.ObjectMeta.Name, route.ObjectMeta.Namespace)\n}", "func (mr *MockRDSAPIMockRecorder) AuthorizeDBSecurityGroupIngress(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AuthorizeDBSecurityGroupIngress\", reflect.TypeOf((*MockRDSAPI)(nil).AuthorizeDBSecurityGroupIngress), arg0)\n}", "func (r IPRange) Valid() bool {\n\treturn !r.From.IsZero() && !r.To.IsZero() 
&&\n\t\tr.From.Is4() == r.To.Is4() &&\n\t\t!r.To.Less(r.From)\n}", "func (o ArgoCDSpecServerPtrOutput) Ingress() ArgoCDSpecServerIngressPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecServer) *ArgoCDSpecServerIngress {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Ingress\n\t}).(ArgoCDSpecServerIngressPtrOutput)\n}", "func IsBadRequest(err error) bool {\r\n\tvar t BadRequest\r\n\treturn errors.As(err, &t)\r\n}", "func (s *Scan) IsCIDR() bool {\n\t_, _, err := net.ParseCIDR(s.target)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (i identity) hgtValid() bool {\n\tif len(i.HGT) < 3 {\n\t\treturn false\n\t}\n\tswitch i.HGT[len(i.HGT)-2:] {\n\tcase \"cm\":\n\t\tn, err := strconv.Atoi(i.HGT[:len(i.HGT)-2])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn 150 <= n && n <= 193\n\tcase \"in\":\n\t\tn, err := strconv.Atoi(i.HGT[:len(i.HGT)-2])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn 59 <= n && n <= 76\n\t}\n\treturn false\n}", "func (kind Kind) IsValid() bool {\n\treturn kind&Invalid == 0\n}", "func (el *gameStruct) IsGate() bool {\n\treturn IsGate(el.icon)\n}", "func validateRedirectMode(redirectMode RedirectMode, egressConfig *egressConfigJSON) error {\n\tswitch redirectMode {\n\tcase NAT:\n\t\tif egressConfig.ListenerPort == 0 {\n\t\t\treturn fmt.Errorf(\"missing required parameter: Egress ListenerPort\")\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\tcase TPROXY:\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"invalid parameter: Egress RedirectMode\")\n}", "func (m *IngressSecurityRule) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIcmpOptions(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProtocol(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSource(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTCPOptions(formats); err != nil 
{\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUDPOptions(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func parseIngressConfig(config *netConfigJSON, netConfig *NetConfig) error {\n\tingressListenerToInterceptPortMap := make(map[int]int)\n\tfor _, s := range config.IngressConfig {\n\t\t// verify that the ports are valid\n\t\tif err := vpc.ValidatePortRange(s.ListenerPort); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif s.InterceptPort != 0 {\n\t\t\tif err := vpc.ValidatePortRange(s.InterceptPort); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tingressListenerToInterceptPortMap[s.ListenerPort] = s.InterceptPort\n\t\t}\n\t}\n\tnetConfig.IngressListenerToInterceptPortMap = ingressListenerToInterceptPortMap\n\treturn nil\n}", "func (e Error) IsInvalidFormat() bool {\n\treturn e.kind == invalidFormat\n}", "func (o *CreateInternalNetworkIPRangeBadRequest) IsRedirect() bool {\n\treturn false\n}", "func (mr *MockSecurityGroupServiceIfaceMockRecorder) RevokeSecurityGroupIngress(p interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RevokeSecurityGroupIngress\", reflect.TypeOf((*MockSecurityGroupServiceIface)(nil).RevokeSecurityGroupIngress), p)\n}" ]
[ "0.6530742", "0.6434519", "0.62388897", "0.60543406", "0.60028815", "0.59781194", "0.57483435", "0.5673793", "0.5654639", "0.55734754", "0.55105144", "0.5477696", "0.5467668", "0.5445896", "0.5425829", "0.5408939", "0.53802323", "0.5360881", "0.53574884", "0.53501254", "0.5311355", "0.5310651", "0.5268028", "0.52287906", "0.522121", "0.5214991", "0.52002096", "0.51926583", "0.5182552", "0.51686597", "0.51620114", "0.51119435", "0.51119435", "0.50761414", "0.50567263", "0.504613", "0.50428766", "0.5035383", "0.5035261", "0.49937925", "0.49867207", "0.4985664", "0.49721012", "0.49684432", "0.49557322", "0.4920306", "0.4920218", "0.49043843", "0.49020484", "0.48916474", "0.48854223", "0.4881915", "0.48765084", "0.48703113", "0.4868861", "0.48599884", "0.48532262", "0.48518667", "0.48483974", "0.48314467", "0.4819457", "0.48188964", "0.4815761", "0.48142603", "0.480822", "0.47914943", "0.47894275", "0.47803706", "0.4778534", "0.47780088", "0.4774066", "0.47556213", "0.47349837", "0.47340214", "0.4733691", "0.47297806", "0.47285753", "0.47252354", "0.47216776", "0.47199252", "0.47194448", "0.47072127", "0.469055", "0.4669906", "0.46671504", "0.46596035", "0.46573752", "0.46573532", "0.4649694", "0.4643636", "0.46391338", "0.46370634", "0.46191338", "0.46188933", "0.46061322", "0.46033046", "0.46023875", "0.45844617", "0.45802227", "0.45790905" ]
0.7394648
0
CheckNoConflictsInIngress mocks base method
func (m *MockValidater) CheckNoConflictsInIngress(ingress *v1.Ingress) (bool, string) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CheckNoConflictsInIngress", ingress) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(string) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestIngressNoUpdate(t *testing.T) {\n\tingrNoUpdate := &networkingv1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: \"red-ns\",\n\t\t\tName: \"testingr-noupdate\",\n\t\t},\n\t\tSpec: networkingv1.IngressSpec{\n\t\t\tDefaultBackend: &networkingv1.IngressBackend{\n\t\t\t\tService: &networkingv1.IngressServiceBackend{\n\t\t\t\t\tName: \"testsvc\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t_, err := kubeClient.NetworkingV1().Ingresses(\"red-ns\").Create(context.TODO(), ingrNoUpdate, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in adding Ingress: %v\", err)\n\t}\n\twaitAndverify(t, \"Ingress/red-ns/testingr-noupdate\")\n\n\tingrNoUpdate.Status = networkingv1.IngressStatus{\n\t\tLoadBalancer: corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{\n\t\t\t\t{\n\t\t\t\t\tIP: \"1.1.1.1\",\n\t\t\t\t\tHostname: \"testingr.avi.internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tingrNoUpdate.ResourceVersion = \"2\"\n\t_, err = kubeClient.NetworkingV1().Ingresses(\"red-ns\").Update(context.TODO(), ingrNoUpdate, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in updating Ingress: %v\", err)\n\t}\n\n\tingrNoUpdate.Status = networkingv1.IngressStatus{\n\t\tLoadBalancer: corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{\n\t\t\t\t{\n\t\t\t\t\tIP: \"1.1.1.1\",\n\t\t\t\t\tHostname: \"testingr.avi.internal\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP: \"2.3.4.5\",\n\t\t\t\t\tHostname: \"testingr2.avi.internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tingrNoUpdate.ResourceVersion = \"3\"\n\t_, err = kubeClient.NetworkingV1().Ingresses(\"red-ns\").Update(context.TODO(), ingrNoUpdate, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in updating Ingress: %v\", err)\n\t}\n\n\twaitAndverify(t, \"\")\n}", "func verifyInternalIngressController(t *testing.T, name types.NamespacedName, hostname, address, image string) {\n\tkubeConfig, err := config.GetConfig()\n\tif err != nil 
{\n\t\tt.Fatalf(\"failed to get kube config: %v\", err)\n\t}\n\tclient, err := kubernetes.NewForConfig(kubeConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create kube client: %v\", err)\n\t}\n\n\techoPod := buildEchoPod(name.Name, name.Namespace)\n\tif err := kclient.Create(context.TODO(), echoPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoPod); err != nil {\n\t\t\tt.Fatalf(\"failed to delete pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t\t}\n\t}()\n\n\techoService := buildEchoService(echoPod.Name, echoPod.Namespace, echoPod.ObjectMeta.Labels)\n\tif err := kclient.Create(context.TODO(), echoService); err != nil {\n\t\tt.Fatalf(\"failed to create service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoService); err != nil {\n\t\t\tt.Fatalf(\"failed to delete service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t\t}\n\t}()\n\n\techoRoute := buildRouteWithHost(echoPod.Name, echoPod.Namespace, echoService.Name, hostname)\n\tif err := kclient.Create(context.TODO(), echoRoute); err != nil {\n\t\tt.Fatalf(\"failed to create route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoRoute); err != nil {\n\t\t\tt.Fatalf(\"failed to delete route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t\t}\n\t}()\n\n\textraArgs := []string{\n\t\t\"--header\", \"HOST:\" + echoRoute.Spec.Host,\n\t\t\"-v\",\n\t\t\"--retry-delay\", \"20\",\n\t\t\"--max-time\", \"10\",\n\t}\n\tclientPodName := types.NamespacedName{Namespace: name.Namespace, Name: \"curl-\" + name.Name}\n\tclientPodSpec := buildCurlPod(clientPodName.Name, clientPodName.Namespace, image, address, echoRoute.Spec.Host, extraArgs...)\n\tclientPod := clientPodSpec.DeepCopy()\n\tif err := 
kclient.Create(context.TODO(), clientPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %q: %v\", clientPodName, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), clientPod); err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Fatalf(\"failed to delete pod %q: %v\", clientPodName, err)\n\t\t}\n\t}()\n\n\tvar curlPodLogs string\n\terr = wait.PollImmediate(10*time.Second, 10*time.Minute, func() (bool, error) {\n\t\tif err := kclient.Get(context.TODO(), clientPodName, clientPod); err != nil {\n\t\t\tt.Logf(\"error getting client pod %q: %v, retrying...\", clientPodName, err)\n\t\t\treturn false, nil\n\t\t}\n\t\t// First check if client curl pod is still starting or not running.\n\t\tif clientPod.Status.Phase == corev1.PodPending {\n\t\t\tt.Logf(\"waiting for client pod %q to start\", clientPodName)\n\t\t\treturn false, nil\n\t\t}\n\t\treadCloser, err := client.CoreV1().Pods(clientPod.Namespace).GetLogs(clientPod.Name, &corev1.PodLogOptions{\n\t\t\tContainer: \"curl\",\n\t\t\tFollow: false,\n\t\t}).Stream(context.TODO())\n\t\tif err != nil {\n\t\t\tt.Logf(\"failed to read output from pod %s: %v\", clientPod.Name, err)\n\t\t\treturn false, nil\n\t\t}\n\t\tscanner := bufio.NewScanner(readCloser)\n\t\tdefer func() {\n\t\t\tif err := readCloser.Close(); err != nil {\n\t\t\t\tt.Errorf(\"failed to close reader for pod %s: %v\", clientPod.Name, err)\n\t\t\t}\n\t\t}()\n\t\tcurlPodLogs = \"\"\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tcurlPodLogs += line + \"\\n\"\n\t\t\tif strings.Contains(line, \"HTTP/1.0 200 OK\") {\n\t\t\t\tt.Logf(\"verified connectivity with workload with address: %s with response %s\", address, line)\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\t// If failed or succeeded, the pod is stopped, but didn't provide us 200 response, let's try again.\n\t\tif clientPod.Status.Phase == corev1.PodFailed || clientPod.Status.Phase == corev1.PodSucceeded {\n\t\t\tt.Logf(\"client pod %q has 
stopped...restarting. Curl Pod Logs:\\n%s\", clientPodName, curlPodLogs)\n\t\t\tif err := kclient.Delete(context.TODO(), clientPod); err != nil && errors.IsNotFound(err) {\n\t\t\t\tt.Fatalf(\"failed to delete pod %q: %v\", clientPodName, err)\n\t\t\t}\n\t\t\t// Wait for deletion to prevent a race condition. Use PollInfinite since we are already in a Poll.\n\t\t\twait.PollInfinite(5*time.Second, func() (bool, error) {\n\t\t\t\terr = kclient.Get(context.TODO(), clientPodName, clientPod)\n\t\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\t\tt.Logf(\"waiting for %q: to be deleted\", clientPodName)\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t\treturn true, nil\n\t\t\t})\n\t\t\tclientPod = clientPodSpec.DeepCopy()\n\t\t\tif err := kclient.Create(context.TODO(), clientPod); err != nil {\n\t\t\t\tt.Fatalf(\"failed to create pod %q: %v\", clientPodName, err)\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to verify connectivity with workload with address: %s using internal curl client. 
Curl Pod Logs:\\n%s\", address, curlPodLogs)\n\t}\n}", "func TestRequestIPs(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.20\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.10.20\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.20'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 3 {\n\t\t\tt.Error(\"Expected service to receive exactly three ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tfirst := false\n\t\tsecond := 
false\n\t\tthird := false\n\n\t\tfor _, ingress := range svc.Status.LoadBalancer.Ingress {\n\t\t\tswitch ingress.IP {\n\t\t\tcase \"10.0.10.21\":\n\t\t\t\tfirst = true\n\t\t\tcase \"10.0.10.22\":\n\t\t\t\tsecond = true\n\t\t\tcase \"10.0.10.23\":\n\t\t\t\tthird = true\n\t\t\tdefault:\n\t\t\t\tt.Error(\"Unexpected ingress IP\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tif !first {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.21'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !second {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.22'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !third {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.23'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tserviceB := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-b\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceBUID,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tciliumSvcLBIPSAnnotation: \"10.0.10.22,10.0.10.23\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Create(context.Background(), serviceB, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-c\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif 
len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse {\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Reason != \"already_allocated\" {\n\t\t\tt.Error(\"Expected condition reason to be 'already_allocated'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t// request an already allocated IP\n\tserviceC := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-c\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceCUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Create(context.Background(), serviceC, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func verifyExternalIngressController(t *testing.T, name types.NamespacedName, hostname, address string) {\n\tt.Helper()\n\techoPod := buildEchoPod(name.Name, name.Namespace)\n\tif err := kclient.Create(context.TODO(), echoPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoPod); err != nil {\n\t\t\tt.Fatalf(\"failed to delete pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t\t}\n\t}()\n\n\techoService := buildEchoService(echoPod.Name, echoPod.Namespace, echoPod.ObjectMeta.Labels)\n\tif err := kclient.Create(context.TODO(), echoService); err != nil {\n\t\tt.Fatalf(\"failed to create service %s/%s: 
%v\", echoService.Namespace, echoService.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoService); err != nil {\n\t\t\tt.Fatalf(\"failed to delete service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t\t}\n\t}()\n\n\techoRoute := buildRouteWithHost(echoPod.Name, echoPod.Namespace, echoService.Name, hostname)\n\tif err := kclient.Create(context.TODO(), echoRoute); err != nil {\n\t\tt.Fatalf(\"failed to create route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoRoute); err != nil {\n\t\t\tt.Fatalf(\"failed to delete route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t\t}\n\t}()\n\n\t// If we have a DNS as an external IP address, make sure we can resolve it before moving on.\n\t// This just limits the number of \"could not resolve host\" errors which can be confusing.\n\tif net.ParseIP(address) == nil {\n\t\tif err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {\n\t\t\t_, err := net.LookupIP(address)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"waiting for loadbalancer domain %s to resolve...\", address)\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}); err != nil {\n\t\t\tt.Fatalf(\"loadbalancer domain %s was unable to resolve:\", address)\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, fmt.Sprintf(\"http://%s\", address), nil)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build client request: %v\", err)\n\t}\n\t// we use HOST header to map to the domain associated on the ingresscontroller.\n\t// This ensures our http call is routed to the correct router.\n\treq.Host = hostname\n\n\thttpClient := http.Client{Timeout: 5 * time.Second}\n\terr = waitForHTTPClientCondition(t, &httpClient, req, 10*time.Second, 10*time.Minute, func(r *http.Response) bool {\n\t\tif r.StatusCode == http.StatusOK {\n\t\t\tt.Logf(\"verified connectivity with workload with req %v and response 
%v\", req.URL, r.StatusCode)\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to verify connectivity with workload with reqURL %s using external client: %v\", req.URL, err)\n\t}\n}", "func TestRemoveRequestedIP(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tsvc1 := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"io.cilium/lb-ipam-ips\": \"10.0.10.124,10.0.10.125\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.123\",\n\t\t},\n\t}\n\n\tfixture.coreCS.Tracker().Add(\n\t\tsvc1,\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 3 {\n\t\t\tt.Error(\"Expected service to receive exactly three ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 2 {\n\t\t\tt.Error(\"Expected service to receive exactly two ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn 
true\n\t}, time.Second)\n\n\tsvc1 = svc1.DeepCopy()\n\tsvc1.Annotations = map[string]string{\n\t\t\"io.cilium/lb-ipam-ips\": \"10.0.10.124\",\n\t}\n\n\t_, err := fixture.svcClient.Services(svc1.Namespace).Update(context.Background(), svc1, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.123\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.123' to be allocated\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.124\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.124' to be allocated\")\n\t}\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.125\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.125' to be released\")\n\t}\n}", "func Test_desiredIngressClass(t *testing.T) {\n\tscope := \"Cluster\"\n\tmakeIngressClass := func(icName string, annotateAsDefault bool) *networkingv1.IngressClass {\n\t\tapiGroup := \"operator.openshift.io\"\n\t\tname := \"openshift-\" + icName\n\t\tclass := networkingv1.IngressClass{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: name,\n\t\t\t},\n\t\t\tSpec: networkingv1.IngressClassSpec{\n\t\t\t\tController: routev1.IngressToRouteIngressClassControllerName,\n\t\t\t\tParameters: &networkingv1.IngressClassParametersReference{\n\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\tKind: \"IngressController\",\n\t\t\t\t\tName: icName,\n\t\t\t\t\tScope: &scope,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif annotateAsDefault {\n\t\t\tclass.Annotations = map[string]string{\n\t\t\t\t\"ingressclass.kubernetes.io/is-default-class\": \"true\",\n\t\t\t}\n\t\t}\n\t\treturn &class\n\t}\n\ttestCases := []struct {\n\t\tdescription string\n\n\t\thaveIngressController bool\n\t\tingressControllerName string\n\t\tingressClasses 
[]networkingv1.IngressClass\n\n\t\texpectWant bool\n\t\texpectIngressClass *networkingv1.IngressClass\n\t}{\n\t\t{\n\t\t\tdescription: \"no ingresscontroller\",\n\t\t\thaveIngressController: false,\n\t\t\texpectWant: false,\n\t\t},\n\t\t{\n\t\t\tdescription: \"custom ingresscontroller when no ingressclasses exist\",\n\t\t\thaveIngressController: true,\n\t\t\tingressControllerName: \"custom\",\n\t\t\tingressClasses: []networkingv1.IngressClass{},\n\t\t\texpectWant: true,\n\t\t\texpectIngressClass: makeIngressClass(\"custom\", false),\n\t\t},\n\t\t{\n\t\t\tdescription: \"custom ingresscontroller when its ingressclass already exists\",\n\t\t\thaveIngressController: true,\n\t\t\tingressControllerName: \"custom\",\n\t\t\tingressClasses: []networkingv1.IngressClass{\n\t\t\t\t*makeIngressClass(\"custom\", false),\n\t\t\t},\n\t\t\texpectWant: true,\n\t\t\texpectIngressClass: makeIngressClass(\"custom\", false),\n\t\t},\n\t\t{\n\t\t\tdescription: \"custom ingresscontroller when its ingressclass already exists and is annotated as default\",\n\t\t\thaveIngressController: true,\n\t\t\tingressControllerName: \"custom\",\n\t\t\tingressClasses: []networkingv1.IngressClass{\n\t\t\t\t*makeIngressClass(\"custom\", true),\n\t\t\t},\n\t\t\texpectWant: true,\n\t\t\t// desired doesn't have the annotation, but that's all\n\t\t\t// right because the update logic ignores the user-set\n\t\t\t// annotation.\n\t\t\texpectIngressClass: makeIngressClass(\"custom\", false),\n\t\t},\n\t\t{\n\t\t\tdescription: \"default ingresscontroller when no default ingressclass exists\",\n\t\t\thaveIngressController: true,\n\t\t\tingressControllerName: \"default\",\n\t\t\tingressClasses: []networkingv1.IngressClass{},\n\t\t\texpectWant: true,\n\t\t\t// TODO This test case expects the default ingressclass\n\t\t\t// not to be annotated as default because doing so\n\t\t\t// breaks \"[sig-network] IngressClass [Feature:Ingress]\n\t\t\t// should not set default value if no default\n\t\t\t// IngressClass\"; we need 
to fix that test and then\n\t\t\t// update this test case.\n\t\t\texpectIngressClass: makeIngressClass(\"default\", false),\n\t\t},\n\t\t{\n\t\t\tdescription: \"default ingresscontroller when some custom ingressclass exists and is annotated as default\",\n\t\t\thaveIngressController: true,\n\t\t\tingressControllerName: \"default\",\n\t\t\tingressClasses: []networkingv1.IngressClass{\n\t\t\t\t*makeIngressClass(\"custom\", true),\n\t\t\t},\n\t\t\texpectWant: true,\n\t\t\texpectIngressClass: makeIngressClass(\"default\", false),\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.description, func(t *testing.T) {\n\t\t\twant, class := desiredIngressClass(tc.haveIngressController, tc.ingressControllerName, tc.ingressClasses)\n\t\t\tif want != tc.expectWant {\n\t\t\t\tt.Errorf(\"expected desiredIngressClass to return %t, got %t\", tc.expectWant, want)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(class, tc.expectIngressClass) {\n\t\t\t\tt.Errorf(\"expected desiredIngressClass to return %+v, got %+v\", tc.expectIngressClass, class)\n\t\t\t}\n\t\t})\n\t}\n}", "func testIngressOverlapCIDRBlocks() []*TestStep {\n\texceptBuilder1 := make([]string, 1)\n\texceptBuilder3 := make([]string, 1)\n\n\t// It does not matter if podCIDR of pod \"x/a\" and \"y/a\" are same\n\t// Build simple allowCIDR block for testing purposes.\n\tallowCIDRStr := podIPs[\"y/a\"] + \"/16\"\n\t_, allowCIDR, err := net.ParseCIDR(allowCIDRStr)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to parse CIDR string %s\", allowCIDRStr)\n\t}\n\tallowCIDRStr = allowCIDR.String()\n\n\texceptBuilder1[0] = podIPs[\"y/a\"] + \"/24\"\n\tvar exceptCIDR *net.IPNet\n\t_, exceptCIDR, err = net.ParseCIDR(exceptBuilder1[0])\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to parse CIDR string %s\", exceptBuilder1[0])\n\t}\n\texceptBuilder1[0] = exceptCIDR.String()\n\n\t// Build exceptCIDR to block just the pod \"y/a\"\n\texceptBuilder3[0] = podIPs[\"y/a\"] + \"/32\"\n\n\tallowWithExcept := func() *Reachability {\n\t\tr := 
NewReachability(allPods, true)\n\t\tr.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\tr.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\t// Adding connectivity for all the pods in allowCIDR - exceptCIDR\n\t\tfor eachPod := range podIPs {\n\t\t\tif allowCIDR.Contains(net.ParseIP(podIPs[eachPod])) && !exceptCIDR.Contains(net.ParseIP(podIPs[eachPod])) {\n\t\t\t\tr.Expect(Pod(eachPod), Pod(\"x/a\"), true)\n\t\t\t}\n\t\t}\n\t\treturn r\n\t}\n\toverlapAllow := func() *Reachability {\n\t\tr := NewReachability(allPods, true)\n\t\tr.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\tr.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\t// Adding connectivity for all the pods in the created allowCIDR\n\t\tfor eachPod := range podIPs {\n\t\t\tif allowCIDR.Contains(net.ParseIP(podIPs[eachPod])) {\n\t\t\t\tr.Expect(Pod(eachPod), Pod(\"x/a\"), true)\n\t\t\t}\n\t\t}\n\t\treturn r\n\t}\n\toverlapAllowAndExcept := func() *Reachability {\n\t\tr := NewReachability(allPods, true)\n\t\tr.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\tr.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\t// Adding connectivity for all pods in created allowCIDR except pod \"y/a\"\n\t\tfor eachPod := range podIPs {\n\t\t\tif allowCIDR.Contains(net.ParseIP(podIPs[eachPod])) {\n\t\t\t\tr.Expect(Pod(eachPod), Pod(\"x/a\"), true)\n\t\t\t}\n\t\t}\n\t\t//Override the connectivity to pod \"y/a\"\n\t\tr.Expect(Pod(\"y/a\"), Pod(\"x/a\"), false)\n\t\treturn r\n\t}\n\n\t// Policy-1 is added with allow and except CIDR\n\tpolicyName := \"policy-that-has-except-block\"\n\tbuilder1 := &NetworkPolicySpecBuilder{}\n\tbuilder1 = builder1.SetName(\"x\", policyName).SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder1.SetTypeIngress()\n\tbuilder1.AddIngress(v1.ProtocolTCP, &p80, nil, &allowCIDRStr, exceptBuilder1, nil, nil, nil, nil)\n\tpolicy1 := builder1.Get()\n\t// Policy-2 is added with allow CIDR that is same as except CIDR of policy-1 (overlap policy)\n\tpolicyName2 := \"overlap-policy\"\n\tbuilder2 := 
&NetworkPolicySpecBuilder{}\n\tbuilder2 = builder2.SetName(\"x\", policyName2).SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder2.SetTypeIngress()\n\tbuilder2.AddIngress(v1.ProtocolTCP, &p80, nil, &exceptBuilder1[0], nil, nil, nil, nil, nil)\n\tpolicy2 := builder2.Get()\n\t// Update policy-2 with exceptCIDR not allowing only pod \"y/a\"\n\tbuilder3 := &NetworkPolicySpecBuilder{}\n\t// by preserving the same name, this policy will also serve to test the 'updated policy with CIDRs\".\n\tbuilder3 = builder3.SetName(\"x\", policyName2).SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder3.SetTypeIngress()\n\tbuilder3.AddIngress(v1.ProtocolTCP, &p80, nil, &exceptBuilder1[0], exceptBuilder3, nil, nil, nil, nil)\n\tpolicy3 := builder3.Get()\n\n\treturn []*TestStep{\n\t\t{\n\t\t\t\"Pods in built exceptCIDR -- not allowed\",\n\t\t\tallowWithExcept(), // exceptCIDR built from pod \"y/a\"\n\t\t\tpolicy1,\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"All pods in built allowCIDR -- allowed\",\n\t\t\toverlapAllow(), // allowCIDR is same as exceptCIDR from policy-1\n\t\t\tpolicy2,\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"Only pod y/a -- not allowed\",\n\t\t\toverlapAllowAndExcept(), // exceptCIDR contains only IP of pod \"y/a\"\n\t\t\tpolicy3,\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t}\n}", "func TestMakeIngressRuleZeroPercentTarget(t *testing.T) {\n\ttargets := []traffic.RevisionTarget{{\n\t\tTrafficTarget: v1.TrafficTarget{\n\t\t\tConfigurationName: \"config\",\n\t\t\tRevisionName: \"revision-dolphin\",\n\t\t\tPercent: ptr.Int64(100),\n\t\t},\n\t}, {\n\t\tTrafficTarget: v1.TrafficTarget{\n\t\t\tConfigurationName: \"new-config\",\n\t\t\tRevisionName: \"new-revision-orca\",\n\t\t\tPercent: ptr.Int64(0),\n\t\t},\n\t}}\n\tdomains := sets.NewString(\"test.org\")\n\ttc := &traffic.Config{\n\t\tTargets: map[string]traffic.RevisionTargets{\n\t\t\ttraffic.DefaultTarget: targets,\n\t\t},\n\t}\n\tro := tc.BuildRollout()\n\trule := makeIngressRule(domains, 
ns,\n\t\tnetv1alpha1.IngressVisibilityExternalIP, targets, ro.RolloutsByTag(traffic.DefaultTarget), false /* internal encryption */)\n\texpected := netv1alpha1.IngressRule{\n\t\tHosts: []string{\"test.org\"},\n\t\tHTTP: &netv1alpha1.HTTPIngressRuleValue{\n\t\t\tPaths: []netv1alpha1.HTTPIngressPath{{\n\t\t\t\tSplits: []netv1alpha1.IngressBackendSplit{{\n\t\t\t\t\tIngressBackend: netv1alpha1.IngressBackend{\n\t\t\t\t\t\tServiceNamespace: ns,\n\t\t\t\t\t\tServiceName: \"revision-dolphin\",\n\t\t\t\t\t\tServicePort: intstr.FromInt(80),\n\t\t\t\t\t},\n\t\t\t\t\tPercent: 100,\n\t\t\t\t\tAppendHeaders: map[string]string{\n\t\t\t\t\t\t\"Knative-Serving-Revision\": \"revision-dolphin\",\n\t\t\t\t\t\t\"Knative-Serving-Namespace\": ns,\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t\tVisibility: netv1alpha1.IngressVisibilityExternalIP,\n\t}\n\n\tif !cmp.Equal(expected, rule) {\n\t\tt.Error(\"Unexpected rule (-want, +got):\", cmp.Diff(expected, rule))\n\t}\n}", "func testEnsureV1Beta1(fx *e2e.Framework) {\n\tt := fx.T()\n\tnamespace := \"002-ingress-ensure-v1beta1\"\n\n\tfx.CreateNamespace(namespace)\n\tdefer fx.DeleteNamespace(namespace)\n\n\tfx.Fixtures.Echo.Deploy(namespace, \"ingress-conformance-echo\")\n\n\tingressHost := \"v1beta1.projectcontour.io\"\n\ti := &v1beta1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"echo\",\n\t\t},\n\t\tSpec: v1beta1.IngressSpec{\n\t\t\tRules: []v1beta1.IngressRule{\n\t\t\t\t{\n\t\t\t\t\tHost: ingressHost,\n\t\t\t\t\tIngressRuleValue: v1beta1.IngressRuleValue{\n\t\t\t\t\t\tHTTP: &v1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\t\tPaths: []v1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tBackend: v1beta1.IngressBackend{\n\t\t\t\t\t\t\t\t\t\tServiceName: \"ingress-conformance-echo\",\n\t\t\t\t\t\t\t\t\t\tServicePort: intstr.FromInt(80),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\trequire.NoError(t, 
fx.Client.Create(context.TODO(), i))\n\n\tres, ok := fx.HTTP.RequestUntil(&e2e.HTTPRequestOpts{\n\t\tHost: ingressHost,\n\t\tPath: \"/echo\",\n\t\tCondition: e2e.HasStatusCode(200),\n\t})\n\trequire.Truef(t, ok, \"expected 200 response code, got %d\", res.StatusCode)\n}", "func (m *MockAPI) IsInstallable(arg0 *models.Host) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsInstallable\", arg0)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestChangeServiceType(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\t// This existing ClusterIP service should be ignored\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeClusterIP,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"No service updates expected\")\n\n\t\treturn false\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tawait.Block()\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tvar assignedIP string\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn 
true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tassignedIP = svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tupdatedService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Update(context.Background(), updatedService, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 0 {\n\t\t\tt.Error(\"Expected service to have no conditions\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tupdatedService = &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: 
\"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeNodePort,\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Update(context.Background(), updatedService, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(assignedIP)) {\n\t\tt.Fatal(\"Expected assigned IP to be released\")\n\t}\n}", "func TestIngressTranslatorWithHTTPOptionDisabled(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tin *v1alpha1.Ingress\n\t\tstate []runtime.Object\n\t\twant *translatedIngress\n\t}{{\n\t\tname: \"tls redirect\",\n\t\tin: ing(\"testspace\", \"testname\", func(ing *v1alpha1.Ingress) {\n\t\t\ting.Spec.TLS = []v1alpha1.IngressTLS{{\n\t\t\t\tHosts: []string{\"foo.example.com\"},\n\t\t\t\tSecretNamespace: \"secretns\",\n\t\t\t\tSecretName: \"secretname\",\n\t\t\t}}\n\t\t\ting.Spec.HTTPOption = v1alpha1.HTTPOptionRedirected\n\t\t}),\n\t\tstate: []runtime.Object{\n\t\t\tns(\"testspace\"),\n\t\t\tsvc(\"servicens\", \"servicename\"),\n\t\t\teps(\"servicens\", \"servicename\"),\n\t\t\tsecret,\n\t\t},\n\t\twant: func() *translatedIngress {\n\t\t\tvHosts := []*route.VirtualHost{\n\t\t\t\tenvoy.NewVirtualHost(\n\t\t\t\t\t\"(testspace/testname).Rules[0]\",\n\t\t\t\t\t[]string{\"foo.example.com\", \"foo.example.com:*\"},\n\t\t\t\t\t[]*route.Route{envoy.NewRoute(\n\t\t\t\t\t\t\"(testspace/testname).Rules[0].Paths[/test]\",\n\t\t\t\t\t\t[]*route.HeaderMatcher{{\n\t\t\t\t\t\t\tName: \"testheader\",\n\t\t\t\t\t\t\tHeaderMatchSpecifier: &route.HeaderMatcher_StringMatch{\n\t\t\t\t\t\t\t\tStringMatch: &envoymatcherv3.StringMatcher{\n\t\t\t\t\t\t\t\t\tMatchPattern: &envoymatcherv3.StringMatcher_Exact{\n\t\t\t\t\t\t\t\t\t\tExact: 
\"foo\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t\t\"/test\",\n\t\t\t\t\t\t[]*route.WeightedCluster_ClusterWeight{\n\t\t\t\t\t\t\tenvoy.NewWeightedCluster(\"servicens/servicename\", 100, map[string]string{\"baz\": \"gna\"}),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t0,\n\t\t\t\t\t\tmap[string]string{\"foo\": \"bar\"},\n\t\t\t\t\t\t\"rewritten.example.com\"),\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t}\n\t\t\treturn &translatedIngress{\n\t\t\t\tname: types.NamespacedName{\n\t\t\t\t\tNamespace: \"testspace\",\n\t\t\t\t\tName: \"testname\",\n\t\t\t\t},\n\t\t\t\tsniMatches: []*envoy.SNIMatch{{\n\t\t\t\t\tHosts: []string{\"foo.example.com\"},\n\t\t\t\t\tCertSource: types.NamespacedName{\n\t\t\t\t\t\tNamespace: \"secretns\",\n\t\t\t\t\t\tName: \"secretname\",\n\t\t\t\t\t},\n\t\t\t\t\tCertificateChain: cert,\n\t\t\t\t\tPrivateKey: privateKey,\n\t\t\t\t}},\n\t\t\t\tclusters: []*v3.Cluster{\n\t\t\t\t\tenvoy.NewCluster(\n\t\t\t\t\t\t\"servicens/servicename\",\n\t\t\t\t\t\t5*time.Second,\n\t\t\t\t\t\tlbEndpoints,\n\t\t\t\t\t\tfalse,\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\tv3.Cluster_STATIC,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\texternalVirtualHosts: vHosts,\n\t\t\t\texternalTLSVirtualHosts: vHosts,\n\t\t\t\tinternalVirtualHosts: vHosts,\n\t\t\t}\n\t\t}(),\n\t}, {\n\t\t// cluster local is not affected by HTTPOption.\n\t\tname: \"tls redirect cluster local\",\n\t\tin: ing(\"testspace\", \"testname\", func(ing *v1alpha1.Ingress) {\n\t\t\ting.Spec.TLS = []v1alpha1.IngressTLS{{\n\t\t\t\tHosts: []string{\"foo.example.com\"},\n\t\t\t\tSecretNamespace: \"secretns\",\n\t\t\t\tSecretName: \"secretname\",\n\t\t\t}}\n\t\t\ting.Spec.HTTPOption = v1alpha1.HTTPOptionRedirected\n\t\t\ting.Spec.Rules[0].Visibility = v1alpha1.IngressVisibilityClusterLocal\n\t\t}),\n\t\tstate: []runtime.Object{\n\t\t\tns(\"testspace\"),\n\t\t\tsvc(\"servicens\", \"servicename\"),\n\t\t\teps(\"servicens\", \"servicename\"),\n\t\t\tsecret,\n\t\t},\n\t\twant: func() *translatedIngress {\n\t\t\tvHosts 
:= []*route.VirtualHost{\n\t\t\t\tenvoy.NewVirtualHost(\n\t\t\t\t\t\"(testspace/testname).Rules[0]\",\n\t\t\t\t\t[]string{\"foo.example.com\", \"foo.example.com:*\"},\n\t\t\t\t\t[]*route.Route{envoy.NewRoute(\n\t\t\t\t\t\t\"(testspace/testname).Rules[0].Paths[/test]\",\n\t\t\t\t\t\t[]*route.HeaderMatcher{{\n\t\t\t\t\t\t\tName: \"testheader\",\n\t\t\t\t\t\t\tHeaderMatchSpecifier: &route.HeaderMatcher_StringMatch{\n\t\t\t\t\t\t\t\tStringMatch: &envoymatcherv3.StringMatcher{\n\t\t\t\t\t\t\t\t\tMatchPattern: &envoymatcherv3.StringMatcher_Exact{\n\t\t\t\t\t\t\t\t\t\tExact: \"foo\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t\t\"/test\",\n\t\t\t\t\t\t[]*route.WeightedCluster_ClusterWeight{\n\t\t\t\t\t\t\tenvoy.NewWeightedCluster(\"servicens/servicename\", 100, map[string]string{\"baz\": \"gna\"}),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t0,\n\t\t\t\t\t\tmap[string]string{\"foo\": \"bar\"},\n\t\t\t\t\t\t\"rewritten.example.com\"),\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t}\n\n\t\t\treturn &translatedIngress{\n\t\t\t\tname: types.NamespacedName{\n\t\t\t\t\tNamespace: \"testspace\",\n\t\t\t\t\tName: \"testname\",\n\t\t\t\t},\n\t\t\t\tsniMatches: []*envoy.SNIMatch{{\n\t\t\t\t\tHosts: []string{\"foo.example.com\"},\n\t\t\t\t\tCertSource: types.NamespacedName{\n\t\t\t\t\t\tNamespace: \"secretns\",\n\t\t\t\t\t\tName: \"secretname\",\n\t\t\t\t\t},\n\t\t\t\t\tCertificateChain: cert,\n\t\t\t\t\tPrivateKey: privateKey,\n\t\t\t\t}},\n\t\t\t\tclusters: []*v3.Cluster{\n\t\t\t\t\tenvoy.NewCluster(\n\t\t\t\t\t\t\"servicens/servicename\",\n\t\t\t\t\t\t5*time.Second,\n\t\t\t\t\t\tlbEndpoints,\n\t\t\t\t\t\tfalse,\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\tv3.Cluster_STATIC,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\texternalVirtualHosts: []*route.VirtualHost{},\n\t\t\t\texternalTLSVirtualHosts: []*route.VirtualHost{},\n\t\t\t\tinternalVirtualHosts: vHosts,\n\t\t\t}\n\t\t}(),\n\t}}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) 
{\n\t\t\tt.Setenv(\"KOURIER_HTTPOPTION_DISABLED\", \"true\")\n\t\t\tcfg := defaultConfig.DeepCopy()\n\t\t\tctx := (&testConfigStore{config: cfg}).ToContext(context.Background())\n\t\t\tkubeclient := fake.NewSimpleClientset(test.state...)\n\n\t\t\ttranslator := NewIngressTranslator(\n\t\t\t\tfunc(ns, name string) (*corev1.Secret, error) {\n\t\t\t\t\treturn kubeclient.CoreV1().Secrets(ns).Get(ctx, name, metav1.GetOptions{})\n\t\t\t\t},\n\t\t\t\tfunc(ns, name string) (*corev1.Endpoints, error) {\n\t\t\t\t\treturn kubeclient.CoreV1().Endpoints(ns).Get(ctx, name, metav1.GetOptions{})\n\t\t\t\t},\n\t\t\t\tfunc(ns, name string) (*corev1.Service, error) {\n\t\t\t\t\treturn kubeclient.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})\n\t\t\t\t},\n\t\t\t\tfunc(name string) (*corev1.Namespace, error) {\n\t\t\t\t\treturn kubeclient.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})\n\t\t\t\t},\n\t\t\t\t&pkgtest.FakeTracker{},\n\t\t\t)\n\n\t\t\tgot, err := translator.translateIngress(ctx, test.in, false)\n\t\t\tassert.NilError(t, err)\n\t\t\tassert.DeepEqual(t, got, test.want,\n\t\t\t\tcmp.AllowUnexported(translatedIngress{}),\n\t\t\t\tprotocmp.Transform(),\n\t\t\t)\n\t\t})\n\t}\n}", "func TestRouteHTTP2EnableAndDisableIngressController(t *testing.T) {\n\tif err := waitForIngressControllerCondition(t, kclient, 5*time.Minute, defaultName, defaultAvailableConditions...); err != nil {\n\t\tt.Fatalf(\"failed to observe expected conditions: %v\", err)\n\t}\n\n\tic, err := http2GetIngressController(t, kclient, 1*time.Minute)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get ingress controller: %v\", err)\n\t}\n\n\t// By default the router should not have http/2 enabled\n\tif err := waitForRouterDeploymentHTTP2Enabled(t, kclient, routerDeployTimeout, ic, false); err != nil {\n\t\tt.Fatalf(\"expected router deployment to have http/2 disabled: %v\", err)\n\t}\n\n\tif err := setHTTP2EnabledForIngressController(t, kclient, 1*time.Minute, true, ic); err != nil 
{\n\t\tt.Fatalf(\"failed to update ingresscontroller: %v\", err)\n\t}\n\n\tif err := waitForIngressControllerCondition(t, kclient, 5*time.Minute, defaultName, defaultAvailableConditions...); err != nil {\n\t\tt.Fatalf(\"failed to observe expected conditions: %v\", err)\n\t}\n\n\tif err := waitForRouterDeploymentHTTP2Enabled(t, kclient, routerDeployTimeout, ic, true); err != nil {\n\t\tt.Fatalf(\"expected router deployment to have http/2 enabled: %v\", err)\n\t}\n\n\tif err := setHTTP2EnabledForIngressController(t, kclient, 1*time.Minute, false, ic); err != nil {\n\t\tt.Fatalf(\"failed to update ingresscontroller: %v\", err)\n\t}\n\n\tif err := waitForRouterDeploymentHTTP2Enabled(t, kclient, routerDeployTimeout, ic, false); err != nil {\n\t\tt.Fatalf(\"expected router deployment to have http/2 disabled: %v\", err)\n\t}\n}", "func TestK8gbRepeatedlyRecreatedFromIngress(t *testing.T) {\n\tt.Parallel()\n\t// name of ingress and gslb\n\tconst name = \"test-gslb-failover-simple\"\n\n\tassertStrategy := func(t *testing.T, options *k8s.KubectlOptions) {\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.splitBrainThresholdSeconds\", \"300\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.dnsTtlSeconds\", \"30\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.primaryGeoTag\", settings.PrimaryGeoTag)\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.type\", \"failover\")\n\t}\n\n\t// Path to the Kubernetes resource config we will test\n\tingressResourcePath, err := filepath.Abs(\"../examples/ingress-annotation-failover-simple.yaml\")\n\trequire.NoError(t, err)\n\n\t// To ensure we can reuse the resource config on the same cluster to test different scenarios, we setup a unique\n\t// namespace for the resources for this test.\n\t// Note that namespaces must be lowercase.\n\tnamespaceName := fmt.Sprintf(\"k8gb-test-repeatedly-recreated-from-ingress-%s\", strings.ToLower(random.UniqueId()))\n\n\t// Here we choose to use the defaults, 
which is:\n\t// - HOME/.kube/config for the kubectl config file\n\t// - Current context of the kubectl config file\n\t// - Random namespace\n\toptions := k8s.NewKubectlOptions(\"\", \"\", namespaceName)\n\n\tk8s.CreateNamespace(t, options, namespaceName)\n\n\tdefer k8s.DeleteNamespace(t, options, namespaceName)\n\n\tdefer k8s.KubectlDelete(t, options, ingressResourcePath)\n\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress := k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\n\t// assert Gslb strategy has expected values\n\tassertStrategy(t, options)\n\n\tk8s.KubectlDelete(t, options, ingressResourcePath)\n\n\tutils.AssertGslbDeleted(t, options, ingress.Name)\n\n\t// recreate ingress\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress = k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\t// assert Gslb strategy has expected values\n\tassertStrategy(t, options)\n}", "func (mr *MockValidaterMockRecorder) CheckNoConflictsInIngress(ingress interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CheckNoConflictsInIngress\", reflect.TypeOf((*MockValidater)(nil).CheckNoConflictsInIngress), ingress)\n}", "func TestRequestIPWithMismatchedLabel(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"blue\",\n\t\t},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: 
serviceAUID,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"color\": \"red\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.123\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\t\tif svc.Status.Conditions[0].Reason != \"pool_selector_mismatch\" {\n\t\t\tt.Error(\"Expected service to receive 'pool_selector_mismatch' condition\")\n\t\t}\n\n\t\treturn true\n\t}, 1*time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected status update of service\")\n\t}\n}", "func TestReallocOnInit(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\t// Initially request only an IPv4\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"192.168.1.12\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || 
action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP == \"192.168.1.12\" {\n\t\t\tt.Error(\"Expected ingress IP to not be the initial, bad IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service to be updated\")\n\t}\n}", "func (m *MockProc) OnSvcAllHostReplace(arg0 []*host.Host) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnSvcAllHostReplace\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestServicesAPIWrongControllerGWClass(t *testing.T) {\n\t// create gateway, nothing happens\n\t// create gatewayclass, VS created\n\t// update to bad gatewayclass (wrong controller), VS deleted\n\tg := gomega.NewGomegaWithT(t)\n\n\tgwClassName, gatewayName, ns := \"avi-lb\", \"my-gateway\", \"default\"\n\tmodelName := \"admin/cluster--default-my-gateway\"\n\n\tSetupGateway(t, gatewayName, ns, gwClassName)\n\tSetupSvcApiService(t, \"svc\", ns, gatewayName, ns, \"TCP\")\n\n\tSetupGatewayClass(t, gwClassName, 
lib.SvcApiAviGatewayController, \"\")\n\n\tg.Eventually(func() string {\n\t\tgw, _ := SvcAPIClient.NetworkingV1alpha1().Gateways(ns).Get(context.TODO(), gatewayName, metav1.GetOptions{})\n\t\tif len(gw.Status.Addresses) > 0 {\n\t\t\treturn gw.Status.Addresses[0].Value\n\t\t}\n\t\treturn \"\"\n\t}, 40*time.Second).Should(gomega.Equal(\"10.250.250.1\"))\n\n\tgwclassUpdate := FakeGWClass{\n\t\tName: gwClassName,\n\t\tController: \"xyz\",\n\t}.GatewayClass()\n\tgwclassUpdate.ResourceVersion = \"2\"\n\tif _, err := lib.AKOControlConfig().ServicesAPIClientset().NetworkingV1alpha1().GatewayClasses().Update(context.TODO(), gwclassUpdate, metav1.UpdateOptions{}); err != nil {\n\t\tt.Fatalf(\"error in updating GatewayClass: %v\", err)\n\t}\n\n\tg.Eventually(func() int {\n\t\tgw, _ := SvcAPIClient.NetworkingV1alpha1().Gateways(ns).Get(context.TODO(), gatewayName, metav1.GetOptions{})\n\t\treturn len(gw.Status.Addresses)\n\t}, 40*time.Second).Should(gomega.Equal(0))\n\tg.Eventually(func() int {\n\t\tsvc, _ := KubeClient.CoreV1().Services(ns).Get(context.TODO(), \"svc\", metav1.GetOptions{})\n\t\treturn len(svc.Status.LoadBalancer.Ingress)\n\t}, 40*time.Second).Should(gomega.Equal(0))\n\n\tTeardownAdvLBService(t, \"svc\", ns)\n\tTeardownGateway(t, gatewayName, ns)\n\tTeardownGatewayClass(t, gwClassName)\n\tVerifyGatewayVSNodeDeletion(g, modelName)\n}", "func TestConflictResolution(t *testing.T) {\n\tpoolB := mkPool(poolBUID, \"pool-b\", []string{\"10.0.10.0/24\", \"FF::0/48\"})\n\tpoolB.CreationTimestamp = meta_v1.Date(2022, 10, 16, 13, 30, 00, 0, time.UTC)\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t\tpoolB,\n\t}, true, false, nil)\n\n\tawait := fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\tif pool.Name != 
\"pool-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif !isPoolConflicting(pool) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Pool B has not been marked conflicting\")\n\t}\n\n\t// All ranges of a conflicting pool must be disabled\n\tpoolBRanges, _ := fixture.lbIPAM.rangesStore.GetRangesForPool(\"pool-b\")\n\tfor _, r := range poolBRanges {\n\t\tif !r.internallyDisabled {\n\t\t\tt.Fatalf(\"Range '%s' from pool B hasn't been disabled\", ipNetStr(r.allocRange.CIDR()))\n\t\t}\n\t}\n\n\t// Phase 2, resolving the conflict\n\n\tawait = fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\tif pool.Name != \"pool-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif isPoolConflicting(pool) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolB, err := fixture.poolClient.Get(context.Background(), \"pool-b\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(poolB)\n\t}\n\n\t// Remove the conflicting range\n\tpoolB.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: cilium_api_v2alpha1.IPv4orIPv6CIDR(\"FF::0/48\"),\n\t\t},\n\t}\n\n\t_, err = fixture.poolClient.Update(context.Background(), poolB, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Pool b has not de-conflicted\")\n\t}\n}", "func testDefaultDenyIngressPolicy(t *testing.T, data *TestData) {\n\tserverNode := workerNodeName(1)\n\tserverNodeIP := workerNodeIP(1)\n\tserverPort := int32(80)\n\t_, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, \"test-server-\", serverNode, testNamespace, false)\n\tdefer cleanupFunc()\n\n\tservice, err := data.createService(\"nginx\", 
testNamespace, serverPort, serverPort, map[string]string{\"app\": \"nginx\"}, false, false, corev1.ServiceTypeNodePort, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when creating nginx NodePort service: %v\", err)\n\t}\n\tdefer data.deleteService(service.Name)\n\n\t// client1 is a host network Pod and is on the same node as the server Pod, simulating kubelet probe traffic.\n\tclient1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, \"test-hostnetwork-client-can-connect-\", serverNode, testNamespace, true)\n\tdefer cleanupFunc()\n\n\t// client2 is a host network Pod and is on a different node from the server Pod, accessing the server Pod via the NodePort service.\n\tclient2Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, \"test-hostnetwork-client-cannot-connect-\", controlPlaneNodeName(), testNamespace, true)\n\tdefer cleanupFunc()\n\n\tspec := &networkingv1.NetworkPolicySpec{\n\t\tPodSelector: metav1.LabelSelector{},\n\t\tPolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},\n\t\tIngress: []networkingv1.NetworkPolicyIngressRule{},\n\t}\n\tnp, err := data.createNetworkPolicy(\"test-networkpolicy-deny-all-ingress\", spec)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when creating network policy: %v\", err)\n\t}\n\tdefer func() {\n\t\tif err = data.deleteNetworkpolicy(np); err != nil {\n\t\t\tt.Fatalf(\"Error when deleting network policy: %v\", err)\n\t\t}\n\t}()\n\n\tnpCheck := func(clientName, serverIP string, serverPort int32, wantErr bool) {\n\t\tif err = data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, serverPort); wantErr && err == nil {\n\t\t\tt.Fatalf(\"Pod %s should not be able to connect %s, but was able to connect\", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort)))\n\t\t} else if !wantErr && err != nil {\n\t\t\tt.Fatalf(\"Pod %s should be able to connect %s, but was not able to connect\", clientName, net.JoinHostPort(serverIP, 
fmt.Sprint(serverPort)))\n\t\t}\n\t}\n\n\t// Locally generated traffic can always access the Pods regardless of NetworkPolicy configuration.\n\tif clusterInfo.podV4NetworkCIDR != \"\" {\n\t\tnpCheck(client1Name, serverIPs.ipv4.String(), serverPort, false)\n\t}\n\tif clusterInfo.podV6NetworkCIDR != \"\" {\n\t\tnpCheck(client1Name, serverIPs.ipv6.String(), serverPort, false)\n\t}\n\n\tif testOptions.providerName == \"kind\" {\n\t\tt.Logf(\"Skipped testing NodePort traffic for TestDefaultDenyIngressPolicy because pkt_mark is not properly supported on OVS netdev datapath\")\n\t} else {\n\t\tif clusterInfo.podV4NetworkCIDR != \"\" {\n\t\t\tnpCheck(client2Name, serverIPs.ipv4.String(), serverPort, true)\n\t\t}\n\t\tif clusterInfo.podV6NetworkCIDR != \"\" {\n\t\t\tnpCheck(client2Name, serverIPs.ipv6.String(), serverPort, true)\n\t\t}\n\t\tnpCheck(client2Name, serverNodeIP, service.Spec.Ports[0].NodePort, true)\n\t}\n}", "func (s) TestRefuseStartWithExcludeAndWildCardAll(t *testing.T) {\n\tinvalidConfig := &config{\n\t\tProjectID: \"fake\",\n\t\tCloudLogging: &cloudLogging{\n\t\t\tClientRPCEvents: []clientRPCEvents{\n\t\t\t\t{\n\t\t\t\t\tMethods: []string{\"*\"},\n\t\t\t\t\tExclude: true,\n\t\t\t\t\tMaxMetadataBytes: 30,\n\t\t\t\t\tMaxMessageBytes: 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tinvalidConfigJSON, err := json.Marshal(invalidConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to convert config to JSON: %v\", err)\n\t}\n\toldObservabilityConfig := envconfig.ObservabilityConfig\n\toldObservabilityConfigFile := envconfig.ObservabilityConfigFile\n\tenvconfig.ObservabilityConfig = string(invalidConfigJSON)\n\tenvconfig.ObservabilityConfigFile = \"\"\n\tdefer func() {\n\t\tenvconfig.ObservabilityConfig = oldObservabilityConfig\n\t\tenvconfig.ObservabilityConfigFile = oldObservabilityConfigFile\n\t}()\n\t// If there is at least one invalid pattern, which should not be silently tolerated.\n\tif err := Start(context.Background()); err == nil {\n\t\tt.Fatalf(\"Invalid 
patterns not triggering error\")\n\t}\n}", "func TestAddRange(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.20.10\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tpoolA.Spec.Cidrs = append(poolA.Spec.Cidrs, cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\tCidr: \"10.0.20.0/24\",\n\t})\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.20.10\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.20.10'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, 
time.Second)\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func TestSkipOnError(t *testing.T) {\n\tkube, mock := setup(t)\n\tmock.deleteErr = errors.New(\"create failed\")\n\n\tdef := cluster.SyncDef{\n\t\tActions: []cluster.SyncAction{\n\t\t\tcluster.SyncAction{\n\t\t\t\tResourceID: \"fail in middle\",\n\t\t\t\tDelete: deploymentDef(\"should fail\"),\n\t\t\t\tApply: deploymentDef(\"skipped\"),\n\t\t\t},\n\t\t\tcluster.SyncAction{\n\t\t\t\tResourceID: \"proceed\",\n\t\t\t\tApply: deploymentDef(\"apply works\"),\n\t\t\t},\n\t\t},\n\t}\n\n\terr := kube.Sync(def)\n\tswitch err := err.(type) {\n\tcase cluster.SyncError:\n\t\tif _, ok := err[\"fail in middle\"]; !ok {\n\t\t\tt.Errorf(\"expected error for failing resource %q, but got %#v\", \"fail in middle\", err)\n\t\t}\n\tdefault:\n\t\tt.Errorf(\"expected sync error, got %#v\", err)\n\t}\n\n\texpected := []command{\n\t\tcommand{\"delete\", \"should fail\"},\n\t\t// skip to next resource after failure\n\t\tcommand{\"apply\", \"apply works\"},\n\t}\n\tif !reflect.DeepEqual(expected, mock.commands) {\n\t\tt.Errorf(\"expected commands:\\n%#v\\ngot:\\n%#v\", expected, mock.commands)\n\t}\n}", "func checkRequest(httpClient *dphttp.ClienterMock, callIndex int, expectedMethod, expectedURI string, expectedIfMatch string) {\n\tSo(httpClient.DoCalls()[callIndex].Req.URL.RequestURI(), ShouldEqual, expectedURI)\n\tSo(httpClient.DoCalls()[callIndex].Req.Method, ShouldEqual, http.MethodPatch)\n\tSo(httpClient.DoCalls()[callIndex].Req.Header.Get(dprequest.AuthHeaderKey), ShouldEqual, \"Bearer \"+testServiceToken)\n\tactualIfMatch := httpClient.DoCalls()[callIndex].Req.Header.Get(\"If-Match\")\n\tSo(actualIfMatch, ShouldResemble, expectedIfMatch)\n}", "func isCatchAllIngress(spec networking.IngressSpec) bool {\n\treturn spec.DefaultBackend != nil && 
len(spec.Rules) == 0\n}", "func TestCreateRetryConflictNoTagDiff(t *testing.T) {\n\tregistry := registryhostname.TestingRegistryHostnameRetriever(nil, \"\", testDefaultRegistryURL)\n\tfirstUpdate := true\n\trestInstance := &REST{\n\t\tstrategy: NewStrategy(registry),\n\t\timageRegistry: &fakeImageRegistry{\n\t\t\tcreateImage: func(ctx context.Context, image *imageapi.Image) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\timageStreamRegistry: &fakeImageStreamRegistry{\n\t\t\tgetImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {\n\t\t\t\tstream := validImageStream()\n\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\tTags: map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:original\"}}},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\treturn stream, nil\n\t\t\t},\n\t\t\tupdateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first update call, return a conflict to cause a retry of an\n\t\t\t\t// image stream whose tags haven't changed.\n\t\t\t\tif firstUpdate {\n\t\t\t\t\tfirstUpdate = false\n\t\t\t\t\treturn nil, errors.NewConflict(imagegroup.Resource(\"imagestreams\"), repo.Name, fmt.Errorf(\"resource modified\"))\n\t\t\t\t}\n\t\t\t\treturn repo, nil\n\t\t\t},\n\t\t},\n\t}\n\tobj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\tif obj == nil {\n\t\tt.Fatalf(\"expected a result\")\n\t}\n}", "func TestRetryNotRequired(t *testing.T) {\n\tcheck := assert.New(t)\n\tretryRequired := checkRetryRequired(http.StatusConflict)\n\tcheck.Equal(retryRequired, false)\n}", "func TestActiveReplicatorIgnoreNoConflicts(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, 
logger.LevelDebug, logger.KeyHTTP, logger.KeySync, logger.KeyChanges, logger.KeyCRUD, logger.KeyBucket)\n\n\t// Passive\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tAllowConflicts: false,\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\t// Active\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tAllowConflicts: false,\n\t\t}},\n\t})\n\tdefer rt1.Close()\n\n\trt1docID := t.Name() + \"rt1doc1\"\n\tresp := rt1.SendAdminRequest(http.MethodPut, \"/db/\"+rt1docID, `{\"source\":\"rt1\",\"channels\":[\"alice\"]}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\trt1revID := respRevID(t, resp)\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1.\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\n\t// Add basic auth creds to target db URL\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: 200,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t})\n\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\n\tassert.Equal(t, \"\", ar.GetStatus().LastSeqPush)\n\n\t// Start the replicator (implicit connect)\n\tassert.NoError(t, ar.Start())\n\n\t// wait for the document originally written to 
rt1 to arrive at rt2\n\tchangesResults, err := rt2.WaitForChanges(1, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, 1)\n\tassert.Equal(t, rt1docID, changesResults.Results[0].ID)\n\n\tdoc, err := rt2.GetDatabase().GetDocument(logger.TestCtx(t), rt1docID, db.DocUnmarshalAll)\n\tassert.NoError(t, err)\n\n\tassert.Equal(t, rt1revID, doc.SyncData.CurrentRev)\n\n\tbody, err := doc.GetDeepMutableBody()\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"rt1\", body[\"source\"])\n\n\t// write a doc on rt2 ...\n\trt2docID := t.Name() + \"rt2doc1\"\n\tresp = rt2.SendAdminRequest(http.MethodPut, \"/db/\"+rt2docID, `{\"source\":\"rt2\",\"channels\":[\"alice\"]}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\trt2revID := respRevID(t, resp)\n\n\t// ... and wait to arrive at rt1\n\tchangesResults, err = rt1.WaitForChanges(2, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, 2)\n\tassert.Equal(t, rt1docID, changesResults.Results[0].ID)\n\tassert.Equal(t, rt2docID, changesResults.Results[1].ID)\n\n\tdoc, err = rt1.GetDatabase().GetDocument(logger.TestCtx(t), rt2docID, db.DocUnmarshalAll)\n\tassert.NoError(t, err)\n\n\tassert.Equal(t, rt2revID, doc.SyncData.CurrentRev)\n\n\tbody, err = doc.GetDeepMutableBody()\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"rt2\", body[\"source\"])\n}", "func TestK8gbSpecKeepsStableAfterIngressUpdates(t *testing.T) {\n\tt.Parallel()\n\t// name of ingress and gslb\n\tconst name = \"test-gslb-lifecycle\"\n\n\tassertStrategy := func(t *testing.T, options *k8s.KubectlOptions) {\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.splitBrainThresholdSeconds\", \"600\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.dnsTtlSeconds\", \"60\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.primaryGeoTag\", settings.PrimaryGeoTag)\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.type\", 
\"failover\")\n\t}\n\n\tkubeResourcePath, err := filepath.Abs(\"../examples/failover-lifecycle.yaml\")\n\tingressResourcePath, err := filepath.Abs(\"../examples/ingress-annotation-failover.yaml\")\n\trequire.NoError(t, err)\n\t// To ensure we can reuse the resource config on the same cluster to test different scenarios, we setup a unique\n\t// namespace for the resources for this test.\n\t// Note that namespaces must be lowercase.\n\tnamespaceName := fmt.Sprintf(\"k8gb-test-spec-keeps-stable-after-ingress-updates-%s\", strings.ToLower(random.UniqueId()))\n\n\t// Here we choose to use the defaults, which is:\n\t// - HOME/.kube/config for the kubectl config file\n\t// - Current context of the kubectl config file\n\t// - Random namespace\n\toptions := k8s.NewKubectlOptions(\"\", \"\", namespaceName)\n\n\tk8s.CreateNamespace(t, options, namespaceName)\n\tdefer k8s.DeleteNamespace(t, options, namespaceName)\n\n\t// create gslb\n\tutils.CreateGslb(t, options, settings, kubeResourcePath)\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tassertStrategy(t, options)\n\n\t// reapply ingress\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress := k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\t// assert Gslb strategy has initial values, ingress doesn't change it\n\tassertStrategy(t, options)\n}", "func TestPoolInternalConflict(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\", \"10.0.10.64/28\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, false, nil)\n\n\tawait := fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\treturn !isPoolConflicting(pool)\n\t}, time.Second)\n\n\tgo 
fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected pool to be marked conflicting\")\n\t}\n\n\tawait = fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\treturn !isPoolConflicting(pool)\n\t}, 2*time.Second)\n\n\tpool, err := fixture.poolClient.Get(context.Background(), \"pool-a\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpool.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: \"10.0.10.0/24\",\n\t\t},\n\t}\n\n\t_, err = fixture.poolClient.Update(context.Background(), pool, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected pool to be un-marked conflicting\")\n\t}\n}", "func TestRangeDelete(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer 
fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, 500*time.Millisecond)\n\n\t// Add a new CIDR, this should not have any effect on the existing service.\n\tpoolA.Spec.Cidrs = append(poolA.Spec.Cidrs, cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\tCidr: \"10.0.20.0/24\",\n\t})\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.20\") {\n\t\t\tt.Error(\"Expected new ingress to be in the 10.0.20.0/24 range\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t// Remove the existing range, this should trigger the re-allocation of the existing service\n\tpoolA.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: \"10.0.20.0/24\",\n\t\t},\n\t}\n\t_, err = fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func 
TestShouldIgnoreRepeated(t *testing.T) {\n\tvar packetEvent PacketEvent = PacketEvent{\n\t\tTimeStamp: \"Sun Mar 08 20:02:59 EDT 2020\",\n\t\tDropReason: \"PolicyDrop-br-int/POL_TABLE\",\n\t\tSourceMac: \"16:39:19:fa:f8:40\",\n\t\tDestinationMac: \"62:58:da:98:01:97\",\n\t\tEtherType: \"IPv4\",\n\t\tSourceIP: \"10.1.1.1\",\n\t\tDestinationIP: \"10.1.1.2\",\n\t\tIPProto: \"UDP\",\n\t\tSourcePort: \"10023\",\n\t\tDestinationPort: \"53\",\n\t}\n\ttempdir, err := os.MkdirTemp(\"\", \"hostagent_test_\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer os.RemoveAll(tempdir)\n\tagent := testAgent()\n\tagent.config.OpFlexEndpointDir = tempdir\n\tagent.config.OpFlexServiceDir = tempdir\n\tagent.config.OpFlexSnatDir = tempdir\n\tagent.config.UplinkIface = \"eth10\"\n\tagent.config.NodeName = \"test-node\"\n\tagent.config.ServiceVlan = 4003\n\tagent.config.UplinkMacAdress = \"5a:fd:16:e5:e7:c0\"\n\tagent.config.DropLogExpiryTime = 10\n\tagent.config.DropLogRepeatIntervalTime = 2\n\tagent.run()\n\tfor i, pt := range podTests {\n\t\tif i%2 == 0 {\n\t\t\tos.WriteFile(filepath.Join(tempdir,\n\t\t\t\tpt.uuid+\"_\"+pt.cont+\"_\"+pt.veth+\".ep\"),\n\t\t\t\t[]byte(\"random gibberish\"), 0644)\n\t\t}\n\n\t\tpod := pod(pt.uuid, pt.namespace, pt.name, pt.eg, pt.sg, pt.qp)\n\t\tpod.Status.PodIP = pt.ip\n\t\tpod.Status.Phase = \"Running\"\n\t\tcnimd := cnimd(pt.namespace, pt.name, pt.ip, pt.cont, pt.veth)\n\t\tagent.epMetadata[pt.namespace+\"/\"+pt.name] =\n\t\t\tmap[string]*metadata.ContainerMetadata{\n\t\t\t\tcnimd.Id.ContId: cnimd,\n\t\t\t}\n\t\tagent.fakePodSource.Add(pod)\n\t}\n\ttime.Sleep(3000 * time.Millisecond)\n\tcurrTime, _ := time.Parse(time.UnixDate, \"Sun Mar 08 20:03:59 EDT 2020\")\n\terr = agent.processPacketEvent(&packetEvent, currTime)\n\tassert.Nil(t, err, \"Failed to process event\")\n\tpacketEvent.TimeStamp = \"Sun Mar 08 20:04:59 EDT 2020\"\n\tcurrTime = currTime.Add(time.Minute * 1)\n\tassert.Equal(t, true, agent.shouldIgnore(&packetEvent, currTime), \"repeated 
event prune test failed\")\n\tpacketEvent.TimeStamp = \"Sun Mar 08 20:06:59 EDT 2020\"\n\tcurrTime = currTime.Add(time.Minute * 5)\n\tassert.Equal(t, false, agent.shouldIgnore(&packetEvent, currTime), \"post event test failed\")\n\tfor _, pt := range podTests {\n\t\tpod := pod(pt.uuid, pt.namespace, pt.name, pt.eg, pt.sg, pt.qp)\n\t\tagent.fakePodSource.Delete(pod)\n\t}\n\tagent.stop()\n}", "func TestGetStatus(t *testing.T) {\n\t_, ip := fakeInstanceProvider()\n\tpod := &v1.Pod{}\n\ttestCases := []struct {\n\t\tmilpaPodPhase api.PodPhase\n\t\tk8sPodPhase v1.PodPhase\n\t\tmodPod func(*api.Pod)\n\t}{\n\t\t{\n\t\t\tmilpaPodPhase: api.PodDispatching,\n\t\t\tk8sPodPhase: v1.PodPending,\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodFailed,\n\t\t\tk8sPodPhase: v1.PodPending,\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodFailed,\n\t\t\tk8sPodPhase: v1.PodFailed,\n\t\t\tmodPod: func(p *api.Pod) {\n\t\t\t\tp.Spec.RestartPolicy = api.RestartPolicyNever\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodFailed,\n\t\t\tk8sPodPhase: v1.PodFailed,\n\t\t\tmodPod: func(p *api.Pod) {\n\t\t\t\tp.Status.StartFailures = allowedStartFailures + 1\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodRunning,\n\t\t\tk8sPodPhase: v1.PodRunning,\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodSucceeded,\n\t\t\tk8sPodPhase: v1.PodSucceeded,\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodTerminated,\n\t\t\tk8sPodPhase: v1.PodFailed,\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodWaiting,\n\t\t\tk8sPodPhase: v1.PodPending,\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tmilpaPod := api.GetFakePod()\n\t\tmilpaPod.Status.Phase = tc.milpaPodPhase\n\t\tif tc.modPod != nil {\n\t\t\ttc.modPod(milpaPod)\n\t\t}\n\t\tpodStatus := getStatus(ip, milpaPod, pod)\n\t\tassert.Equal(t, podStatus.Phase, tc.k8sPodPhase)\n\t}\n}", "func TestRegisteringDuplicateAuthMethodPanics(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\trestest.AssertPanic(t, func() {\n\t\t\ts.Handle(\"model\",\n\t\t\t\tres.Auth(\"foo\", 
func(r res.AuthRequest) {\n\t\t\t\t\tr.OK(nil)\n\t\t\t\t}),\n\t\t\t\tres.Auth(\"bar\", func(r res.AuthRequest) {\n\t\t\t\t\tr.OK(nil)\n\t\t\t\t}),\n\t\t\t\tres.Auth(\"foo\", func(r res.AuthRequest) {\n\t\t\t\t\tr.OK(nil)\n\t\t\t\t}),\n\t\t\t)\n\t\t})\n\t}, nil, restest.WithoutReset)\n}", "func (m *MockManager) EnsureIPAddress() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EnsureIPAddress\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestNamespacePreExisting(t *testing.T) {\n\ttestName := \"TestNamespacePreExisting\"\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{\n\t\tAPPLICATION: true,\n\t\t\"Deployment\": true,\n\t\t\"Service\": true,\n\t}\n\n\t// starting resources to pre-populate\n\tvar files = []string{\n\t\t/* 0 */ KappnavConfigFile,\n\t\t/* 1 */ CrdApplication,\n\t\t/* 2 */ ns1Service,\n\t\t/* 3 */ ns1Deployment,\n\t\t/* 4 */ ns2Service,\n\t\t/* 5 */ ns2Deployment,\n\t}\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: no applications. No resources should have status */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\n\t// status should not be checked when there are not applications\n\titeration0IDs[2].expectedStatus = NoStatus\n\titeration0IDs[3].expectedStatus = NoStatus\n\titeration0IDs[4].expectedStatus = NoStatus\n\titeration0IDs[5].expectedStatus = NoStatus\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t// iteration 1: add application to NS_1. 
All in NS_1 is normal.\n\t// All in NS_2 remains NoStatus\n\tres, err := readOneResourceID(ns1App)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tarrayLength := len(iteration0IDs)\n\tvar iteration1IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration1IDs, iteration0IDs)\n\titeration1IDs = append(iteration1IDs, res)\n\tarrayLength++\n\titeration1IDs[2].expectedStatus = Normal\n\titeration1IDs[3].expectedStatus = Normal\n\titeration1IDs[6].expectedStatus = Normal\n\ttestActions.addIteration(iteration1IDs, emptyIDs)\n\n\t/* iteration 4: clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// make all trasition of testAction\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func checkRequest(httpClient *dphttp.ClienterMock, callIndex int, expectedMethod, expectedURI string) {\n\tSo(httpClient.DoCalls()[callIndex].Req.URL.String(), ShouldEqual, expectedURI)\n\tSo(httpClient.DoCalls()[callIndex].Req.Method, ShouldEqual, expectedMethod)\n\tSo(httpClient.DoCalls()[callIndex].Req.Header.Get(dprequest.AuthHeaderKey), ShouldEqual, \"Bearer \"+testServiceToken)\n}", "func hasChanges(old *networking.Ingress, current *networking.Ingress) bool {\n\told.Status.LoadBalancer.Ingress = current.Status.LoadBalancer.Ingress\n\told.ResourceVersion = current.ResourceVersion\n\treturn !reflect.DeepEqual(old, current)\n}", "func (m *MockResponseHandler) NotModified() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"NotModified\")\n}", "func testNamedPortWNamespace() []*TestStep {\n\tnamedPorts := \"serve-80\"\n\tbuilder := &NetworkPolicySpecBuilder{}\n\tbuilder = builder.SetName(\"x\", \"allow-client-a-via-named-port-ingress-rule\").SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder.SetTypeIngress().AddIngress(v1.ProtocolTCP, nil, &namedPorts, nil, nil, nil, 
map[string]string{\"ns\": \"x\"}, nil, nil)\n\n\treachability80 := func() *Reachability {\n\t\treachability := NewReachability(allPods, true)\n\t\t//reachability.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\t//reachability.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\t//reachability.Expect(Pod(\"x/b\"), Pod(\"x/a\"), true)\n\t\t//reachability.Expect(Pod(\"x/c\"), Pod(\"x/a\"), true)\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: false,\n\t\t})\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tFrom: Pod(\"x/a\"),\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: true,\n\t\t})\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tFrom: Pod(\"x/b\"),\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: true,\n\t\t})\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tFrom: Pod(\"x/c\"),\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: true,\n\t\t})\n\t\treturn reachability\n\t}\n\n\t// disallow port 81\n\treachability81 := func() *Reachability {\n\t\treachability := NewReachability(allPods, true)\n\t\t//reachability.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\t//reachability.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: false,\n\t\t})\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tFrom: Pod(\"x/a\"),\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: true,\n\t\t})\n\t\treturn reachability\n\t}\n\n\treturn []*TestStep{\n\t\t{\n\t\t\t\"Port 80\",\n\t\t\treachability80(),\n\t\t\tbuilder.Get(),\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"Port 81\",\n\t\t\treachability81(),\n\t\t\tbuilder.Get(),\n\t\t\t81,\n\t\t\t0,\n\t\t},\n\t}\n}", "func testAllowAllPrecedenceIngress() []*TestStep {\n\tbuilder := &NetworkPolicySpecBuilder{}\n\tbuilder = builder.SetName(\"x\", \"deny-all\").SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder.SetTypeIngress()\n\tbuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{}, nil, nil, nil)\n\n\tpolicy1 := 
builder.Get()\n\treachability1 := NewReachability(allPods, true)\n\treachability1.ExpectAllIngress(Pod(\"x/a\"), false)\n\treachability1.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\n\tbuilder2 := &NetworkPolicySpecBuilder{}\n\t// by preserving the same name, this policy will also serve to test the 'updated policy' scenario.\n\tbuilder2 = builder2.SetName(\"x\", \"allow-all\").SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder2.SetTypeIngress()\n\tbuilder2.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil)\n\n\tpolicy2 := builder2.Get()\n\treachability2 := NewReachability(allPods, true)\n\n\treturn []*TestStep{\n\t\t{\n\t\t\t\"Port 81\",\n\t\t\treachability1,\n\t\t\tpolicy1,\n\t\t\tp81,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"Port 80\",\n\t\t\treachability2,\n\t\t\tpolicy2,\n\t\t\tp80,\n\t\t\t0,\n\t\t},\n\t}\n}", "func TestAddPool(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.20.10\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If 
t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.20.10\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.20.10'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\ttwentyPool := mkPool(poolBUID, \"pool-b\", []string{\"10.0.20.0/24\"})\n\t_, err := fixture.poolClient.Create(context.Background(), twentyPool, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func TestServiceDelete(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tvar svcIP string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif 
net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tsvcIP = svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status to be updated\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been allocated\")\n\t}\n\n\terr := fixture.svcClient.Services(\"default\").Delete(context.Background(), \"service-a\", meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been released\")\n\t}\n}", "func TestExternalNameService(t *testing.T) {\n\trh, c, done := setup(t, enableExternalNameService(t))\n\tdefer done()\n\n\ts1 := fixture.NewService(\"kuard\").\n\t\tWithSpec(v1.ServiceSpec{\n\t\t\tPorts: []v1.ServicePort{{\n\t\t\t\tPort: 80,\n\t\t\t\tTargetPort: intstr.FromInt(8080),\n\t\t\t}},\n\t\t\tExternalName: \"foo.io\",\n\t\t\tType: v1.ServiceTypeExternalName,\n\t\t})\n\n\ti1 := &networking_v1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kuard\",\n\t\t\tNamespace: s1.Namespace,\n\t\t},\n\t\tSpec: networking_v1.IngressSpec{\n\t\t\tDefaultBackend: featuretests.IngressBackend(s1),\n\t\t},\n\t}\n\trh.OnAdd(s1)\n\trh.OnAdd(i1)\n\n\tc.Request(routeType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tResources: resources(t,\n\t\t\tenvoy_v3.RouteConfiguration(\"ingress_http\",\n\t\t\t\tenvoy_v3.VirtualHost(\"*\",\n\t\t\t\t\t&envoy_route_v3.Route{\n\t\t\t\t\t\tMatch: routePrefix(\"/\"),\n\t\t\t\t\t\tAction: 
routeCluster(\"default/kuard/80/da39a3ee5e\"),\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t\tTypeUrl: routeType,\n\t})\n\n\tc.Request(clusterType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tResources: resources(t,\n\t\t\texternalNameCluster(\"default/kuard/80/da39a3ee5e\", \"default/kuard\", \"default_kuard_80\", \"foo.io\", 80),\n\t\t),\n\t\tTypeUrl: clusterType,\n\t})\n\n\trh.OnDelete(i1)\n\n\trh.OnAdd(fixture.NewProxy(\"kuard\").\n\t\tWithFQDN(\"kuard.projectcontour.io\").\n\t\tWithSpec(contour_api_v1.HTTPProxySpec{\n\t\t\tRoutes: []contour_api_v1.Route{{\n\t\t\t\tServices: []contour_api_v1.Service{{\n\t\t\t\t\tName: s1.Name,\n\t\t\t\t\tPort: 80,\n\t\t\t\t}},\n\t\t\t}},\n\t\t}),\n\t)\n\n\tc.Request(routeType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tResources: resources(t,\n\t\t\tenvoy_v3.RouteConfiguration(\"ingress_http\",\n\t\t\t\tenvoy_v3.VirtualHost(\"kuard.projectcontour.io\",\n\t\t\t\t\t&envoy_route_v3.Route{\n\t\t\t\t\t\tMatch: routePrefix(\"/\"),\n\t\t\t\t\t\tAction: routeCluster(\"default/kuard/80/a28d1ec01b\"),\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t\tTypeUrl: routeType,\n\t})\n\n\tc.Request(clusterType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tResources: resources(t,\n\t\t\texternalNameCluster(\"default/kuard/80/a28d1ec01b\", \"default/kuard\", \"default_kuard_80\", \"foo.io\", 80),\n\t\t),\n\t\tTypeUrl: clusterType,\n\t})\n\n\t// After we set the Host header, the cluster should remain\n\t// the same, but the Route should do update the Host header.\n\trh.OnDelete(fixture.NewProxy(\"kuard\").WithSpec(contour_api_v1.HTTPProxySpec{}))\n\trh.OnAdd(fixture.NewProxy(\"kuard\").\n\t\tWithFQDN(\"kuard.projectcontour.io\").\n\t\tWithSpec(contour_api_v1.HTTPProxySpec{\n\t\t\tRoutes: []contour_api_v1.Route{{\n\t\t\t\tServices: []contour_api_v1.Service{{\n\t\t\t\t\tName: s1.Name,\n\t\t\t\t\tPort: 80,\n\t\t\t\t}},\n\t\t\t\tRequestHeadersPolicy: &contour_api_v1.HeadersPolicy{\n\t\t\t\t\tSet: 
[]contour_api_v1.HeaderValue{{\n\t\t\t\t\t\tName: \"Host\",\n\t\t\t\t\t\tValue: \"external.address\",\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t}},\n\t\t}),\n\t)\n\n\tc.Request(routeType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tTypeUrl: routeType,\n\t\tResources: resources(t,\n\t\t\tenvoy_v3.RouteConfiguration(\"ingress_http\",\n\t\t\t\tenvoy_v3.VirtualHost(\"kuard.projectcontour.io\",\n\t\t\t\t\t&envoy_route_v3.Route{\n\t\t\t\t\t\tMatch: routePrefix(\"/\"),\n\t\t\t\t\t\tAction: routeHostRewrite(\"default/kuard/80/95e871afaf\", \"external.address\"),\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t})\n\n\tc.Request(clusterType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tTypeUrl: clusterType,\n\t\tResources: resources(t,\n\t\t\texternalNameCluster(\"default/kuard/80/95e871afaf\", \"default/kuard\", \"default_kuard_80\", \"foo.io\", 80),\n\t\t),\n\t})\n\n\t// Now try the same configuration, but enable HTTP/2. We\n\t// should still find that the same configuration applies, but\n\t// TLS is enabled and the SNI server name is overwritten from\n\t// the Host header.\n\trh.OnDelete(fixture.NewProxy(\"kuard\").WithSpec(contour_api_v1.HTTPProxySpec{}))\n\trh.OnAdd(fixture.NewProxy(\"kuard\").\n\t\tWithFQDN(\"kuard.projectcontour.io\").\n\t\tWithSpec(contour_api_v1.HTTPProxySpec{\n\t\t\tRoutes: []contour_api_v1.Route{{\n\t\t\t\tServices: []contour_api_v1.Service{{\n\t\t\t\t\tProtocol: ref.To(\"h2\"),\n\t\t\t\t\tName: s1.Name,\n\t\t\t\t\tPort: 80,\n\t\t\t\t}},\n\t\t\t\tRequestHeadersPolicy: &contour_api_v1.HeadersPolicy{\n\t\t\t\t\tSet: []contour_api_v1.HeaderValue{{\n\t\t\t\t\t\tName: \"Host\",\n\t\t\t\t\t\tValue: \"external.address\",\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t}},\n\t\t}),\n\t)\n\n\tc.Request(routeType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tTypeUrl: routeType,\n\t\tResources: 
resources(t,\n\t\t\tenvoy_v3.RouteConfiguration(\"ingress_http\",\n\t\t\t\tenvoy_v3.VirtualHost(\"kuard.projectcontour.io\",\n\t\t\t\t\t&envoy_route_v3.Route{\n\t\t\t\t\t\tMatch: routePrefix(\"/\"),\n\t\t\t\t\t\tAction: routeHostRewrite(\"default/kuard/80/cdbf075ad8\", \"external.address\"),\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t})\n\n\tc.Request(clusterType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tTypeUrl: clusterType,\n\t\tResources: resources(t,\n\t\t\tDefaultCluster(\n\t\t\t\texternalNameCluster(\"default/kuard/80/cdbf075ad8\", \"default/kuard\", \"default_kuard_80\", \"foo.io\", 80),\n\t\t\t\t&envoy_cluster_v3.Cluster{\n\t\t\t\t\tTypedExtensionProtocolOptions: map[string]*anypb.Any{\n\t\t\t\t\t\t\"envoy.extensions.upstreams.http.v3.HttpProtocolOptions\": protobuf.MustMarshalAny(\n\t\t\t\t\t\t\t&envoy_extensions_upstream_http_v3.HttpProtocolOptions{\n\t\t\t\t\t\t\t\tUpstreamProtocolOptions: &envoy_extensions_upstream_http_v3.HttpProtocolOptions_ExplicitHttpConfig_{\n\t\t\t\t\t\t\t\t\tExplicitHttpConfig: &envoy_extensions_upstream_http_v3.HttpProtocolOptions_ExplicitHttpConfig{\n\t\t\t\t\t\t\t\t\t\tProtocolConfig: &envoy_extensions_upstream_http_v3.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&envoy_cluster_v3.Cluster{\n\t\t\t\t\tTransportSocket: envoy_v3.UpstreamTLSTransportSocket(\n\t\t\t\t\t\tenvoy_v3.UpstreamTLSContext(nil, \"external.address\", nil, \"h2\"),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t),\n\t\t),\n\t})\n\n\t// Now try the same configuration, but enable TLS (which\n\t// means HTTP/1.1 over TLS) rather than HTTP/2. We should get\n\t// TLS enabled with the overridden SNI name. 
but no HTTP/2\n\t// protocol config.\n\trh.OnDelete(fixture.NewProxy(\"kuard\").WithSpec(contour_api_v1.HTTPProxySpec{}))\n\trh.OnAdd(fixture.NewProxy(\"kuard\").\n\t\tWithFQDN(\"kuard.projectcontour.io\").\n\t\tWithSpec(contour_api_v1.HTTPProxySpec{\n\t\t\tRoutes: []contour_api_v1.Route{{\n\t\t\t\tServices: []contour_api_v1.Service{{\n\t\t\t\t\tProtocol: ref.To(\"tls\"),\n\t\t\t\t\tName: s1.Name,\n\t\t\t\t\tPort: 80,\n\t\t\t\t}},\n\t\t\t\tRequestHeadersPolicy: &contour_api_v1.HeadersPolicy{\n\t\t\t\t\tSet: []contour_api_v1.HeaderValue{{\n\t\t\t\t\t\tName: \"Host\",\n\t\t\t\t\t\tValue: \"external.address\",\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t}},\n\t\t}),\n\t)\n\n\tc.Request(routeType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tTypeUrl: routeType,\n\t\tResources: resources(t,\n\t\t\tenvoy_v3.RouteConfiguration(\"ingress_http\",\n\t\t\t\tenvoy_v3.VirtualHost(\"kuard.projectcontour.io\",\n\t\t\t\t\t&envoy_route_v3.Route{\n\t\t\t\t\t\tMatch: routePrefix(\"/\"),\n\t\t\t\t\t\tAction: routeHostRewrite(\"default/kuard/80/f9439c1de8\", \"external.address\"),\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t})\n\n\tc.Request(clusterType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tTypeUrl: clusterType,\n\t\tResources: resources(t,\n\t\t\tDefaultCluster(\n\t\t\t\texternalNameCluster(\"default/kuard/80/f9439c1de8\", \"default/kuard\", \"default_kuard_80\", \"foo.io\", 80),\n\t\t\t\t&envoy_cluster_v3.Cluster{\n\t\t\t\t\tTransportSocket: envoy_v3.UpstreamTLSTransportSocket(\n\t\t\t\t\t\tenvoy_v3.UpstreamTLSContext(nil, \"external.address\", nil),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t),\n\t\t),\n\t})\n\n\tsec1 := &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"secret\",\n\t\t\tNamespace: \"default\",\n\t\t},\n\t\tType: \"kubernetes.io/tls\",\n\t\tData: featuretests.Secretdata(featuretests.CERTIFICATE, featuretests.RSA_PRIVATE_KEY),\n\t}\n\n\t// Create TCPProxy with upstream protocol 'tls' to an externalName type service\n\t// and verify that the SNI on the 
upstream request matches the externalName value.\n\trh.OnDelete(fixture.NewProxy(\"kuard\").WithSpec(contour_api_v1.HTTPProxySpec{}))\n\trh.OnAdd(sec1)\n\trh.OnAdd(fixture.NewProxy(\"kuard\").\n\t\tWithFQDN(\"kuard.projectcontour.io\").\n\t\tWithCertificate(sec1.Name).\n\t\tWithSpec(contour_api_v1.HTTPProxySpec{\n\t\t\tTCPProxy: &contour_api_v1.TCPProxy{\n\t\t\t\tServices: []contour_api_v1.Service{{\n\t\t\t\t\tProtocol: ref.To(\"tls\"),\n\t\t\t\t\tName: s1.Name,\n\t\t\t\t\tPort: 80,\n\t\t\t\t}},\n\t\t\t},\n\t\t}),\n\t)\n\n\tc.Request(clusterType).Equals(&envoy_discovery_v3.DiscoveryResponse{\n\t\tTypeUrl: clusterType,\n\t\tResources: resources(t,\n\t\t\tDefaultCluster(\n\t\t\t\texternalNameCluster(\"default/kuard/80/7d449598f5\", \"default/kuard\", \"default_kuard_80\", \"foo.io\", 80),\n\t\t\t\t&envoy_cluster_v3.Cluster{\n\t\t\t\t\tTransportSocket: envoy_v3.UpstreamTLSTransportSocket(\n\t\t\t\t\t\tenvoy_v3.UpstreamTLSContext(nil, \"foo.io\", nil),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t),\n\t\t),\n\t})\n}", "func TestIntermediateNameSameNameDifferentLayer(t *testing.T) {\n\tth := kusttest_test.MakeHarness(t)\n\tth.WriteK(\"gcp\", `\nnamePrefix: gcp-\nresources:\n- ../emea\npatchesStrategicMerge:\n- depPatch.yaml\n`)\n\tth.WriteF(\"gcp/depPatch.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: prod-foo\nspec:\n replicas: 999\n`)\n\tth.WriteK(\"emea\", `\nnamePrefix: emea-\nresources:\n- ../prod\n- deployment.yaml\n`)\n\tth.WriteF(\"emea/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\tth.WriteK(\"prod\", `\nnamePrefix: prod-\nresources:\n- ../base\n`)\n\tth.WriteK(\"base\", `\nresources:\n- deployment.yaml\n`)\n\tth.WriteF(\"base/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\tm := th.Run(\"gcp\", 
th.MakeDefaultOptions())\n\tth.AssertActualEqualsExpected(m, `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: gcp-emea-prod-foo\nspec:\n replicas: 999\n template:\n spec:\n containers:\n - image: whatever\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: gcp-emea-foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n}", "func testPortsPoliciesStackedOrUpdated() []*TestStep {\n\tblocked := func() *Reachability {\n\t\tr := NewReachability(allPods, true)\n\t\tr.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\tr.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\treturn r\n\t}\n\n\tunblocked := func() *Reachability {\n\t\treturn NewReachability(allPods, true)\n\t}\n\n\t/***\n\tInitially, only allow port 80, and verify 81 is blocked.\n\t*/\n\tpolicyName := \"policy-that-will-update-for-ports\"\n\tbuilder := &NetworkPolicySpecBuilder{}\n\tbuilder = builder.SetName(\"x\", policyName).SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder.SetTypeIngress()\n\tbuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil)\n\tpolicy1 := builder.Get()\n\n\tbuilder2 := &NetworkPolicySpecBuilder{}\n\t// by preserving the same name, this policy will also serve to test the 'updated policy' scenario.\n\tbuilder2 = builder2.SetName(\"x\", policyName).SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder2.SetTypeIngress()\n\tbuilder2.AddIngress(v1.ProtocolTCP, &p81, nil, nil, nil, nil, nil, nil, nil)\n\tpolicy2 := builder2.Get()\n\n\t// The first policy was on port 80, which was allowed, while 81 wasn't.\n\t// The second policy was on port 81, which was allowed.\n\t// At this point, if we stacked, make sure 80 is still unblocked\n\t// Whereas if we DIDNT stack, make sure 80 is blocked.\n\treturn []*TestStep{\n\t\t{\n\t\t\t\"Port 81 -- blocked\",\n\t\t\tblocked(), // 81 blocked\n\t\t\tpolicy1,\n\t\t\t81,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"Port 81 -- unblocked\",\n\t\t\tunblocked(), // 81 open 
now\n\t\t\tpolicy2,\n\t\t\t81,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"Port 80 -- blocked\",\n\t\t\tblocked(),\n\t\t\tpolicy2,\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t}\n}", "func TestNonExistingRepoOrInvalidPkgVersionGetAvailablePackageDetail(t *testing.T) {\n\tnegativeTestCases := []struct {\n\t\ttestName string\n\t\trequest *corev1.GetAvailablePackageDetailRequest\n\t\trepoName string\n\t\trepoNamespace string\n\t\tstatusCode codes.Code\n\t}{\n\t\t{\n\t\t\ttestName: \"it fails if request has invalid package version\",\n\t\t\trepoName: \"bitnami-1\",\n\t\t\trepoNamespace: \"default\",\n\t\t\trequest: &corev1.GetAvailablePackageDetailRequest{\n\t\t\t\tAvailablePackageRef: availableRef(\"bitnami-1/redis\", \"default\"),\n\t\t\t\tPkgVersion: \"99.99.0\",\n\t\t\t},\n\t\t\tstatusCode: codes.Internal,\n\t\t},\n\t\t{\n\t\t\ttestName: \"it fails if repo does not exist\",\n\t\t\trepoName: \"bitnami-1\",\n\t\t\trepoNamespace: \"default\",\n\t\t\trequest: &corev1.GetAvailablePackageDetailRequest{\n\t\t\t\tAvailablePackageRef: availableRef(\"bitnami-2/redis\", \"default\"),\n\t\t\t},\n\t\t\tstatusCode: codes.NotFound,\n\t\t},\n\t\t{\n\t\t\ttestName: \"it fails if repo does not exist in specified namespace\",\n\t\t\trepoName: \"bitnami-1\",\n\t\t\trepoNamespace: \"non-default\",\n\t\t\trequest: &corev1.GetAvailablePackageDetailRequest{\n\t\t\t\tAvailablePackageRef: availableRef(\"bitnami-1/redis\", \"default\"),\n\t\t\t},\n\t\t\tstatusCode: codes.NotFound,\n\t\t},\n\t\t{\n\t\t\ttestName: \"it fails if request has invalid chart\",\n\t\t\trepoName: \"bitnami-1\",\n\t\t\trepoNamespace: \"default\",\n\t\t\trequest: &corev1.GetAvailablePackageDetailRequest{\n\t\t\t\tAvailablePackageRef: availableRef(\"bitnami-1/redis-123\", \"default\"),\n\t\t\t},\n\t\t\tstatusCode: codes.NotFound,\n\t\t},\n\t}\n\n\tfor _, tc := range negativeTestCases {\n\t\tt.Run(tc.testName, func(t *testing.T) {\n\t\t\treplaceUrls := make(map[string]string)\n\t\t\tcharts := []testSpecChartWithUrl{}\n\t\t\tfor _, s := range 
redis_charts_spec {\n\t\t\t\ttarGzBytes, err := ioutil.ReadFile(s.tgzFile)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"%+v\", err)\n\t\t\t\t}\n\t\t\t\t// stand up an http server just for the duration of this test\n\t\t\t\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tw.WriteHeader(200)\n\t\t\t\t\tw.Write(tarGzBytes)\n\t\t\t\t}))\n\t\t\t\tdefer ts.Close()\n\t\t\t\treplaceUrls[fmt.Sprintf(\"{{%s}}\", s.tgzFile)] = ts.URL\n\t\t\t\tc := testSpecChartWithUrl{\n\t\t\t\t\tchartID: fmt.Sprintf(\"%s/%s\", tc.repoName, s.name),\n\t\t\t\t\tchartRevision: s.revision,\n\t\t\t\t\tchartUrl: ts.URL,\n\t\t\t\t\trepoNamespace: tc.repoNamespace,\n\t\t\t\t}\n\t\t\t\tcharts = append(charts, c)\n\t\t\t}\n\n\t\t\tts2, repo, err := newRepoWithIndex(\n\t\t\t\ttestYaml(\"redis-two-versions.yaml\"), tc.repoName, tc.repoNamespace, replaceUrls, \"\")\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"%+v\", err)\n\t\t\t}\n\t\t\tdefer ts2.Close()\n\n\t\t\ts, mock, err := newServerWithRepos(t, []sourcev1.HelmRepository{*repo}, charts, nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"%+v\", err)\n\t\t\t}\n\n\t\t\trequestRepoName := strings.Split(tc.request.AvailablePackageRef.Identifier, \"/\")[0]\n\t\t\trequestRepoNamespace := tc.request.AvailablePackageRef.Context.Namespace\n\n\t\t\trepoExists := requestRepoName == tc.repoName && requestRepoNamespace == tc.repoNamespace\n\t\t\tif repoExists {\n\t\t\t\ts.redisMockExpectGetFromRepoCache(mock, nil, *repo)\n\t\t\t\trequestChartName := strings.Split(tc.request.AvailablePackageRef.Identifier, \"/\")[1]\n\t\t\t\tchartExists := requestChartName == \"redis\"\n\t\t\t\tif chartExists {\n\t\t\t\t\tchartCacheKey, err := s.chartCache.KeyFor(\n\t\t\t\t\t\trequestRepoNamespace,\n\t\t\t\t\t\ttc.request.AvailablePackageRef.Identifier,\n\t\t\t\t\t\ttc.request.PkgVersion)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"%+v\", err)\n\t\t\t\t\t}\n\t\t\t\t\t// on a cache miss (there will be actually two calls to 
Redis GET based on current code path)\n\t\t\t\t\tfor i := 0; i < 2; i++ {\n\t\t\t\t\t\tif err = redisMockExpectGetFromChartCache(mock, chartCacheKey, \"\", nil); err != nil {\n\t\t\t\t\t\t\tt.Fatalf(\"%+v\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresponse, err := s.GetAvailablePackageDetail(context.Background(), tc.request)\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"got nil, want error\")\n\t\t\t}\n\t\t\tif got, want := status.Code(err), tc.statusCode; got != want {\n\t\t\t\tt.Fatalf(\"got: %+v, want: %+v, err: %+v\", got, want, err)\n\t\t\t}\n\n\t\t\tif err = mock.ExpectationsWereMet(); err != nil {\n\t\t\t\tt.Fatalf(\"%v\", err)\n\t\t\t}\n\n\t\t\tif response != nil {\n\t\t\t\tt.Fatalf(\"want nil, got %v\", response)\n\t\t\t}\n\t\t})\n\t}\n}", "func (s *StatusSyncer) shouldTargetIngress(ingress *knetworking.Ingress) bool {\n\tvar ingressClass *knetworking.IngressClass\n\tif ingress.Spec.IngressClassName != nil {\n\t\tingressClass = s.ingressClasses.Get(*ingress.Spec.IngressClassName, \"\")\n\t}\n\treturn shouldProcessIngressWithClass(s.meshConfig.Mesh(), ingress, ingressClass)\n}", "func TestCreateRetryConflictTagDiff(t *testing.T) {\n\tfirstGet := true\n\tfirstUpdate := true\n\trestInstance := &REST{\n\t\tstrategy: NewStrategy(registryhostname.TestingRegistryHostnameRetriever(nil, \"\", testDefaultRegistryURL)),\n\t\timageRegistry: &fakeImageRegistry{\n\t\t\tcreateImage: func(ctx context.Context, image *imageapi.Image) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\timageStreamRegistry: &fakeImageStreamRegistry{\n\t\t\tgetImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first get, return a stream with a latest tag pointing to \"original\"\n\t\t\t\tif firstGet {\n\t\t\t\t\tfirstGet = false\n\t\t\t\t\tstream := validImageStream()\n\t\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\t\tTags: 
map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:original\"}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\treturn stream, nil\n\t\t\t\t}\n\t\t\t\t// For subsequent gets, return a stream with the latest tag changed to \"newer\"\n\t\t\t\tstream := validImageStream()\n\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\tTags: map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:newer\"}}},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\treturn stream, nil\n\t\t\t},\n\t\t\tupdateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first update, return a conflict so that the stream\n\t\t\t\t// get/compare is retried.\n\t\t\t\tif firstUpdate {\n\t\t\t\t\tfirstUpdate = false\n\t\t\t\t\treturn nil, errors.NewConflict(imagegroup.Resource(\"imagestreams\"), repo.Name, fmt.Errorf(\"resource modified\"))\n\t\t\t\t}\n\t\t\t\treturn repo, nil\n\t\t\t},\n\t\t},\n\t}\n\tobj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)\n\tif err == nil {\n\t\tt.Fatalf(\"expected an error\")\n\t}\n\tif !errors.IsConflict(err) {\n\t\tt.Errorf(\"expected a conflict error, got %v\", err)\n\t}\n\tif obj != nil {\n\t\tt.Fatalf(\"expected a nil result\")\n\t}\n}", "func updateIngressControllerSpecWithRetryOnConflict(t *testing.T, name types.NamespacedName, timeout time.Duration, mutateSpecFn func(*operatorv1.IngressControllerSpec)) error {\n\tic := operatorv1.IngressController{}\n\treturn wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {\n\t\tif err := kclient.Get(context.TODO(), name, &ic); err != nil {\n\t\t\tt.Logf(\"error getting ingress controller %v: %v, retrying...\", name, err)\n\t\t\treturn false, nil\n\t\t}\n\t\tmutateSpecFn(&ic.Spec)\n\t\tif err 
:= kclient.Update(context.TODO(), &ic); err != nil {\n\t\t\tif errors.IsConflict(err) {\n\t\t\t\tt.Logf(\"conflict when updating ingress controller %v: %v, retrying...\", name, err)\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn false, err\n\t\t}\n\t\treturn true, nil\n\t})\n}", "func TestTransfomersImageDefaultConfig(t *testing.T) {\n\tth := kusttest_test.MakeHarness(t)\n\tmakeTransfomersImageBase(th)\n\tm := th.Run(\"base\", th.MakeDefaultOptions())\n\tth.AssertActualEqualsExpected(m, `\napiVersion: v1\ngroup: apps\nkind: Deployment\nmetadata:\n name: deploy1\nspec:\n template:\n spec:\n containers:\n - image: nginx:v2\n name: ngnix\n - image: foobar@sha256:24a0c4b4\n name: repliaced-with-digest\n - image: my-postgres:v3\n name: postgresdb\n initContainers:\n - image: my-nginx:previous\n name: nginx2\n - image: myprivaterepohostname:1234/my/cool-alpine:1.8.0\n name: init-alpine\n---\nkind: randomKind\nmetadata:\n name: random\nspec:\n template:\n spec:\n containers:\n - image: nginx:v2\n name: ngnix1\nspec2:\n template:\n spec:\n containers:\n - image: nginx:v2\n name: nginx3\n - image: my-nginx:previous\n name: nginx4\nspec3:\n template:\n spec:\n initContainers:\n - image: my-postgres:v3\n name: postgresdb\n - image: my-docker@sha256:25a0d4b4\n name: init-docker\n - image: myprivaterepohostname:1234/my/image:v1.0.1\n name: myImage\n - image: myprivaterepohostname:1234/my/image:v1.0.1\n name: myImage2\n - image: my-app-image:v1\n name: my-app\n - image: my-cool-app:latest\n name: my-cool-app\n`)\n}", "func TestAWSIAMNoPermissions(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tt.Cleanup(cancel)\n\n\t// Create unauthorized mocks for AWS services.\n\tstsClient := &mocks.STSMock{\n\t\tARN: \"arn:aws:iam::123456789012:role/test-role\",\n\t}\n\t// Make configurator.\n\tconfigurator, err := NewIAM(ctx, IAMConfig{\n\t\tAccessPoint: &mockAccessPoint{},\n\t\tClients: &clients.TestCloudClients{}, // placeholder,\n\t\tHostID: 
\"host-id\",\n\t})\n\trequire.NoError(t, err)\n\n\ttests := []struct {\n\t\tname string\n\t\tmeta types.AWS\n\t\tclients clients.Clients\n\t}{\n\t\t{\n\t\t\tname: \"RDS database\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", RDS: types.RDS{InstanceID: \"postgres-rds\", ResourceID: \"postgres-rds-resource-id\"}},\n\t\t\tclients: &clients.TestCloudClients{\n\t\t\t\tRDS: &mocks.RDSMockUnauth{},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: trace.AccessDenied(\"unauthorized\"),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Aurora cluster\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", RDS: types.RDS{ClusterID: \"postgres-aurora\", ResourceID: \"postgres-aurora-resource-id\"}},\n\t\t\tclients: &clients.TestCloudClients{\n\t\t\t\tRDS: &mocks.RDSMockUnauth{},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: trace.AccessDenied(\"unauthorized\"),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"RDS database missing metadata\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", RDS: types.RDS{ClusterID: \"postgres-aurora\"}},\n\t\t\tclients: &clients.TestCloudClients{\n\t\t\t\tRDS: &mocks.RDSMockUnauth{},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: trace.AccessDenied(\"unauthorized\"),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Redshift cluster\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", Redshift: types.Redshift{ClusterID: \"redshift-cluster-1\"}},\n\t\t\tclients: &clients.TestCloudClients{\n\t\t\t\tRedshift: &mocks.RedshiftMockUnauth{},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: trace.AccessDenied(\"unauthorized\"),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ElastiCache\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", ElastiCache: types.ElastiCache{ReplicationGroupID: \"some-group\"}},\n\t\t\tclients: 
&clients.TestCloudClients{\n\t\t\t\t// As of writing this API won't be called by the configurator anyway,\n\t\t\t\t// but might as well provide it in case that changes.\n\t\t\t\tElastiCache: &mocks.ElastiCacheMock{Unauth: true},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: trace.AccessDenied(\"unauthorized\"),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"IAM UnmodifiableEntityException\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", Redshift: types.Redshift{ClusterID: \"redshift-cluster-1\"}},\n\t\t\tclients: &clients.TestCloudClients{\n\t\t\t\tRedshift: &mocks.RedshiftMockUnauth{},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: awserr.New(iam.ErrCodeUnmodifiableEntityException, \"unauthorized\", fmt.Errorf(\"unauthorized\")),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t// Update cloud clients.\n\t\t\tconfigurator.cfg.Clients = test.clients\n\n\t\t\tdatabase, err := types.NewDatabaseV3(types.Metadata{\n\t\t\t\tName: \"test\",\n\t\t\t}, types.DatabaseSpecV3{\n\t\t\t\tProtocol: defaults.ProtocolPostgres,\n\t\t\t\tURI: \"localhost\",\n\t\t\t\tAWS: test.meta,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Make sure there're no errors trying to setup/destroy IAM.\n\t\t\terr = configurator.processTask(ctx, iamTask{\n\t\t\t\tisSetup: true,\n\t\t\t\tdatabase: database,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = configurator.UpdateIAMStatus(database)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, types.IAMPolicyStatus_IAM_POLICY_STATUS_FAILED, database.GetAWS().IAMPolicyStatus, \"must be invalid because of perm issues\")\n\n\t\t\terr = configurator.processTask(ctx, iamTask{\n\t\t\t\tisSetup: false,\n\t\t\t\tdatabase: database,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = configurator.UpdateIAMStatus(database)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, 
types.IAMPolicyStatus_IAM_POLICY_STATUS_UNSPECIFIED, database.GetAWS().IAMPolicyStatus, \"must be unspecified, task is tearing down\")\n\t\t})\n\t}\n}", "func (m *MockNotary) Notarize(arg0 string) (map[string]interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Notarize\", arg0)\n\tret0, _ := ret[0].(map[string]interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestValidate1(t *testing.T) {\n\tendpoints := make(map[string]map[string]*Endpoint)\n\tendpoints[\"/test\"] = map[string]*Endpoint{\n\t\t\"get\": {\n\t\t\tParams: &Parameters{\n\t\t\t\tQuery: map[string]*ParamEntry{\"test\": {Type: \"string\", Required: true}},\n\t\t\t\tPath: map[string]*ParamEntry{\"test\": {Type: \"boolean\", Required: true}},\n\t\t\t},\n\t\t\tRecieves: &Recieves{\n\t\t\t\tHeaders: map[string]string{\"foo\": \"bar\"},\n\t\t\t\tBody: map[string]string{\"example_array.0.foo\": \"string\"},\n\t\t\t},\n\t\t\tResponses: map[int]*Response{\n\t\t\t\t200: {\n\t\t\t\t\tHeaders: map[string]string{\"foo\": \"bar\"},\n\t\t\t\t\tBody: map[string]interface{}{\"bar\": \"foo\"},\n\t\t\t\t\tWeight: 100,\n\t\t\t\t\tActions: []map[string]interface{}{\n\t\t\t\t\t\t{\"delay\": 10},\n\t\t\t\t\t\t{\"request\": map[interface{}]interface{}{\"target\": \"testService\", \"id\": \"testRequest\"}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tActions: []map[string]interface{}{\n\t\t\t\t{\"delay\": 10},\n\t\t\t\t{\"request\": map[interface{}]interface{}{\"target\": \"testService\", \"id\": \"testRequest\"}},\n\t\t\t},\n\t\t},\n\t}\n\n\tcfg := &Config{\n\t\tVersion: 1.0,\n\t\tServices: map[string]*Service{\n\t\t\t\"testService\": {Hostname: \"localhost\", Port: 8080},\n\t\t},\n\t\tStartupActions: []map[string]interface{}{\n\t\t\t{\"delay\": 10},\n\t\t\t{\"request\": map[interface{}]interface{}{\"target\": \"testService\", \"id\": \"testRequest\"}},\n\t\t},\n\t\tRequests: map[string]*Request{\n\t\t\t\"testRequest\": {\n\t\t\t\tURL: \"/test\",\n\t\t\t\tProtocol: 
\"http\",\n\t\t\t\tMethod: \"get\",\n\t\t\t\tHeaders: map[string]string{\"foo\": \"bar\"},\n\t\t\t\tBody: nil,\n\t\t\t\tExpectedResponse: &Response{\n\t\t\t\t\tStatusCode: 200,\n\t\t\t\t\tBody: map[string]interface{}{\"foo.bar\": \"string\"},\n\t\t\t\t\tHeaders: nil,\n\t\t\t\t\tWeight: 100,\n\t\t\t\t\tActions: []map[string]interface{}{\n\t\t\t\t\t\t{\"delay\": 10},\n\t\t\t\t\t\t{\"request\": map[interface{}]interface{}{\"target\": \"testService\", \"id\": \"testRequest\"}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tEndpoints: endpoints,\n\t}\n\n\tif err := Validate(cfg); err != nil {\n\t\tt.Errorf(\"Validation Failed: %s\", err.Error())\n\t}\n}", "func (m *CloudWatchLogsServiceMock) CreateNewServiceIfUnHealthy() {\n\n}", "func ingressControllerExists(ctx context.Context, h *helper.H, ingressControllerName string, shouldexist bool) {\n\t_, err := h.Dynamic().\n\t\tResource(schema.GroupVersionResource{Group: \"operator.openshift.io\", Version: \"v1\", Resource: \"ingresscontrollers\"}).\n\t\tNamespace(\"openshift-ingress-operator\").\n\t\tGet(ctx, ingressControllerName, metav1.GetOptions{})\n\tif shouldexist {\n\t\tExpect(err).NotTo(HaveOccurred())\n\t} else {\n\t\tExpect(err).Should(MatchError(fmt.Sprintf(\"ingresscontrollers.operator.openshift.io \\\"%v\\\" not found\", ingressControllerName)))\n\t}\n}", "func TestDisablePool(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || 
action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, 500*time.Millisecond)\n\n\tpoolA.Spec.Disabled = true\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].externallyDisabled {\n\t\tt.Fatal(\"The range has not been externally disabled\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\tt.Error(\"Expected service status update to occur on service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tserviceB := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-b\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceBUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Create(context.Background(), serviceB, 
meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolA.Spec.Disabled = false\n\n\t_, err = fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func TestExcludePrefixesInjection(t *testing.T) {\n\tg := gomega.NewGomegaWithT(t)\n\n\trequest := buildRequest()\n\tconn, err := doRequest(g, request)\n\n\tg.Expect(err).To(gomega.BeNil())\n\tconnPrefixes := conn.GetContext().GetIpContext().GetExcludedPrefixes()\n\tg.Expect(connPrefixes).To(gomega.ConsistOf(prefixes))\n}", "func testPodLabelAllowTrafficFromBToA() []*TestStep {\n\tbuilder := &NetworkPolicySpecBuilder{}\n\tbuilder = builder.SetName(\"x\", \"allow-client-a-via-pod-selector\").SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder.SetTypeIngress()\n\tbuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{\"pod\": \"b\"}, map[string]string{\"ns\": \"x\"}, nil, nil)\n\tbuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{\"pod\": \"b\"}, map[string]string{\"ns\": \"y\"}, nil, nil)\n\tbuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{\"pod\": \"b\"}, map[string]string{\"ns\": \"z\"}, nil, nil)\n\n\treachability := func() *Reachability {\n\t\treachability := NewReachability(allPods, true)\n\t\treachability.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\treachability.Expect(Pod(\"x/b\"), Pod(\"x/a\"), 
true)\n\t\treachability.Expect(Pod(\"y/b\"), Pod(\"x/a\"), true)\n\t\treachability.Expect(Pod(\"z/b\"), Pod(\"x/a\"), true)\n\t\treachability.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\treturn reachability\n\t}\n\treturn []*TestStep{\n\t\t{\n\t\t\t\"Port 80\",\n\t\t\treachability(),\n\t\t\tbuilder.Get(),\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t}\n}", "func (m *MockUnsafeLinkServiceServer) mustEmbedUnimplementedLinkServiceServer() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"mustEmbedUnimplementedLinkServiceServer\")\n}", "func Test_testHealth(t *testing.T) {\n\tinitMetadata() // Used from metadata_test.go\n\n\tport := \"80\"\n\tunhealthyTHDs := []*elbv2.TargetHealthDescription{}\n\thealthyTHDs := []*elbv2.TargetHealthDescription{\n\t\t{\n\t\t\tHealthCheckPort: &port,\n\t\t},\n\t}\n\ttgArn := \"arn:1234\"\n\tcontainerID := \"123123412\"\n\tinvalidContainerID := \"111111\"\n\n\tsetupCache(\"123123412\", \"instance-123\", \"correct-lb-dnsname\", 1234, 9001, tgArn, unhealthyTHDs)\n\n\tt.Run(\"Should return STARTING because of unhealthy targets\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, unhealthyTHDs)\n\t\tvar previousStatus fargo.StatusType\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.STARTING\n\t\twantedNow := fargo.STARTING\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. 
Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of healthy targets 1\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should fail gracefully\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.STARTING\n\t\twantedNow := fargo.UNKNOWN\n\n\t\tchange := determineNewEurekaStatus(invalidContainerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of eureka status\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, unhealthyTHDs)\n\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UP\n\t\twantedReg := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wantedReg {\n\t\t\tt.Errorf(\"Should return %v status for reg status. 
Returned %v\", wantedReg, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of healthy targets 2\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\n\t\tpreviousStatus := fargo.STARTING\n\t\teurekaStatus := fargo.STARTING\n\t\twantedReg := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wantedReg {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wantedReg, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n}", "func TestGetCanGetAlldata(t *testing.T) {\n\ts1, closeFn1, clientset1, configFilename1, s2, closeFn2, clientset2, configFilename2 := setUpTwoApiservers(t)\n\tdefer deleteSinglePartitionConfigFile(t, configFilename1)\n\tdefer deleteSinglePartitionConfigFile(t, configFilename2)\n\tdefer closeFn1()\n\tdefer closeFn2()\n\n\t// create pods via 2 different api servers\n\tpod1 := createPod(t, clientset1, tenant1, \"te\", \"pod1\")\n\tdefer framework.DeleteTestingTenant(tenant1, s1, t)\n\tpod2 := createPod(t, clientset2, tenant2, \"te\", \"pod1\")\n\tdefer framework.DeleteTestingTenant(tenant2, s2, t)\n\tassert.NotNil(t, pod1)\n\tassert.NotNil(t, pod2)\n\tassert.NotEqual(t, pod1.UID, pod2.UID)\n\n\t// verify get pod with same api server\n\treadPod1, err := clientset1.CoreV1().PodsWithMultiTenancy(pod1.Namespace, pod1.Tenant).Get(pod1.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get pod 1 from same clientset\")\n\tassert.NotNil(t, readPod1)\n\treadPod2, err := clientset2.CoreV1().PodsWithMultiTenancy(pod2.Namespace, 
pod2.Tenant).Get(pod2.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get pod 2 from same clientset\")\n\tassert.NotNil(t, readPod2)\n\n\t// verify get pod through different api server\n\treadPod1, err = clientset2.CoreV1().PodsWithMultiTenancy(pod1.Namespace, pod1.Tenant).Get(pod1.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get pod 1 from different clientset\")\n\tif err == nil {\n\t\tcheckPodEquality(t, pod1, readPod1)\n\t}\n\treadPod2, err = clientset1.CoreV1().PodsWithMultiTenancy(pod2.Namespace, pod2.Tenant).Get(pod2.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get pod 2 from different clientset\")\n\tif err == nil {\n\t\tcheckPodEquality(t, pod2, readPod2)\n\t}\n\n\t// create replicaset via 2 different api servers\n\trs1 := createRS(t, clientset1, tenant1, \"rs1\", \"default\", 1)\n\trs2 := createRS(t, clientset2, tenant2, \"rs2\", \"default\", 1)\n\tassert.NotNil(t, rs1)\n\tassert.NotNil(t, rs2)\n\tassert.NotEqual(t, rs1.UID, rs2.UID)\n\n\t// verify get rs through different api server\n\treadRs1, err := clientset2.AppsV1().ReplicaSetsWithMultiTenancy(rs1.Namespace, rs1.Tenant).Get(rs1.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get rs 1 from different clientset\")\n\tif err == nil {\n\t\tcheckRSEquality(t, rs1, readRs1)\n\t}\n\treadRs2, err := clientset1.AppsV1().ReplicaSetsWithMultiTenancy(rs2.Namespace, rs2.Tenant).Get(rs2.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get rs 2 from different clientset\")\n\tif err == nil {\n\t\tcheckRSEquality(t, rs2, readRs2)\n\t}\n\n\t// tear down\n\tdeletePod(t, clientset1, pod1)\n\tdeletePod(t, clientset1, pod2)\n\tdeleteRS(t, clientset2, rs1)\n\tdeleteRS(t, clientset2, rs2)\n}", "func (m *MockUnsafeTodoServiceServer) mustEmbedUnimplementedTodoServiceServer() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"mustEmbedUnimplementedTodoServiceServer\")\n}", "func (s *HealthCheckSuite) TestMultipleRoutersOnSameService(c *check.C) {\n\tfile := 
s.adaptFile(c, \"fixtures/healthcheck/multiple-routers-one-same-service.toml\", struct {\n\t\tServer1 string\n\t}{s.whoami1IP})\n\tdefer os.Remove(file)\n\n\tcmd, display := s.traefikCmd(withConfigFile(file))\n\tdefer display(c)\n\terr := cmd.Start()\n\tc.Assert(err, checker.IsNil)\n\tdefer s.killCmd(cmd)\n\n\t// wait for traefik\n\terr = try.GetRequest(\"http://127.0.0.1:8080/api/rawdata\", 60*time.Second, try.BodyContains(\"Host(`test.localhost`)\"))\n\tc.Assert(err, checker.IsNil)\n\n\t// Set whoami health to 200 to be sure to start with the wanted status\n\tclient := &http.Client{}\n\tstatusOkReq, err := http.NewRequest(http.MethodPost, \"http://\"+s.whoami1IP+\"/health\", bytes.NewBuffer([]byte(\"200\")))\n\tc.Assert(err, checker.IsNil)\n\t_, err = client.Do(statusOkReq)\n\tc.Assert(err, checker.IsNil)\n\n\t// check healthcheck on web1 entrypoint\n\thealthReqWeb1, err := http.NewRequest(http.MethodGet, \"http://127.0.0.1:8000/health\", nil)\n\tc.Assert(err, checker.IsNil)\n\thealthReqWeb1.Host = \"test.localhost\"\n\terr = try.Request(healthReqWeb1, 1*time.Second, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n\n\t// check healthcheck on web2 entrypoint\n\thealthReqWeb2, err := http.NewRequest(http.MethodGet, \"http://127.0.0.1:9000/health\", nil)\n\tc.Assert(err, checker.IsNil)\n\thealthReqWeb2.Host = \"test.localhost\"\n\n\terr = try.Request(healthReqWeb2, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n\n\t// Set whoami health to 500\n\tstatusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, \"http://\"+s.whoami1IP+\"/health\", bytes.NewBuffer([]byte(\"500\")))\n\tc.Assert(err, checker.IsNil)\n\t_, err = client.Do(statusInternalServerErrorReq)\n\tc.Assert(err, checker.IsNil)\n\n\t// Verify no backend service is available due to failing health checks\n\terr = try.Request(healthReqWeb1, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))\n\tc.Assert(err, 
checker.IsNil)\n\n\terr = try.Request(healthReqWeb2, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))\n\tc.Assert(err, checker.IsNil)\n\n\t// Change one whoami health to 200\n\tstatusOKReq1, err := http.NewRequest(http.MethodPost, \"http://\"+s.whoami1IP+\"/health\", bytes.NewBuffer([]byte(\"200\")))\n\tc.Assert(err, checker.IsNil)\n\t_, err = client.Do(statusOKReq1)\n\tc.Assert(err, checker.IsNil)\n\n\t// Verify health check\n\terr = try.Request(healthReqWeb1, 3*time.Second, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n\n\terr = try.Request(healthReqWeb2, 3*time.Second, try.StatusCodeIs(http.StatusOK))\n\tc.Assert(err, checker.IsNil)\n}", "func TestL7ModelSNI(t *testing.T) {\n\tg := gomega.NewGomegaWithT(t)\n\tintegrationtest.AddSecret(\"my-secret\", \"default\", \"tlsCert\", \"tlsKey\")\n\tmodelName := \"admin/cluster--Shared-L7-0\"\n\tSetUpTestForIngress(t, modelName)\n\n\tintegrationtest.PollForCompletion(t, modelName, 5)\n\n\t// foo.com and noo.com compute the same hashed shard vs num\n\tingrFake := (integrationtest.FakeIngress{\n\t\tName: \"foo-with-targets\",\n\t\tNamespace: \"default\",\n\t\tDnsNames: []string{\"foo.com\", \"noo.com\"},\n\t\tIps: []string{\"8.8.8.8\"},\n\t\tHostNames: []string{\"v1\"},\n\t\tTlsSecretDNS: map[string][]string{\n\t\t\t\"my-secret\": []string{\"foo.com\"},\n\t\t},\n\t\tServiceName: \"avisvc\",\n\t}).Ingress()\n\n\t_, err := KubeClient.ExtensionsV1beta1().Ingresses(\"default\").Create(ingrFake)\n\tif err != nil {\n\t\tt.Fatalf(\"error in adding Ingress: %v\", err)\n\t}\n\tintegrationtest.PollForCompletion(t, modelName, 5)\n\tfound, aviModel := objects.SharedAviGraphLister().Get(modelName)\n\tif found {\n\t\tnodes := 
aviModel.(*avinodes.AviObjectGraph).GetAviVS()\n\t\tg.Expect(len(nodes)).To(gomega.Equal(1))\n\t\tg.Expect(nodes[0].Name).To(gomega.ContainSubstring(\"Shared-L7\"))\n\t\tg.Expect(nodes[0].Tenant).To(gomega.Equal(\"admin\"))\n\t\tg.Expect(nodes[0].PoolRefs).To(gomega.HaveLen(1))\n\t\tg.Expect(nodes[0].PoolRefs[0].Name).To(gomega.ContainSubstring(\"noo.com\"))\n\t\tg.Expect(nodes[0].HttpPolicyRefs).To(gomega.HaveLen(1)) // redirect http->https policy\n\t\tg.Expect(nodes[0].HttpPolicyRefs[0].RedirectPorts[0].Hosts[0]).To(gomega.Equal(\"foo.com\"))\n\n\t\tg.Expect(nodes[0].SniNodes[0].VHDomainNames[0]).To(gomega.Equal(\"foo.com\"))\n\t\tg.Expect(len(nodes[0].SniNodes)).To(gomega.Equal(1))\n\t\tg.Expect(len(nodes[0].SniNodes[0].PoolGroupRefs)).To(gomega.Equal(1))\n\t\tg.Expect(len(nodes[0].SniNodes[0].HttpPolicyRefs)).To(gomega.Equal(1))\n\t\tg.Expect(nodes[0].SniNodes[0].PoolRefs[0].Name).To(gomega.ContainSubstring(\"foo.com\"))\n\t\tg.Expect(len(nodes[0].SniNodes[0].PoolRefs)).To(gomega.Equal(1))\n\t\tg.Expect(len(nodes[0].SniNodes[0].PoolRefs[0].Servers)).To(gomega.Equal(1))\n\t\tg.Expect(len(nodes[0].SniNodes[0].SSLKeyCertRefs)).To(gomega.Equal(1))\n\t} else {\n\t\tt.Fatalf(\"Could not find Model: %v\", err)\n\t}\n\terr = KubeClient.ExtensionsV1beta1().Ingresses(\"default\").Delete(\"foo-with-targets\", nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't DELETE the Ingress %v\", err)\n\t}\n\tKubeClient.CoreV1().Secrets(\"default\").Delete(\"my-secret\", nil)\n\tVerifySNIIngressDeletion(t, g, aviModel, 0)\n\n\tTearDownTestForIngress(t, modelName)\n}", "func TestAllocHappyPath(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\", \"FF::0/48\"}),\n\t}, true, true, nil)\n\n\t// Initially request only an IPv4\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: 
slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action.(k8s_testing.PatchAction))\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Unexpected condition type assigned to service\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Unexpected condition status assigned to service\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service to be updated\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tsvc, err := fixture.svcClient.Services(\"default\").Get(context.Background(), \"service-a\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Switch to requesting an IPv6 address\n\tsvc.Spec.IPFamilies = 
[]slim_core_v1.IPFamily{\n\t\tslim_core_v1.IPv6Protocol,\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action.(k8s_testing.PatchAction))\n\n\t\t// The second update allocates the new IPv6\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() != nil {\n\t\t\tt.Error(\"Expected service to receive a IPv6 address\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t_, err = fixture.svcClient.Services(\"default\").Update(context.Background(), svc, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update after update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\t// Allow time for additional events to fire\n\ttime.Sleep(100 * time.Millisecond)\n\n\tsvc, err = fixture.svcClient.Services(\"default\").Get(context.Background(), \"service-a\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Switch back to requesting an IPv4 address\n\tsvc.Spec.IPFamilies = []slim_core_v1.IPFamily{\n\t\tslim_core_v1.IPv4Protocol,\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action.(k8s_testing.PatchAction))\n\n\t\t// The second update allocates the new IPv4\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive 
a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t_, err = fixture.svcClient.Services(\"default\").Update(context.Background(), svc, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update after update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n}", "func TestSendChangesToNoConflictPreHydrogenTarget(t *testing.T) {\n\tt.Skip(\"Test is only for development purposes\")\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\terrorCountBefore := base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().ErrorCount.Value()\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tAllowConflicts: false,\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t})\n\tdefer rt1.Close()\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1.\n\tsrv := httptest.NewTLSServer(rt2.TestAdminHandler())\n\tdefer srv.Close()\n\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\n\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\tID: \"test\",\n\t\tDirection: db.ActiveReplicatorTypePush,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tInsecureSkipVerify: true,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t})\n\n\tdefer func() {\n\t\trequire.NoError(t, ar.Stop())\n\t}()\n\trequire.NoError(t, ar.Start())\n\n\tassert.Equal(t, errorCountBefore, base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().ErrorCount.Value())\n\n\tresponse := rt1.SendAdminRequest(\"PUT\", \"/db/doc1\", 
\"{}\")\n\tassertStatus(t, response, http.StatusCreated)\n\n\terr = rt2.WaitForCondition(func() bool {\n\t\tif base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().ErrorCount.Value() == errorCountBefore+1 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n\tassert.NoError(t, err)\n\n\tassert.Equal(t, db.ReplicationStateStopped, ar.GetStatus().Status)\n\tassert.Equal(t, db.PreHydrogenTargetAllowConflictsError.Error(), ar.GetStatus().ErrorMessage)\n}", "func (options *ConformanceTestOptions) ShouldTestIngressOfType(t string) bool {\n\treturn indexOf(options.TestCase.Ingress.IngressConfig.Controllers, t) > -1\n}", "func (m *MockUseCase) UnlikeActor(arg0 string, arg1 int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UnlikeActor\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func testOpSrcWithNonexistentRegistryNamespace(t *testing.T) {\n\topSrcName := \"nonexistent-namespace-opsrc\"\n\t// validURL is a valid endpoint for the OperatorSource\n\tvalidURL := \"https://quay.io/cnr\"\n\n\t// nonexistentRegistryNamespace is a namespace that does not exist\n\t// on the app registry\n\tnonexistentRegistryNamespace := \"not-existent-namespace\"\n\n\tctx := test.NewTestCtx(t)\n\tdefer ctx.Cleanup()\n\n\t// Get global framework variables\n\tclient := test.Global.Client\n\n\t// Get test namespace\n\tnamespace, err := ctx.GetNamespace()\n\trequire.NoError(t, err, \"Could not get namespace\")\n\tnonexistentRegistryNamespaceOperatorSource := &operator.OperatorSource{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: operator.OperatorSourceKind,\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: opSrcName,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tSpec: operator.OperatorSourceSpec{\n\t\t\tType: endpointType,\n\t\t\tEndpoint: validURL,\n\t\t\tRegistryNamespace: nonexistentRegistryNamespace,\n\t\t},\n\t}\n\terr = helpers.CreateRuntimeObject(client, ctx, nonexistentRegistryNamespaceOperatorSource)\n\trequire.NoError(t, err, \"Could not create 
OperatorSource\")\n\n\t// Check that OperatorSource reaches \"Failed\" state eventually\n\tresultOperatorSource := &operator.OperatorSource{}\n\texpectedPhase := \"Failed\"\n\terr = wait.Poll(helpers.RetryInterval, helpers.Timeout, func() (bool, error) {\n\t\terr = helpers.WaitForResult(client, resultOperatorSource, namespace, opSrcName)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif resultOperatorSource.Status.CurrentPhase.Name == expectedPhase &&\n\t\t\tstrings.Contains(resultOperatorSource.Status.CurrentPhase.Message, \"The OperatorSource endpoint returned an empty manifest list\") {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tassert.NoError(t, err, fmt.Sprintf(\"OperatorSource never reached expected phase/message, expected %v\", expectedPhase))\n}", "func (m *MockUnsafeDocumentAnnotatorServer) mustEmbedUnimplementedDocumentAnnotatorServer() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"mustEmbedUnimplementedDocumentAnnotatorServer\")\n}", "func Test_ConvertingToExternalNameServiceDropsInternalTrafficPolicy(t *testing.T) {\n\tserver := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())\n\tdefer server.TearDownFn()\n\n\tclient, err := clientset.NewForConfig(server.ClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating clientset: %v\", err)\n\t}\n\n\tns := framework.CreateNamespaceOrDie(client, \"test-external-name-drops-internal-traffic-policy\", t)\n\tdefer framework.DeleteNamespaceOrDie(client, ns, t)\n\n\tservice := &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"test-123\",\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tType: corev1.ServiceTypeClusterIP,\n\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\tPort: int32(80),\n\t\t\t}},\n\t\t\tSelector: map[string]string{\n\t\t\t\t\"foo\": \"bar\",\n\t\t\t},\n\t\t},\n\t}\n\n\tservice, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating test 
service: %v\", err)\n\t}\n\n\tif *service.Spec.InternalTrafficPolicy != corev1.ServiceInternalTrafficPolicyCluster {\n\t\tt.Error(\"service internalTrafficPolicy was not set for clusterIP Service\")\n\t}\n\n\tnewService := service.DeepCopy()\n\tnewService.Spec.Type = corev1.ServiceTypeExternalName\n\tnewService.Spec.ExternalName = \"foo.bar.com\"\n\n\tservice, err = client.CoreV1().Services(ns.Name).Update(context.TODO(), newService, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error getting service: %v\", err)\n\t}\n\n\tif service.Spec.InternalTrafficPolicy != nil {\n\t\tt.Errorf(\"service internalTrafficPolicy should be droppped but is set: %v\", service.Spec.InternalTrafficPolicy)\n\t}\n\n\tservice, err = client.CoreV1().Services(ns.Name).Get(context.TODO(), service.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error getting service: %v\", err)\n\t}\n\n\tif service.Spec.InternalTrafficPolicy != nil {\n\t\tt.Errorf(\"service internalTrafficPolicy should be droppped but is set: %v\", service.Spec.InternalTrafficPolicy)\n\t}\n}", "func TestNonMatchingLBClass(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tlbClass := \"net.example/some-other-class\"\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerClass: &lbClass,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"Unexpected patch to a service\")\n\n\t\treturn true\n\t}, 100*time.Millisecond)\n\n\tgo 
fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n}", "func (m *MockUnsafePdfServiceServer) mustEmbedUnimplementedPdfServiceServer() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"mustEmbedUnimplementedPdfServiceServer\")\n}", "func (m *MockIDistributedEnforcer) EnforceEx(arg0 ...interface{}) (bool, []string, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range arg0 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"EnforceEx\", varargs...)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].([]string)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (s *k8sStore) ingressIsValid(ing *networkingv1.Ingress) bool {\n\tvar endpointKey string\n\tif ing.Spec.DefaultBackend != nil { // stream\n\t\tendpointKey = fmt.Sprintf(\"%s/%s\", ing.Namespace, ing.Spec.DefaultBackend.Service.Name)\n\t} else { // http\n\tLoop:\n\t\tfor _, rule := range ing.Spec.Rules {\n\t\t\tfor _, path := range rule.IngressRuleValue.HTTP.Paths {\n\t\t\t\tendpointKey = fmt.Sprintf(\"%s/%s\", ing.Namespace, path.Backend.Service.Name)\n\t\t\t\tif endpointKey != \"\" {\n\t\t\t\t\tbreak Loop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\titem, exists, err := s.listers.Endpoint.GetByKey(endpointKey)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Can not get endpoint by key(%s): %v\", endpointKey, err)\n\t\treturn false\n\t}\n\tif !exists {\n\t\tlogrus.Debugf(\"Endpoint %s does not exist.\", endpointKey)\n\t\treturn false\n\t}\n\tendpoint, ok := item.(*corev1.Endpoints)\n\tif !ok {\n\t\tlogrus.Errorf(\"Cant not convert %v to %v\", reflect.TypeOf(item), reflect.TypeOf(endpoint))\n\t\treturn false\n\t}\n\tif len(endpoint.Subsets) == 0 {\n\t\tlogrus.Debugf(\"Endpoints(%s) is empty, ignore it\", endpointKey)\n\t\treturn false\n\t}\n\tif !hasReadyAddresses(endpoint) {\n\t\tlogrus.Debugf(\"Endpoints(%s) is empty, ignore it\", endpointKey)\n\t\treturn 
false\n\t}\n\n\treturn true\n}", "func Test_ExternalNameServiceDropsInternalTrafficPolicy(t *testing.T) {\n\tserver := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())\n\tdefer server.TearDownFn()\n\n\tclient, err := clientset.NewForConfig(server.ClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating clientset: %v\", err)\n\t}\n\n\tns := framework.CreateNamespaceOrDie(client, \"test-external-name-drops-internal-traffic-policy\", t)\n\tdefer framework.DeleteNamespaceOrDie(client, ns, t)\n\n\tinternalTrafficPolicy := corev1.ServiceInternalTrafficPolicyCluster\n\tservice := &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"test-123\",\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tType: corev1.ServiceTypeExternalName,\n\t\t\tExternalName: \"foo.bar.com\",\n\t\t\tInternalTrafficPolicy: &internalTrafficPolicy,\n\t\t},\n\t}\n\n\tservice, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating test service: %v\", err)\n\t}\n\n\tif service.Spec.InternalTrafficPolicy != nil {\n\t\tt.Errorf(\"service internalTrafficPolicy should be droppped but is set: %v\", service.Spec.InternalTrafficPolicy)\n\t}\n\n\tservice, err = client.CoreV1().Services(ns.Name).Get(context.TODO(), service.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error getting service: %v\", err)\n\t}\n\n\tif service.Spec.InternalTrafficPolicy != nil {\n\t\tt.Errorf(\"service internalTrafficPolicy should be droppped but is set: %v\", service.Spec.InternalTrafficPolicy)\n\t}\n}", "func TestBeforeUpdate(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\ttweakSvc func(oldSvc, newSvc *api.Service) // given basic valid services, each test case can customize them\n\t\texpectErr bool\n\t}{\n\t\t{\n\t\t\tname: \"no change\",\n\t\t\ttweakSvc: func(oldSvc, newSvc *api.Service) {\n\t\t\t\t// nothing\n\t\t\t},\n\t\t\texpectErr: 
false,\n\t\t},\n\t\t{\n\t\t\tname: \"change port\",\n\t\t\ttweakSvc: func(oldSvc, newSvc *api.Service) {\n\t\t\t\tnewSvc.Spec.Ports[0].Port++\n\t\t\t},\n\t\t\texpectErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad namespace\",\n\t\t\ttweakSvc: func(oldSvc, newSvc *api.Service) {\n\t\t\t\tnewSvc.Namespace = \"#$%%invalid\"\n\t\t\t},\n\t\t\texpectErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"change name\",\n\t\t\ttweakSvc: func(oldSvc, newSvc *api.Service) {\n\t\t\t\tnewSvc.Name += \"2\"\n\t\t\t},\n\t\t\texpectErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"change ClusterIP\",\n\t\t\ttweakSvc: func(oldSvc, newSvc *api.Service) {\n\t\t\t\toldSvc.Spec.ClusterIP = \"1.2.3.4\"\n\t\t\t\tnewSvc.Spec.ClusterIP = \"4.3.2.1\"\n\t\t\t},\n\t\t\texpectErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"change selectpor\",\n\t\t\ttweakSvc: func(oldSvc, newSvc *api.Service) {\n\t\t\t\tnewSvc.Spec.Selector = map[string]string{\"newkey\": \"newvalue\"}\n\t\t\t},\n\t\t\texpectErr: false,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\toldSvc := makeValidService()\n\t\tnewSvc := makeValidService()\n\t\ttc.tweakSvc(&oldSvc, &newSvc)\n\t\tctx := genericapirequest.NewDefaultContext()\n\t\terr := rest.BeforeUpdate(Strategy, ctx, runtime.Object(&oldSvc), runtime.Object(&newSvc))\n\t\tif tc.expectErr && err == nil {\n\t\t\tt.Errorf(\"unexpected non-error for %q\", tc.name)\n\t\t}\n\t\tif !tc.expectErr && err != nil {\n\t\t\tt.Errorf(\"unexpected error for %q: %v\", tc.name, err)\n\t\t}\n\t}\n}", "func (m *MockIDistributedEnforcer) Enforce(arg0 ...interface{}) (bool, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range arg0 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Enforce\", varargs...)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClient) InSyncReplicas(arg0 string, arg1 int32) ([]int32, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InSyncReplicas\", arg0, arg1)\n\tret0, _ := 
ret[0].([]int32)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestInterPodAffinityAdmission(t *testing.T) {\n\thandler := NewInterPodAntiAffinity(nil)\n\tpod := api.Pod{\n\t\tSpec: api.PodSpec{},\n\t}\n\ttests := []struct {\n\t\taffinity map[string]string\n\t\terrorExpected bool\n\t}{\n\t\t// empty affinity its success.\n\t\t{\n\t\t\taffinity: map[string]string{},\n\t\t\terrorExpected: false,\n\t\t},\n\t\t// what ever topologyKey in preferredDuringSchedulingIgnoredDuringExecution, the admission should success.\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"preferredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"weight\": 5,\n\t\t\t\t\t\t\t\"podAffinityTerm\": {\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"namespaces\": [],\n\t\t\t\t\t\t\t\t\"topologyKey\": \"az\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}]\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\terrorExpected: false,\n\t\t},\n\t\t// valid topologyKey in requiredDuringSchedulingIgnoredDuringExecution,\n\t\t// plus any topologyKey in preferredDuringSchedulingIgnoredDuringExecution, then admission success.\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"preferredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"weight\": 5,\n\t\t\t\t\t\t\t\"podAffinityTerm\": {\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"namespaces\": [],\n\t\t\t\t\t\t\t\t\"topologyKey\": 
\"az\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}],\n\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\": [],\n\t\t\t\t\t\t\t\"topologyKey\": \"` + unversioned.LabelHostname + `\"\n\t\t\t\t\t\t}]\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\terrorExpected: false,\n\t\t},\n\t\t// valid topologyKey in requiredDuringSchedulingIgnoredDuringExecution then admission success.\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\":[],\n\t\t\t\t\t\t\t\"topologyKey\": \"` + unversioned.LabelHostname + `\"\n\t\t\t\t\t\t}]\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\terrorExpected: false,\n\t\t},\n\t\t// invalid topologyKey in requiredDuringSchedulingIgnoredDuringExecution then admission fails.\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\":[],\n\t\t\t\t\t\t\t\"topologyKey\": \" zone \"\n\t\t\t\t\t\t}]\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\terrorExpected: true,\n\t\t},\n\t\t// invalid topologyKey in requiredDuringSchedulingRequiredDuringExecution then 
admission fails.\n\t\t// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.\n\t\t// {\n\t\t// affinity: map[string]string{\n\t\t//\t\t\tapi.AffinityAnnotationKey: `\n\t\t//\t\t\t\t{\"podAntiAffinity\": {\n\t\t//\t\t\t\t\t\"requiredDuringSchedulingRequiredDuringExecution\": [{\n\t\t//\t\t\t\t\t\t\"labelSelector\": {\n\t\t//\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t//\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t//\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t//\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t//\t\t\t\t\t\t\t}]\n\t\t//\t\t\t\t\t\t},\n\t\t//\t\t\t\t\t\t\"namespaces\":[],\n\t\t//\t\t\t\t\t\t\"topologyKey\": \" zone \"\n\t\t//\t\t\t\t\t}]\n\t\t//\t\t\t\t}}`,\n\t\t//\t\t\t},\n\t\t//\t\t\terrorExpected: true,\n\t\t// }\n\t\t// list of requiredDuringSchedulingIgnoredDuringExecution middle element topologyKey is not valid.\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\":[],\n\t\t\t\t\t\t\t\"topologyKey\": \"` + unversioned.LabelHostname + `\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\":[],\n\t\t\t\t\t\t\t\"topologyKey\": \" zone \"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": 
\"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\": [],\n\t\t\t\t\t\t\t\"topologyKey\": \"` + unversioned.LabelHostname + `\"\n\t\t\t\t\t\t}]\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\terrorExpected: true,\n\t\t},\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"thisIsAInvalidAffinity\": [{}\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\t// however, we should not get error here\n\t\t\terrorExpected: false,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tpod.ObjectMeta.Annotations = test.affinity\n\t\terr := handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind(\"Pod\").WithVersion(\"version\"), \"foo\", \"name\", api.Resource(\"pods\").WithVersion(\"version\"), \"\", \"ignored\", nil))\n\n\t\tif test.errorExpected && err == nil {\n\t\t\tt.Errorf(\"Expected error for Anti Affinity %+v but did not get an error\", test.affinity)\n\t\t}\n\n\t\tif !test.errorExpected && err != nil {\n\t\t\tt.Errorf(\"Unexpected error %v for AntiAffinity %+v\", err, test.affinity)\n\t\t}\n\t}\n}", "func TestResourcesCtrlUpdateByHostname(t *testing.T) {\n\ta := assert.New(t)\n\tr := require.New(t)\n\tparams := utils.NewFakeParamsGetter()\n\trender := utils.NewFakeRender()\n\tinter := &resourcesCtrlResourcesInter{}\n\tvalid := &resourcesCtrlResourcesValid{}\n\trecorder := httptest.NewRecorder()\n\tctrl := NewResourcesCtrl(inter, render, params, valid)\n\tresourceIn := &models.Resource{\n\t\tHostname: utils.StrCpy(\"foo.bar.com\"),\n\t}\n\tresourceOut := &models.Resource{}\n\n\tvalid.errValid = true\n\n\t// Validation error\n\tctrl.UpdateByHostname(recorder, utils.FakeRequest(\"PUT\", \"http://foo.bar/resources/1\", resourceIn))\n\tr.Equal(422, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.Validation, render.APIError)\n\tutils.Clear(params, render, recorder)\n\n\tvalid.errValid = false\n\n\t// No error, 
a resource is returned\n\tctrl.UpdateByHostname(recorder, utils.FakeRequest(\"PUT\", \"http://foo.bar/resources/1\", resourceIn))\n\tr.Equal(200, render.Status)\n\terr := json.NewDecoder(recorder.Body).Decode(resourceOut)\n\tr.NoError(err)\n\ta.NotNil(resourceOut)\n\ta.Nil(resourceOut.Hostname)\n\tutils.Clear(params, render, recorder)\n\n\t// Null body decoding error\n\tctrl.UpdateByHostname(recorder, utils.FakeRequestRaw(\"PUT\", \"http://foo.bar/resources/1\", nil))\n\tr.Equal(400, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.BodyDecoding, render.APIError)\n\tutils.Clear(params, render, recorder)\n\n\t// The interactor returns a database error\n\tinter.errDB = true\n\tctrl.UpdateByHostname(recorder, utils.FakeRequest(\"PUT\", \"http://foo.bar/resources/1\", resourceIn))\n\tr.Equal(500, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.Internal, render.APIError)\n\tutils.Clear(params, render, recorder)\n\n\t// Resource not found\n\tinter.errDB = false\n\tinter.errNotFound = true\n\tctrl.UpdateByHostname(recorder, utils.FakeRequest(\"PUT\", \"http://foo.bar/resources/1\", resourceIn))\n\tr.Equal(404, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.NotFound, render.APIError)\n\tutils.Clear(params, render, recorder)\n}", "func (m *MockqueueTask) Nack() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Nack\")\n}", "func (m *MockProvider) OnEndpointsAdd(arg0 *v1.Endpoints) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsAdd\", arg0)\n}", "func TestDiscoveryResourceGate(t *testing.T) {\n\tresources := map[string][]metav1.APIResource{\n\t\t\"allLegacy\": {\n\t\t\t{Name: \"clusterpolicies\", Kind: \"ClusterPolicies\"},\n\t\t\t{Name: \"clusterpolicybindings\", Kind: \"ClusterPolicyBindings\"},\n\t\t\t{Name: \"policies\", Kind: \"Policies\"},\n\t\t\t{Name: \"policybindings\", Kind: 
\"PolicyBindings\"},\n\t\t\t{Name: \"foo\", Kind: \"Foo\"},\n\t\t},\n\t\t\"partialLegacy\": {\n\t\t\t{Name: \"clusterpolicies\", Kind: \"ClusterPolicies\"},\n\t\t\t{Name: \"clusterpolicybindings\", Kind: \"ClusterPolicyBindings\"},\n\t\t\t{Name: \"foo\", Kind: \"Foo\"},\n\t\t},\n\t\t\"noLegacy\": {\n\t\t\t{Name: \"foo\", Kind: \"Foo\"},\n\t\t\t{Name: \"bar\", Kind: \"Bar\"},\n\t\t},\n\t}\n\n\tlegacyTests := map[string]struct {\n\t\texistingResources *metav1.APIResourceList\n\t\texpectErrStr string\n\t}{\n\t\t\"scheme-legacy-all-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.LegacySchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"allLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"\",\n\t\t},\n\t\t\"scheme-legacy-some-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.LegacySchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"partialLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy resources\",\n\t\t},\n\t\t\"scheme-legacy-none-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.LegacySchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy resources\",\n\t\t},\n\t\t\"scheme-all-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.SchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"allLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"\",\n\t\t},\n\t\t\"scheme-some-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.SchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"partialLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy resources\",\n\t\t},\n\t\t\"scheme-none-supported\": {\n\t\t\texistingResources: 
&metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.SchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy resources\",\n\t\t},\n\t}\n\n\tdiscoveryTests := map[string]struct {\n\t\texistingResources *metav1.APIResourceList\n\t\tinputGVR []schema.GroupVersionResource\n\t\texpectedGVR []schema.GroupVersionResource\n\t\texpectedAll bool\n\t}{\n\t\t\"discovery-subset\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: \"v1\",\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\tinputGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"noexist\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"discovery-none\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: \"v1\",\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\tinputGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"noexist\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedGVR: []schema.GroupVersionResource{},\n\t\t},\n\t\t\"discovery-all\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: \"v1\",\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\tinputGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: 
\"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedAll: true,\n\t\t},\n\t}\n\n\tfor tcName, tc := range discoveryTests {\n\t\tfunc() {\n\t\t\tserver := testServer(t, tc.existingResources)\n\t\t\tdefer server.Close()\n\t\t\tclient := discovery.NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL})\n\n\t\t\tgot, all, err := DiscoverGroupVersionResources(client, tc.inputGVR...)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"myerr %s\", err.Error())\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tc.expectedGVR) {\n\t\t\t\tt.Fatalf(\"%s got %v, expected %v\", tcName, got, tc.expectedGVR)\n\t\t\t}\n\t\t\tif tc.expectedAll && !all {\n\t\t\t\tt.Fatalf(\"%s expected all\", tcName)\n\t\t\t}\n\t\t}()\n\t}\n\n\tfor tcName, tc := range legacyTests {\n\t\tfunc() {\n\t\t\tserver := testServer(t, tc.existingResources)\n\t\t\tdefer server.Close()\n\t\t\tclient := discovery.NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL})\n\n\t\t\terr := LegacyPolicyResourceGate(client)\n\t\t\tif err != nil {\n\t\t\t\tif len(tc.expectErrStr) == 0 {\n\t\t\t\t\tt.Fatalf(\"%s unexpected err %s\\n\", tcName, err.Error())\n\t\t\t\t}\n\t\t\t\tif tc.expectErrStr != err.Error() {\n\t\t\t\t\tt.Fatalf(\"%s expected err %s, got %s\", tcName, tc.expectErrStr, err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err == nil && len(tc.expectErrStr) != 0 {\n\t\t\t\tt.Fatalf(\"%s expected err %s, got none\\n\", tcName, tc.expectErrStr)\n\t\t\t}\n\t\t}()\n\t}\n}", "func TestIntermediateNameAmbiguous(t *testing.T) {\n\tth := kusttest_test.MakeHarness(t)\n\tth.WriteK(\"gcp\", `\nnamePrefix: gcp-\nresources:\n- 
../emea\npatchesStrategicMerge:\n- depPatch.yaml\n`)\n\tth.WriteF(\"gcp/depPatch.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n replicas: 999\n`)\n\tth.WriteK(\"emea\", `\nnamePrefix: emea-\nresources:\n- ../prod\n- deployment.yaml\n`)\n\tth.WriteF(\"emea/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\tth.WriteK(\"prod\", `\nnamePrefix: prod-\nresources:\n- ../base\n`)\n\tth.WriteK(\"base\", `\nresources:\n- deployment.yaml\n`)\n\tth.WriteF(\"base/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\terr := th.RunWithErr(\"gcp\", th.MakeDefaultOptions())\n\tassert.Error(t, err)\n}", "func Test_DeviceService_Get_NotFoundByIp(t *testing.T) {\n\th := TestHelper{}\n\trep := new(mocks.IDeviceRepository)\n\trepAuth := new(mocks.IDeviceAuthRepository)\n\ts := h.CreateTestDeviceService(rep, repAuth)\n\n\tip := \"127.0.0.1\"\n\trep.On(\"Get\", ip).Return(models.Device{}, errors.New(\"not found\"))\n\n\t_, err := s.Get(ip)\n\tassert.Error(t, err)\n}", "func Test_UniformRegistration_RegistrationOfKeptnIntegrationMultiplePods(t *testing.T) {\n\tdefer func(t *testing.T) {\n\t\tPrintLogsOfPods(t, []string{\"shipyard-controller\"})\n\t}(t)\n\n\t// make sure the echo-service uses the same distributor as Keptn core\n\timageName, err := GetImageOfDeploymentContainer(\"lighthouse-service\", \"lighthouse-service\")\n\trequire.Nil(t, err)\n\tdistributorImage := strings.Replace(imageName, \"lighthouse-service\", \"distributor\", 1)\n\n\techoServiceManifestContent := strings.ReplaceAll(echoServiceK8sManifest, \"${distributor-image}\", distributorImage)\n\techoServiceManifestContent = strings.ReplaceAll(echoServiceManifestContent, \"replicas: 1\", \"replicas: 3\")\n\techoServiceManifestContent = strings.ReplaceAll(echoServiceManifestContent, 
\"${queue-group}\", \"echo-service\")\n\techoServiceManifestContent = strings.ReplaceAll(echoServiceManifestContent, \"${api-endpoint}\", \"\")\n\techoServiceManifestContent = strings.ReplaceAll(echoServiceManifestContent, \"${api-token}\", \"\")\n\n\ttmpFile, err := CreateTmpFile(\"echo-service-*.yaml\", echoServiceManifestContent)\n\tdefer func() {\n\t\tif err := os.Remove(tmpFile); err != nil {\n\t\t\tt.Logf(\"Could not delete file: %v\", err)\n\t\t}\n\t}()\n\ttestUniformIntegration(t, func() {\n\t\t// install echo integration\n\t\t_, err = KubeCtlApplyFromURL(tmpFile)\n\t\trequire.Nil(t, err)\n\n\t\terr = waitForDeploymentToBeRolledOut(false, echoServiceName, GetKeptnNameSpaceFromEnv())\n\t\trequire.Nil(t, err)\n\n\t}, func() {\n\t\terr := KubeCtlDeleteFromURL(tmpFile)\n\t\trequire.Nil(t, err)\n\t}, true)\n}", "func TestApplyStatus(t *testing.T) {\n\tserver, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), []string{\"--disable-admission-plugins\", \"ServiceAccount,TaintNodesByCondition\"}, framework.SharedEtcd())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer server.TearDownFn()\n\n\tclient, err := kubernetes.NewForConfig(server.ClientConfig)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdynamicClient, err := dynamic.NewForConfig(server.ClientConfig)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// create CRDs so we can make sure that custom resources do not get lost\n\tetcd.CreateTestCRDs(t, apiextensionsclientset.NewForConfigOrDie(server.ClientConfig), false, etcd.GetCustomResourceDefinitionData()...)\n\tif _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}, metav1.CreateOptions{}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcreateData := etcd.GetEtcdStorageData()\n\n\t// gather resources to test\n\t_, resourceLists, err := client.Discovery().ServerGroupsAndResources()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get 
ServerGroupsAndResources with error: %+v\", err)\n\t}\n\n\tfor _, resourceList := range resourceLists {\n\t\tfor _, resource := range resourceList.APIResources {\n\t\t\tif !strings.HasSuffix(resource.Name, \"/status\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmapping, err := createMapping(resourceList.GroupVersion, resource)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tt.Run(mapping.Resource.String(), func(t *testing.T) {\n\t\t\t\t// both spec and status get wiped for CSRs,\n\t\t\t\t// nothing is expected to be managed for it, skip it\n\t\t\t\tif mapping.Resource.Resource == \"certificatesigningrequests\" {\n\t\t\t\t\tt.Skip()\n\t\t\t\t}\n\n\t\t\t\tstatus, ok := statusData[mapping.Resource]\n\t\t\t\tif !ok {\n\t\t\t\t\tstatus = statusDefault\n\t\t\t\t}\n\t\t\t\tnewResource, ok := createData[mapping.Resource]\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Fatalf(\"no test data for %s. Please add a test for your new type to etcd.GetEtcdStorageData().\", mapping.Resource)\n\t\t\t\t}\n\t\t\t\tnewObj := unstructured.Unstructured{}\n\t\t\t\tif err := json.Unmarshal([]byte(newResource.Stub), &newObj.Object); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tnamespace := testNamespace\n\t\t\t\tif mapping.Scope == meta.RESTScopeRoot {\n\t\t\t\t\tnamespace = \"\"\n\t\t\t\t}\n\t\t\t\tname := newObj.GetName()\n\n\t\t\t\t// etcd test stub data doesn't contain apiVersion/kind (!), but apply requires it\n\t\t\t\tnewObj.SetGroupVersionKind(mapping.GroupVersionKind)\n\n\t\t\t\trsc := dynamicClient.Resource(mapping.Resource).Namespace(namespace)\n\t\t\t\t// apply to create\n\t\t\t\t_, err = rsc.Apply(context.TODO(), name, &newObj, metav1.ApplyOptions{FieldManager: \"create_test\"})\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tstatusObj := unstructured.Unstructured{}\n\t\t\t\tif err := json.Unmarshal([]byte(status), &statusObj.Object); err != nil 
{\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tstatusObj.SetAPIVersion(mapping.GroupVersionKind.GroupVersion().String())\n\t\t\t\tstatusObj.SetKind(mapping.GroupVersionKind.Kind)\n\t\t\t\tstatusObj.SetName(name)\n\n\t\t\t\tobj, err := dynamicClient.\n\t\t\t\t\tResource(mapping.Resource).\n\t\t\t\t\tNamespace(namespace).\n\t\t\t\t\tApplyStatus(context.TODO(), name, &statusObj, metav1.ApplyOptions{FieldManager: \"apply_status_test\", Force: true})\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to apply: %v\", err)\n\t\t\t\t}\n\n\t\t\t\taccessor, err := meta.Accessor(obj)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to get meta accessor: %v:\\n%v\", err, obj)\n\t\t\t\t}\n\n\t\t\t\tmanagedFields := accessor.GetManagedFields()\n\t\t\t\tif managedFields == nil {\n\t\t\t\t\tt.Fatal(\"Empty managed fields\")\n\t\t\t\t}\n\t\t\t\tif !findManager(managedFields, \"apply_status_test\") {\n\t\t\t\t\tt.Fatalf(\"Couldn't find apply_status_test: %v\", managedFields)\n\t\t\t\t}\n\t\t\t\tif !findManager(managedFields, \"create_test\") {\n\t\t\t\t\tt.Fatalf(\"Couldn't find create_test: %v\", managedFields)\n\t\t\t\t}\n\n\t\t\t\tif err := rsc.Delete(context.TODO(), name, *metav1.NewDeleteOptions(0)); err != nil {\n\t\t\t\t\tt.Fatalf(\"deleting final object failed: %v\", err)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}", "func TestAssetTagServiceExistingTags(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := 
CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+TestJWTAuthToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusOK, response.StatusCode)\n}", "func isIngressEqual(a []v1.LoadBalancerIngress, b []v1.LoadBalancerIngress) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif _, found := isAddressInIngress(b, a[i].IP); !found {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor i := 0; i < len(b); i++ {\n\t\tif _, found := isAddressInIngress(a, b[i].IP); !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func TestAssetTagServiceNoExistingTags(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(false, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, 
mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+TestJWTAuthToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusOK, response.StatusCode)\n}", "func TestV1WebhookCacheAndRetry(t *testing.T) {\n\tserv := new(mockV1Service)\n\ts, err := NewV1TestServer(serv, serverCert, serverKey, caCert)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer s.Close()\n\n\t// Create an authenticator that caches successful responses \"forever\" (100 days).\n\twh, err := newV1TokenAuthenticator(s.URL, clientCert, clientKey, caCert, 2400*time.Hour, nil, noopAuthenticatorMetrics())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttestcases := []struct {\n\t\tdescription string\n\n\t\ttoken string\n\t\tallow bool\n\t\tcode int\n\n\t\texpectError 
bool\n\t\texpectOk bool\n\t\texpectCalls int\n\t}{\n\t\t{\n\t\t\tdescription: \"t0k3n, 500 error, retries and fails\",\n\n\t\t\ttoken: \"t0k3n\",\n\t\t\tallow: false,\n\t\t\tcode: 500,\n\n\t\t\texpectError: true,\n\t\t\texpectOk: false,\n\t\t\texpectCalls: 5,\n\t\t},\n\t\t{\n\t\t\tdescription: \"t0k3n, 404 error, fails (but no retry)\",\n\n\t\t\ttoken: \"t0k3n\",\n\t\t\tallow: false,\n\t\t\tcode: 404,\n\n\t\t\texpectError: true,\n\t\t\texpectOk: false,\n\t\t\texpectCalls: 1,\n\t\t},\n\t\t{\n\t\t\tdescription: \"t0k3n, 200 response, allowed, succeeds with a single call\",\n\n\t\t\ttoken: \"t0k3n\",\n\t\t\tallow: true,\n\t\t\tcode: 200,\n\n\t\t\texpectError: false,\n\t\t\texpectOk: true,\n\t\t\texpectCalls: 1,\n\t\t},\n\t\t{\n\t\t\tdescription: \"t0k3n, 500 response, disallowed, but never called because previous 200 response was cached\",\n\n\t\t\ttoken: \"t0k3n\",\n\t\t\tallow: false,\n\t\t\tcode: 500,\n\n\t\t\texpectError: false,\n\t\t\texpectOk: true,\n\t\t\texpectCalls: 0,\n\t\t},\n\n\t\t{\n\t\t\tdescription: \"an0th3r_t0k3n, 500 response, disallowed, should be called again with retries\",\n\n\t\t\ttoken: \"an0th3r_t0k3n\",\n\t\t\tallow: false,\n\t\t\tcode: 500,\n\n\t\t\texpectError: true,\n\t\t\texpectOk: false,\n\t\t\texpectCalls: 5,\n\t\t},\n\t\t{\n\t\t\tdescription: \"an0th3r_t0k3n, 429 response, disallowed, should be called again with retries\",\n\n\t\t\ttoken: \"an0th3r_t0k3n\",\n\t\t\tallow: false,\n\t\t\tcode: 429,\n\n\t\t\texpectError: true,\n\t\t\texpectOk: false,\n\t\t\texpectCalls: 5,\n\t\t},\n\t\t{\n\t\t\tdescription: \"an0th3r_t0k3n, 200 response, allowed, succeeds with a single call\",\n\n\t\t\ttoken: \"an0th3r_t0k3n\",\n\t\t\tallow: true,\n\t\t\tcode: 200,\n\n\t\t\texpectError: false,\n\t\t\texpectOk: true,\n\t\t\texpectCalls: 1,\n\t\t},\n\t\t{\n\t\t\tdescription: \"an0th3r_t0k3n, 500 response, disallowed, but never called because previous 200 response was cached\",\n\n\t\t\ttoken: \"an0th3r_t0k3n\",\n\t\t\tallow: false,\n\t\t\tcode: 
500,\n\n\t\t\texpectError: false,\n\t\t\texpectOk: true,\n\t\t\texpectCalls: 0,\n\t\t},\n\t}\n\n\tfor _, testcase := range testcases {\n\t\tt.Run(testcase.description, func(t *testing.T) {\n\t\t\tserv.allow = testcase.allow\n\t\t\tserv.statusCode = testcase.code\n\t\t\tserv.called = 0\n\n\t\t\t_, ok, err := wh.AuthenticateToken(context.Background(), testcase.token)\n\t\t\thasError := err != nil\n\t\t\tif hasError != testcase.expectError {\n\t\t\t\tt.Errorf(\"Webhook returned HTTP %d, expected error=%v, but got error %v\", testcase.code, testcase.expectError, err)\n\t\t\t}\n\t\t\tif serv.called != testcase.expectCalls {\n\t\t\t\tt.Errorf(\"Expected %d calls, got %d\", testcase.expectCalls, serv.called)\n\t\t\t}\n\t\t\tif ok != testcase.expectOk {\n\t\t\t\tt.Errorf(\"Expected ok=%v, got %v\", testcase.expectOk, ok)\n\t\t\t}\n\t\t})\n\t}\n}", "func testEgressToServerInCIDRBlockWithException(t *testing.T, data *TestData) {\n\tworkerNode := workerNodeName(1)\n\tserverAName, serverAIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, \"test-server-\", workerNode, testNamespace, false)\n\tdefer cleanupFunc()\n\n\tclientA, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, \"test-client-\", workerNode, testNamespace, false)\n\tdefer cleanupFunc()\n\tvar serverAAllowCIDR string\n\tvar serverAExceptList []string\n\tvar serverAIP string\n\tif serverAIPs.ipv6 == nil {\n\t\tt.Fatal(\"server IPv6 address is empty\")\n\t}\n\t_, serverAAllowSubnet, err := net.ParseCIDR(fmt.Sprintf(\"%s/%d\", serverAIPs.ipv6.String(), 64))\n\tif err != nil {\n\t\tt.Fatalf(\"could not parse allow subnet\")\n\t}\n\tserverAAllowCIDR = serverAAllowSubnet.String()\n\tserverAExceptList = []string{fmt.Sprintf(\"%s/%d\", serverAIPs.ipv6.String(), 128)}\n\tserverAIP = serverAIPs.ipv6.String()\n\n\tif err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverAIP, 80); err != nil {\n\t\tt.Fatalf(\"%s should be able to netcat %s\", clientA, 
serverAName)\n\t}\n\n\tnp, err := data.createNetworkPolicy(\"deny-client-a-via-except-cidr-egress-rule\", &networkingv1.NetworkPolicySpec{\n\t\tPodSelector: metav1.LabelSelector{\n\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\"antrea-e2e\": clientA,\n\t\t\t},\n\t\t},\n\t\tPolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},\n\t\tEgress: []networkingv1.NetworkPolicyEgressRule{\n\t\t\t{\n\t\t\t\tTo: []networkingv1.NetworkPolicyPeer{\n\t\t\t\t\t{\n\t\t\t\t\t\tIPBlock: &networkingv1.IPBlock{\n\t\t\t\t\t\t\tCIDR: serverAAllowCIDR,\n\t\t\t\t\t\t\tExcept: serverAExceptList,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Error when creating network policy: %v\", err)\n\t}\n\tcleanupNP := func() {\n\t\tif err = data.deleteNetworkpolicy(np); err != nil {\n\t\t\tt.Errorf(\"Error when deleting network policy: %v\", err)\n\t\t}\n\t}\n\tdefer cleanupNP()\n\n\tif err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverAIP, 80); err == nil {\n\t\tt.Fatalf(\"%s should not be able to netcat %s\", clientA, serverAName)\n\t}\n}", "func updateIngressConfigSpecWithRetryOnConflict(t *testing.T, name types.NamespacedName, timeout time.Duration, mutateSpecFn func(*configv1.IngressSpec)) error {\n\tingressConfig := configv1.Ingress{}\n\treturn wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {\n\t\tif err := kclient.Get(context.TODO(), name, &ingressConfig); err != nil {\n\t\t\tt.Logf(\"error getting ingress config %v: %v, retrying...\", name, err)\n\t\t\treturn false, nil\n\t\t}\n\t\tmutateSpecFn(&ingressConfig.Spec)\n\t\tif err := kclient.Update(context.TODO(), &ingressConfig); err != nil {\n\t\t\tif errors.IsConflict(err) {\n\t\t\t\tt.Logf(\"conflict when updating ingress config %v: %v, retrying...\", name, err)\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn false, err\n\t\t}\n\t\treturn true, nil\n\t})\n}", "func (m *MockProvider) OnEndpointsSynced() 
{\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsSynced\")\n}", "func TestOfnetHostBridge(t *testing.T) {\n\n\tfor i := 0; i < NUM_HOST_BRIDGE; i++ {\n\t\tmacAddr, _ := net.ParseMAC(\"02:02:01:AB:CD:EF\")\n\t\tipAddr := net.ParseIP(\"20.20.33.33\")\n\t\tendpoint := EndpointInfo{\n\t\t\tPortNo: uint32(NUM_AGENT + 3),\n\t\t\tMacAddr: macAddr,\n\t\t\tVlan: 1,\n\t\t\tIpAddr: ipAddr,\n\t\t}\n\n\t\tlog.Infof(\"Installing local host bridge endpoint: %+v\", endpoint)\n\n\t\t// Install the local endpoint\n\t\terr := hostBridges[i].AddHostPort(endpoint)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error installing endpoint: %+v. Err: %v\", endpoint, err)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Infof(\"Finished adding local host bridge endpoint\")\n\n\t\t// verify all ovs switches have this route\n\t\tbrName := \"hostBridge\" + fmt.Sprintf(\"%d\", i)\n\t\tflowList, err := ofctlFlowDump(brName)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error getting flow entries. Err: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// verify flow entry exists\n\t\tgwInFlowMatch := fmt.Sprintf(\"priority=100,in_port=%d\", NUM_AGENT+3)\n\t\tif !ofctlFlowMatch(flowList, MAC_DEST_TBL_ID, gwInFlowMatch) {\n\t\t\tt.Errorf(\"Could not find the flow %s on ovs %s\", gwInFlowMatch, brName)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Infof(\"Found gwInFlowMatch %s on ovs %s\", gwInFlowMatch, brName)\n\t\t// verify flow entry exists\n\t\tgwOutFlowMatch := fmt.Sprintf(\"priority=100,dl_dst=%s\", macAddr)\n\t\tif !ofctlFlowMatch(flowList, MAC_DEST_TBL_ID, gwOutFlowMatch) {\n\t\t\tt.Errorf(\"Could not find the flow %s on ovs %s\", gwOutFlowMatch, brName)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Infof(\"Found gwOutFlowMatch %s on ovs %s\", gwOutFlowMatch, brName)\n\n\t\t// verify flow entry exists\n\t\tgwARPFlowMatch := fmt.Sprintf(\"priority=100,arp\")\n\t\tif !ofctlFlowMatch(flowList, MAC_DEST_TBL_ID, gwARPFlowMatch) {\n\t\t\tt.Errorf(\"Could not find the flow %s on ovs %s\", gwARPFlowMatch, brName)\n\t\t\tt.Errorf(\"##FlowList: %v\", 
flowList)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Infof(\"Found gwARPFlowMatch %s on ovs %s\", gwARPFlowMatch, brName)\n\t\tlog.Infof(\"##FlowList: %v\", flowList)\n\n\t\t// Remove the gw endpoint\n\t\terr = hostBridges[i].DelHostPort(uint32(NUM_AGENT + 3))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error deleting endpoint: %+v. Err: %v\", endpoint, err)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Infof(\"Deleted endpoints. Verifying they are gone\")\n\n\t\t// verify flows are deleted\n\t\tflowList, err = ofctlFlowDump(brName)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error getting flow entries. Err: %v\", err)\n\t\t}\n\n\t\tif ofctlFlowMatch(flowList, MAC_DEST_TBL_ID, gwInFlowMatch) {\n\t\t\tt.Errorf(\"The flow %s not deleted from ovs %s\", gwInFlowMatch, brName)\n\t\t\treturn\n\t\t}\n\n\t\tif ofctlFlowMatch(flowList, MAC_DEST_TBL_ID, gwOutFlowMatch) {\n\t\t\tt.Errorf(\"The flow %s not deleted from ovs %s\", gwOutFlowMatch, brName)\n\t\t\treturn\n\t\t}\n\n\t\tif ofctlFlowMatch(flowList, MAC_DEST_TBL_ID, gwARPFlowMatch) {\n\t\t\tt.Errorf(\"The flow %s not deleted from ovs %s\", gwARPFlowMatch, brName)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Infof(\"Verified all flows are deleted\")\n\t}\n}", "func TestImplementationsExist(srcPrefix, dstPrefix string) error {\n\tswitch srcPrefix {\n\tcase \"k8s\":\n\tcase \"s3\":\n\tcase \"abs\":\n\tcase \"gcs\":\n\tdefault:\n\t\treturn fmt.Errorf(srcPrefix + \" not implemented\")\n\t}\n\n\tswitch dstPrefix {\n\tcase \"k8s\":\n\tcase \"s3\":\n\tcase \"abs\":\n\tcase \"gcs\":\n\tdefault:\n\t\treturn fmt.Errorf(dstPrefix + \" not implemented\")\n\t}\n\n\treturn nil\n}", "func TestGetStatusByIPAddressAtTimestamp1(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"} // nolint\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, 
timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\")\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}" ]
[ "0.66160387", "0.6372045", "0.60668015", "0.60601646", "0.6034052", "0.59953487", "0.59017277", "0.58672535", "0.5726815", "0.5719427", "0.5681671", "0.5648987", "0.56416595", "0.5630958", "0.5605578", "0.55587876", "0.5509858", "0.54953486", "0.54921466", "0.5445247", "0.5445039", "0.5441898", "0.5436041", "0.54353446", "0.54272175", "0.54260254", "0.5412361", "0.54116476", "0.5407124", "0.53979146", "0.5393933", "0.5389572", "0.53872514", "0.5367924", "0.5363139", "0.53493756", "0.5335455", "0.53283954", "0.53265923", "0.5326085", "0.53154236", "0.53022856", "0.5296352", "0.5278955", "0.52784586", "0.5275306", "0.52710253", "0.5256815", "0.5255653", "0.5254917", "0.5226375", "0.52263504", "0.521921", "0.52153903", "0.52099246", "0.5202246", "0.5186393", "0.51863086", "0.5179068", "0.5170847", "0.5168878", "0.5166943", "0.5164337", "0.5164216", "0.5164049", "0.5160906", "0.515752", "0.5146962", "0.5142266", "0.51187885", "0.51138663", "0.51132", "0.5111714", "0.5109299", "0.5094818", "0.5093605", "0.50910485", "0.5083211", "0.50825226", "0.50762343", "0.50716275", "0.5070771", "0.5067465", "0.5066868", "0.50650823", "0.506221", "0.506142", "0.5056996", "0.5055348", "0.5054484", "0.5046686", "0.50451857", "0.503911", "0.5039028", "0.5034868", "0.5031998", "0.50248736", "0.5023645", "0.5020298", "0.50202686" ]
0.7617899
0
CheckNoConflictsInIngress indicates an expected call of CheckNoConflictsInIngress
func (mr *MockValidaterMockRecorder) CheckNoConflictsInIngress(ingress interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckNoConflictsInIngress", reflect.TypeOf((*MockValidater)(nil).CheckNoConflictsInIngress), ingress) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockValidater) CheckNoConflictsInIngress(ingress *v1.Ingress) (bool, string) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CheckNoConflictsInIngress\", ingress)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(string)\n\treturn ret0, ret1\n}", "func TestIngressNoUpdate(t *testing.T) {\n\tingrNoUpdate := &networkingv1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: \"red-ns\",\n\t\t\tName: \"testingr-noupdate\",\n\t\t},\n\t\tSpec: networkingv1.IngressSpec{\n\t\t\tDefaultBackend: &networkingv1.IngressBackend{\n\t\t\t\tService: &networkingv1.IngressServiceBackend{\n\t\t\t\t\tName: \"testsvc\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t_, err := kubeClient.NetworkingV1().Ingresses(\"red-ns\").Create(context.TODO(), ingrNoUpdate, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in adding Ingress: %v\", err)\n\t}\n\twaitAndverify(t, \"Ingress/red-ns/testingr-noupdate\")\n\n\tingrNoUpdate.Status = networkingv1.IngressStatus{\n\t\tLoadBalancer: corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{\n\t\t\t\t{\n\t\t\t\t\tIP: \"1.1.1.1\",\n\t\t\t\t\tHostname: \"testingr.avi.internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tingrNoUpdate.ResourceVersion = \"2\"\n\t_, err = kubeClient.NetworkingV1().Ingresses(\"red-ns\").Update(context.TODO(), ingrNoUpdate, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in updating Ingress: %v\", err)\n\t}\n\n\tingrNoUpdate.Status = networkingv1.IngressStatus{\n\t\tLoadBalancer: corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{\n\t\t\t\t{\n\t\t\t\t\tIP: \"1.1.1.1\",\n\t\t\t\t\tHostname: \"testingr.avi.internal\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP: \"2.3.4.5\",\n\t\t\t\t\tHostname: \"testingr2.avi.internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tingrNoUpdate.ResourceVersion = \"3\"\n\t_, err = kubeClient.NetworkingV1().Ingresses(\"red-ns\").Update(context.TODO(), ingrNoUpdate, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error 
in updating Ingress: %v\", err)\n\t}\n\n\twaitAndverify(t, \"\")\n}", "func hasChanges(old *networking.Ingress, current *networking.Ingress) bool {\n\told.Status.LoadBalancer.Ingress = current.Status.LoadBalancer.Ingress\n\told.ResourceVersion = current.ResourceVersion\n\treturn !reflect.DeepEqual(old, current)\n}", "func (rs *RouteStatus) MarkIngressNotConfigured() {\n\trouteCondSet.Manage(rs).MarkUnknown(RouteConditionIngressReady,\n\t\t\"IngressNotConfigured\", \"Ingress has not yet been reconciled.\")\n}", "func isCatchAllIngress(spec networking.IngressSpec) bool {\n\treturn spec.DefaultBackend != nil && len(spec.Rules) == 0\n}", "func testIngressOverlapCIDRBlocks() []*TestStep {\n\texceptBuilder1 := make([]string, 1)\n\texceptBuilder3 := make([]string, 1)\n\n\t// It does not matter if podCIDR of pod \"x/a\" and \"y/a\" are same\n\t// Build simple allowCIDR block for testing purposes.\n\tallowCIDRStr := podIPs[\"y/a\"] + \"/16\"\n\t_, allowCIDR, err := net.ParseCIDR(allowCIDRStr)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to parse CIDR string %s\", allowCIDRStr)\n\t}\n\tallowCIDRStr = allowCIDR.String()\n\n\texceptBuilder1[0] = podIPs[\"y/a\"] + \"/24\"\n\tvar exceptCIDR *net.IPNet\n\t_, exceptCIDR, err = net.ParseCIDR(exceptBuilder1[0])\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to parse CIDR string %s\", exceptBuilder1[0])\n\t}\n\texceptBuilder1[0] = exceptCIDR.String()\n\n\t// Build exceptCIDR to block just the pod \"y/a\"\n\texceptBuilder3[0] = podIPs[\"y/a\"] + \"/32\"\n\n\tallowWithExcept := func() *Reachability {\n\t\tr := NewReachability(allPods, true)\n\t\tr.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\tr.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\t// Adding connectivity for all the pods in allowCIDR - exceptCIDR\n\t\tfor eachPod := range podIPs {\n\t\t\tif allowCIDR.Contains(net.ParseIP(podIPs[eachPod])) && !exceptCIDR.Contains(net.ParseIP(podIPs[eachPod])) {\n\t\t\t\tr.Expect(Pod(eachPod), Pod(\"x/a\"), true)\n\t\t\t}\n\t\t}\n\t\treturn 
r\n\t}\n\toverlapAllow := func() *Reachability {\n\t\tr := NewReachability(allPods, true)\n\t\tr.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\tr.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\t// Adding connectivity for all the pods in the created allowCIDR\n\t\tfor eachPod := range podIPs {\n\t\t\tif allowCIDR.Contains(net.ParseIP(podIPs[eachPod])) {\n\t\t\t\tr.Expect(Pod(eachPod), Pod(\"x/a\"), true)\n\t\t\t}\n\t\t}\n\t\treturn r\n\t}\n\toverlapAllowAndExcept := func() *Reachability {\n\t\tr := NewReachability(allPods, true)\n\t\tr.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\tr.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\t// Adding connectivity for all pods in created allowCIDR except pod \"y/a\"\n\t\tfor eachPod := range podIPs {\n\t\t\tif allowCIDR.Contains(net.ParseIP(podIPs[eachPod])) {\n\t\t\t\tr.Expect(Pod(eachPod), Pod(\"x/a\"), true)\n\t\t\t}\n\t\t}\n\t\t//Override the connectivity to pod \"y/a\"\n\t\tr.Expect(Pod(\"y/a\"), Pod(\"x/a\"), false)\n\t\treturn r\n\t}\n\n\t// Policy-1 is added with allow and except CIDR\n\tpolicyName := \"policy-that-has-except-block\"\n\tbuilder1 := &NetworkPolicySpecBuilder{}\n\tbuilder1 = builder1.SetName(\"x\", policyName).SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder1.SetTypeIngress()\n\tbuilder1.AddIngress(v1.ProtocolTCP, &p80, nil, &allowCIDRStr, exceptBuilder1, nil, nil, nil, nil)\n\tpolicy1 := builder1.Get()\n\t// Policy-2 is added with allow CIDR that is same as except CIDR of policy-1 (overlap policy)\n\tpolicyName2 := \"overlap-policy\"\n\tbuilder2 := &NetworkPolicySpecBuilder{}\n\tbuilder2 = builder2.SetName(\"x\", policyName2).SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder2.SetTypeIngress()\n\tbuilder2.AddIngress(v1.ProtocolTCP, &p80, nil, &exceptBuilder1[0], nil, nil, nil, nil, nil)\n\tpolicy2 := builder2.Get()\n\t// Update policy-2 with exceptCIDR not allowing only pod \"y/a\"\n\tbuilder3 := &NetworkPolicySpecBuilder{}\n\t// by preserving the same name, this policy will also serve to test 
the 'updated policy with CIDRs\".\n\tbuilder3 = builder3.SetName(\"x\", policyName2).SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder3.SetTypeIngress()\n\tbuilder3.AddIngress(v1.ProtocolTCP, &p80, nil, &exceptBuilder1[0], exceptBuilder3, nil, nil, nil, nil)\n\tpolicy3 := builder3.Get()\n\n\treturn []*TestStep{\n\t\t{\n\t\t\t\"Pods in built exceptCIDR -- not allowed\",\n\t\t\tallowWithExcept(), // exceptCIDR built from pod \"y/a\"\n\t\t\tpolicy1,\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"All pods in built allowCIDR -- allowed\",\n\t\t\toverlapAllow(), // allowCIDR is same as exceptCIDR from policy-1\n\t\t\tpolicy2,\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"Only pod y/a -- not allowed\",\n\t\t\toverlapAllowAndExcept(), // exceptCIDR contains only IP of pod \"y/a\"\n\t\t\tpolicy3,\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t}\n}", "func (s *k8sStore) checkIngress(ing *networkingv1.Ingress) bool {\n\ti, err := l4.NewParser(s).Parse(ing)\n\tif err != nil {\n\t\tlogrus.Warningf(\"Uxpected error with ingress: %v\", err)\n\t\treturn false\n\t}\n\n\tcfg := i.(*l4.Config)\n\tif cfg.L4Enable {\n\t\t_, err := net.Dial(\"tcp\", fmt.Sprintf(\"%s:%d\", cfg.L4Host, cfg.L4Port))\n\t\tif err == nil {\n\t\t\tlogrus.Warningf(\"%s, in Ingress(%v), is already in use.\",\n\t\t\t\tfmt.Sprintf(\"%s:%d\", cfg.L4Host, cfg.L4Port), ing)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\treturn true\n}", "func verifyExternalIngressController(t *testing.T, name types.NamespacedName, hostname, address string) {\n\tt.Helper()\n\techoPod := buildEchoPod(name.Name, name.Namespace)\n\tif err := kclient.Create(context.TODO(), echoPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoPod); err != nil {\n\t\t\tt.Fatalf(\"failed to delete pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t\t}\n\t}()\n\n\techoService := buildEchoService(echoPod.Name, 
echoPod.Namespace, echoPod.ObjectMeta.Labels)\n\tif err := kclient.Create(context.TODO(), echoService); err != nil {\n\t\tt.Fatalf(\"failed to create service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoService); err != nil {\n\t\t\tt.Fatalf(\"failed to delete service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t\t}\n\t}()\n\n\techoRoute := buildRouteWithHost(echoPod.Name, echoPod.Namespace, echoService.Name, hostname)\n\tif err := kclient.Create(context.TODO(), echoRoute); err != nil {\n\t\tt.Fatalf(\"failed to create route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoRoute); err != nil {\n\t\t\tt.Fatalf(\"failed to delete route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t\t}\n\t}()\n\n\t// If we have a DNS as an external IP address, make sure we can resolve it before moving on.\n\t// This just limits the number of \"could not resolve host\" errors which can be confusing.\n\tif net.ParseIP(address) == nil {\n\t\tif err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {\n\t\t\t_, err := net.LookupIP(address)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"waiting for loadbalancer domain %s to resolve...\", address)\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}); err != nil {\n\t\t\tt.Fatalf(\"loadbalancer domain %s was unable to resolve:\", address)\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, fmt.Sprintf(\"http://%s\", address), nil)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build client request: %v\", err)\n\t}\n\t// we use HOST header to map to the domain associated on the ingresscontroller.\n\t// This ensures our http call is routed to the correct router.\n\treq.Host = hostname\n\n\thttpClient := http.Client{Timeout: 5 * time.Second}\n\terr = waitForHTTPClientCondition(t, &httpClient, req, 10*time.Second, 
10*time.Minute, func(r *http.Response) bool {\n\t\tif r.StatusCode == http.StatusOK {\n\t\t\tt.Logf(\"verified connectivity with workload with req %v and response %v\", req.URL, r.StatusCode)\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to verify connectivity with workload with reqURL %s using external client: %v\", req.URL, err)\n\t}\n}", "func (r *NuxeoReconciler) reconcileIngress(access v1alpha1.NuxeoAccess, forcePassthrough bool, nodeSet v1alpha1.NodeSet,\n\tinstance *v1alpha1.Nuxeo) error {\n\tingressName := ingressName(instance, nodeSet)\n\tif access != (v1alpha1.NuxeoAccess{}) {\n\t\tif expected, err := r.defaultIngress(instance, access, forcePassthrough, ingressName, nodeSet); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t_, err = r.addOrUpdate(ingressName, instance.Namespace, expected, &v1beta1.Ingress{}, util.IngressComparer)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\treturn r.removeIfPresent(instance, ingressName, instance.Namespace, &v1beta1.Ingress{})\n\t}\n}", "func TestMakeIngressRuleZeroPercentTarget(t *testing.T) {\n\ttargets := []traffic.RevisionTarget{{\n\t\tTrafficTarget: v1.TrafficTarget{\n\t\t\tConfigurationName: \"config\",\n\t\t\tRevisionName: \"revision-dolphin\",\n\t\t\tPercent: ptr.Int64(100),\n\t\t},\n\t}, {\n\t\tTrafficTarget: v1.TrafficTarget{\n\t\t\tConfigurationName: \"new-config\",\n\t\t\tRevisionName: \"new-revision-orca\",\n\t\t\tPercent: ptr.Int64(0),\n\t\t},\n\t}}\n\tdomains := sets.NewString(\"test.org\")\n\ttc := &traffic.Config{\n\t\tTargets: map[string]traffic.RevisionTargets{\n\t\t\ttraffic.DefaultTarget: targets,\n\t\t},\n\t}\n\tro := tc.BuildRollout()\n\trule := makeIngressRule(domains, ns,\n\t\tnetv1alpha1.IngressVisibilityExternalIP, targets, ro.RolloutsByTag(traffic.DefaultTarget), false /* internal encryption */)\n\texpected := netv1alpha1.IngressRule{\n\t\tHosts: []string{\"test.org\"},\n\t\tHTTP: &netv1alpha1.HTTPIngressRuleValue{\n\t\t\tPaths: 
[]netv1alpha1.HTTPIngressPath{{\n\t\t\t\tSplits: []netv1alpha1.IngressBackendSplit{{\n\t\t\t\t\tIngressBackend: netv1alpha1.IngressBackend{\n\t\t\t\t\t\tServiceNamespace: ns,\n\t\t\t\t\t\tServiceName: \"revision-dolphin\",\n\t\t\t\t\t\tServicePort: intstr.FromInt(80),\n\t\t\t\t\t},\n\t\t\t\t\tPercent: 100,\n\t\t\t\t\tAppendHeaders: map[string]string{\n\t\t\t\t\t\t\"Knative-Serving-Revision\": \"revision-dolphin\",\n\t\t\t\t\t\t\"Knative-Serving-Namespace\": ns,\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t\tVisibility: netv1alpha1.IngressVisibilityExternalIP,\n\t}\n\n\tif !cmp.Equal(expected, rule) {\n\t\tt.Error(\"Unexpected rule (-want, +got):\", cmp.Diff(expected, rule))\n\t}\n}", "func verifyInternalIngressController(t *testing.T, name types.NamespacedName, hostname, address, image string) {\n\tkubeConfig, err := config.GetConfig()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get kube config: %v\", err)\n\t}\n\tclient, err := kubernetes.NewForConfig(kubeConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create kube client: %v\", err)\n\t}\n\n\techoPod := buildEchoPod(name.Name, name.Namespace)\n\tif err := kclient.Create(context.TODO(), echoPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoPod); err != nil {\n\t\t\tt.Fatalf(\"failed to delete pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t\t}\n\t}()\n\n\techoService := buildEchoService(echoPod.Name, echoPod.Namespace, echoPod.ObjectMeta.Labels)\n\tif err := kclient.Create(context.TODO(), echoService); err != nil {\n\t\tt.Fatalf(\"failed to create service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoService); err != nil {\n\t\t\tt.Fatalf(\"failed to delete service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t\t}\n\t}()\n\n\techoRoute := 
buildRouteWithHost(echoPod.Name, echoPod.Namespace, echoService.Name, hostname)\n\tif err := kclient.Create(context.TODO(), echoRoute); err != nil {\n\t\tt.Fatalf(\"failed to create route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoRoute); err != nil {\n\t\t\tt.Fatalf(\"failed to delete route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t\t}\n\t}()\n\n\textraArgs := []string{\n\t\t\"--header\", \"HOST:\" + echoRoute.Spec.Host,\n\t\t\"-v\",\n\t\t\"--retry-delay\", \"20\",\n\t\t\"--max-time\", \"10\",\n\t}\n\tclientPodName := types.NamespacedName{Namespace: name.Namespace, Name: \"curl-\" + name.Name}\n\tclientPodSpec := buildCurlPod(clientPodName.Name, clientPodName.Namespace, image, address, echoRoute.Spec.Host, extraArgs...)\n\tclientPod := clientPodSpec.DeepCopy()\n\tif err := kclient.Create(context.TODO(), clientPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %q: %v\", clientPodName, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), clientPod); err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Fatalf(\"failed to delete pod %q: %v\", clientPodName, err)\n\t\t}\n\t}()\n\n\tvar curlPodLogs string\n\terr = wait.PollImmediate(10*time.Second, 10*time.Minute, func() (bool, error) {\n\t\tif err := kclient.Get(context.TODO(), clientPodName, clientPod); err != nil {\n\t\t\tt.Logf(\"error getting client pod %q: %v, retrying...\", clientPodName, err)\n\t\t\treturn false, nil\n\t\t}\n\t\t// First check if client curl pod is still starting or not running.\n\t\tif clientPod.Status.Phase == corev1.PodPending {\n\t\t\tt.Logf(\"waiting for client pod %q to start\", clientPodName)\n\t\t\treturn false, nil\n\t\t}\n\t\treadCloser, err := client.CoreV1().Pods(clientPod.Namespace).GetLogs(clientPod.Name, &corev1.PodLogOptions{\n\t\t\tContainer: \"curl\",\n\t\t\tFollow: false,\n\t\t}).Stream(context.TODO())\n\t\tif err != nil 
{\n\t\t\tt.Logf(\"failed to read output from pod %s: %v\", clientPod.Name, err)\n\t\t\treturn false, nil\n\t\t}\n\t\tscanner := bufio.NewScanner(readCloser)\n\t\tdefer func() {\n\t\t\tif err := readCloser.Close(); err != nil {\n\t\t\t\tt.Errorf(\"failed to close reader for pod %s: %v\", clientPod.Name, err)\n\t\t\t}\n\t\t}()\n\t\tcurlPodLogs = \"\"\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tcurlPodLogs += line + \"\\n\"\n\t\t\tif strings.Contains(line, \"HTTP/1.0 200 OK\") {\n\t\t\t\tt.Logf(\"verified connectivity with workload with address: %s with response %s\", address, line)\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\t// If failed or succeeded, the pod is stopped, but didn't provide us 200 response, let's try again.\n\t\tif clientPod.Status.Phase == corev1.PodFailed || clientPod.Status.Phase == corev1.PodSucceeded {\n\t\t\tt.Logf(\"client pod %q has stopped...restarting. Curl Pod Logs:\\n%s\", clientPodName, curlPodLogs)\n\t\t\tif err := kclient.Delete(context.TODO(), clientPod); err != nil && errors.IsNotFound(err) {\n\t\t\t\tt.Fatalf(\"failed to delete pod %q: %v\", clientPodName, err)\n\t\t\t}\n\t\t\t// Wait for deletion to prevent a race condition. Use PollInfinite since we are already in a Poll.\n\t\t\twait.PollInfinite(5*time.Second, func() (bool, error) {\n\t\t\t\terr = kclient.Get(context.TODO(), clientPodName, clientPod)\n\t\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\t\tt.Logf(\"waiting for %q: to be deleted\", clientPodName)\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t\treturn true, nil\n\t\t\t})\n\t\t\tclientPod = clientPodSpec.DeepCopy()\n\t\t\tif err := kclient.Create(context.TODO(), clientPod); err != nil {\n\t\t\t\tt.Fatalf(\"failed to create pod %q: %v\", clientPodName, err)\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to verify connectivity with workload with address: %s using internal curl client. 
Curl Pod Logs:\\n%s\", address, curlPodLogs)\n\t}\n}", "func (s *StatusSyncer) shouldTargetIngress(ingress *knetworking.Ingress) bool {\n\tvar ingressClass *knetworking.IngressClass\n\tif ingress.Spec.IngressClassName != nil {\n\t\tingressClass = s.ingressClasses.Get(*ingress.Spec.IngressClassName, \"\")\n\t}\n\treturn shouldProcessIngressWithClass(s.meshConfig.Mesh(), ingress, ingressClass)\n}", "func (s *k8sStore) ingressIsValid(ing *networkingv1.Ingress) bool {\n\tvar endpointKey string\n\tif ing.Spec.DefaultBackend != nil { // stream\n\t\tendpointKey = fmt.Sprintf(\"%s/%s\", ing.Namespace, ing.Spec.DefaultBackend.Service.Name)\n\t} else { // http\n\tLoop:\n\t\tfor _, rule := range ing.Spec.Rules {\n\t\t\tfor _, path := range rule.IngressRuleValue.HTTP.Paths {\n\t\t\t\tendpointKey = fmt.Sprintf(\"%s/%s\", ing.Namespace, path.Backend.Service.Name)\n\t\t\t\tif endpointKey != \"\" {\n\t\t\t\t\tbreak Loop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\titem, exists, err := s.listers.Endpoint.GetByKey(endpointKey)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Can not get endpoint by key(%s): %v\", endpointKey, err)\n\t\treturn false\n\t}\n\tif !exists {\n\t\tlogrus.Debugf(\"Endpoint %s does not exist.\", endpointKey)\n\t\treturn false\n\t}\n\tendpoint, ok := item.(*corev1.Endpoints)\n\tif !ok {\n\t\tlogrus.Errorf(\"Cant not convert %v to %v\", reflect.TypeOf(item), reflect.TypeOf(endpoint))\n\t\treturn false\n\t}\n\tif len(endpoint.Subsets) == 0 {\n\t\tlogrus.Debugf(\"Endpoints(%s) is empty, ignore it\", endpointKey)\n\t\treturn false\n\t}\n\tif !hasReadyAddresses(endpoint) {\n\t\tlogrus.Debugf(\"Endpoints(%s) is empty, ignore it\", endpointKey)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func TestRequestIPs(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: 
slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.20\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.10.20\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.20'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 3 {\n\t\t\tt.Error(\"Expected service to receive exactly three ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tfirst := false\n\t\tsecond := false\n\t\tthird := false\n\n\t\tfor _, ingress := range svc.Status.LoadBalancer.Ingress {\n\t\t\tswitch ingress.IP {\n\t\t\tcase \"10.0.10.21\":\n\t\t\t\tfirst = true\n\t\t\tcase \"10.0.10.22\":\n\t\t\t\tsecond = true\n\t\t\tcase \"10.0.10.23\":\n\t\t\t\tthird = true\n\t\t\tdefault:\n\t\t\t\tt.Error(\"Unexpected ingress IP\")\n\t\t\t\treturn 
true\n\t\t\t}\n\t\t}\n\n\t\tif !first {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.21'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !second {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.22'\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !third {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.10.23'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tserviceB := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-b\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceBUID,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tciliumSvcLBIPSAnnotation: \"10.0.10.22,10.0.10.23\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Create(context.Background(), serviceB, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-c\" {\n\t\t\tt.Error(\"Expected status update for service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to have one conditions\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionFalse 
{\n\t\t\tt.Error(\"Expected condition to be request-valid:false\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Reason != \"already_allocated\" {\n\t\t\tt.Error(\"Expected condition reason to be 'already_allocated'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t// request an already allocated IP\n\tserviceC := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-c\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceCUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.21\",\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Create(context.Background(), serviceC, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func TestRemoveRequestedIP(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tsvc1 := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"io.cilium/lb-ipam-ips\": \"10.0.10.124,10.0.10.125\",\n\t\t\t},\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\tLoadBalancerIP: \"10.0.10.123\",\n\t\t},\n\t}\n\n\tfixture.coreCS.Tracker().Add(\n\t\tsvc1,\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 3 {\n\t\t\tt.Error(\"Expected service to receive exactly three ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, 
time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 2 {\n\t\t\tt.Error(\"Expected service to receive exactly two ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tsvc1 = svc1.DeepCopy()\n\tsvc1.Annotations = map[string]string{\n\t\t\"io.cilium/lb-ipam-ips\": \"10.0.10.124\",\n\t}\n\n\t_, err := fixture.svcClient.Services(svc1.Namespace).Update(context.Background(), svc1, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.123\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.123' to be allocated\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.124\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.124' to be allocated\")\n\t}\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.125\")) {\n\t\tt.Fatal(\"Expected IP '10.0.10.125' to be released\")\n\t}\n}", "func (d *Driver) existingNetChecks() {\n\t// Request all networks on the endpoint without any filters\n}", "func TestConflictResolution(t *testing.T) {\n\tpoolB := mkPool(poolBUID, \"pool-b\", []string{\"10.0.10.0/24\", \"FF::0/48\"})\n\tpoolB.CreationTimestamp = meta_v1.Date(2022, 10, 16, 13, 30, 00, 0, time.UTC)\n\tfixture := 
mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t\tpoolB,\n\t}, true, false, nil)\n\n\tawait := fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\tif pool.Name != \"pool-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif !isPoolConflicting(pool) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Pool B has not been marked conflicting\")\n\t}\n\n\t// All ranges of a conflicting pool must be disabled\n\tpoolBRanges, _ := fixture.lbIPAM.rangesStore.GetRangesForPool(\"pool-b\")\n\tfor _, r := range poolBRanges {\n\t\tif !r.internallyDisabled {\n\t\t\tt.Fatalf(\"Range '%s' from pool B hasn't been disabled\", ipNetStr(r.allocRange.CIDR()))\n\t\t}\n\t}\n\n\t// Phase 2, resolving the conflict\n\n\tawait = fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\tif pool.Name != \"pool-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif isPoolConflicting(pool) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolB, err := fixture.poolClient.Get(context.Background(), \"pool-b\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(poolB)\n\t}\n\n\t// Remove the conflicting range\n\tpoolB.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: cilium_api_v2alpha1.IPv4orIPv6CIDR(\"FF::0/48\"),\n\t\t},\n\t}\n\n\t_, err = fixture.poolClient.Update(context.Background(), poolB, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Pool b has not 
de-conflicted\")\n\t}\n}", "func TestAdmissionUpdateRequestsNotApplicable(t *testing.T) {\n\tadmission := clusterResourceOverrideAdmission{}\n\treq := &admissionv1.AdmissionRequest{\n\t\tOperation: \"UPDATE\",\n\t\tResource: metav1.GroupVersionResource{Resource: string(corev1.ResourcePods)},\n\t\tSubResource: \"\",\n\t}\n\tapplicable := admission.IsApplicable(req)\n\tassert.False(t, applicable)\n}", "func isIngressEqual(a []v1.LoadBalancerIngress, b []v1.LoadBalancerIngress) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif _, found := isAddressInIngress(b, a[i].IP); !found {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor i := 0; i < len(b); i++ {\n\t\tif _, found := isAddressInIngress(a, b[i].IP); !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Validate(ingress *networkingv1.Ingress) error {\n\tif supportsTLS(ingress) && containsWildcard(ingress.Spec.TLS[0].Hosts[0]) {\n\t\treturn errors.Errorf(\"ingress TLS host %q contains wildcards\", ingress.Spec.TLS[0].Hosts[0])\n\t}\n\n\tif len(ingress.Spec.Rules) == 0 {\n\t\treturn errors.New(\"ingress does not have any rules\")\n\t}\n\n\tif containsWildcard(ingress.Spec.Rules[0].Host) {\n\t\treturn errors.Errorf(\"ingress host %q contains wildcards\", ingress.Spec.Rules[0].Host)\n\t}\n\n\treturn nil\n}", "func doesIngressReferenceService(ing *v1.Ingress, svc *api_v1.Service) bool {\n\tif ing.Namespace != svc.Namespace {\n\t\treturn false\n\t}\n\n\tdoesReference := false\n\tutils.TraverseIngressBackends(ing, func(id utils.ServicePortID) bool {\n\t\tif id.Service.Name == svc.Name {\n\t\t\tdoesReference = true\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n\treturn doesReference\n}", "func isRetryableConflict(err *genericarmclient.CloudError) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\n\t// We retry on this code as ADS may be in the process of being enabled (in the case of parallel deployment)\n\treturn err.Code() == 
\"VulnerabilityAssessmentADSIsDisabled\"\n}", "func ingressControllerExists(ctx context.Context, h *helper.H, ingressControllerName string, shouldexist bool) {\n\t_, err := h.Dynamic().\n\t\tResource(schema.GroupVersionResource{Group: \"operator.openshift.io\", Version: \"v1\", Resource: \"ingresscontrollers\"}).\n\t\tNamespace(\"openshift-ingress-operator\").\n\t\tGet(ctx, ingressControllerName, metav1.GetOptions{})\n\tif shouldexist {\n\t\tExpect(err).NotTo(HaveOccurred())\n\t} else {\n\t\tExpect(err).Should(MatchError(fmt.Sprintf(\"ingresscontrollers.operator.openshift.io \\\"%v\\\" not found\", ingressControllerName)))\n\t}\n}", "func TestRetryNotRequired(t *testing.T) {\n\tcheck := assert.New(t)\n\tretryRequired := checkRetryRequired(http.StatusConflict)\n\tcheck.Equal(retryRequired, false)\n}", "func (z *zedrouter) checkConflictingNetworkInstances() {\n\tfor _, item := range z.pubNetworkInstanceStatus.GetAll() {\n\t\tniStatus := item.(types.NetworkInstanceStatus)\n\t\tif !niStatus.NIConflict {\n\t\t\tcontinue\n\t\t}\n\t\tniConfig := z.lookupNetworkInstanceConfig(niStatus.Key())\n\t\tif niConfig == nil {\n\t\t\tcontinue\n\t\t}\n\t\tniConflict, err := z.doNetworkInstanceSanityCheck(niConfig)\n\t\tif err == nil && niConflict == false {\n\t\t\t// Try to re-create the network instance now that the conflict is gone.\n\t\t\tz.log.Noticef(\"Recreating NI %s (%s) now that inter-NI conflict \"+\n\t\t\t\t\"is not present anymore\", niConfig.UUID, niConfig.DisplayName)\n\t\t\t// First release whatever has been already allocated for this NI.\n\t\t\tz.delNetworkInstance(&niStatus)\n\t\t\tz.handleNetworkInstanceCreate(nil, niConfig.Key(), *niConfig)\n\t\t}\n\t}\n}", "func (proxier *Proxier) CheckUnusedRule (activeServiceMap map[activeServiceKey]bool, activeBindIP map[string]int, dummylink netlink.Link){\n\t// check unused ipvs rules\n\toldSvcs, err := proxier.ipvsInterface.ListIpvsService()\n\tif err != nil{\n\t\tglog.Errorf(\"CheckUnusedRule: failed to list ipvs service: 
%v\", err)\n\t\treturn\n\t}\n\tfor _, oldSvc := range oldSvcs{\n\n\t\toldSvc_t, err := utilipvs.CreateInterService(oldSvc)\n\t\tserviceKey := activeServiceKey{\n\t\t\tip: oldSvc_t.ClusterIP.String(),\n\t\t\tport: oldSvc_t.Port,\n\t\t\tprotocol: oldSvc_t.Protocol,\n\t\t}\n\t\tglog.V(4).Infof(\"check active service: %s:%d:%s\",serviceKey.ip,serviceKey.port,serviceKey.protocol)\n\t\t_, ok := activeServiceMap[serviceKey]\n\n\t\t/* unused service info so remove ipvs config and dummy cluster ip*/\n\t\tif !ok{\n\t\t\tglog.V(3).Infof(\"delete unused ipvs service config and dummy cluster ip: %s:%d:%s\",serviceKey.ip,serviceKey.port,serviceKey.protocol)\n\n\t\t\terr = proxier.ipvsInterface.DeleteIpvsService(oldSvc_t)\n\n\t\t\tif err != nil{\n\t\t\t\tglog.Errorf(\"clean unused ipvs service failed: %s\",err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t}\n\t}\n\n\t// check unused binded cluster ip\n\toldBindIPs, _ := proxier.ipvsInterface.ListDuumyClusterIp(dummylink)\n\tif err != nil{\n\t\tglog.Errorf(\"CheckUnusedRule: failed to list binded ip: %v\", err)\n\t\treturn\n\t}\n\n\tfor _, oldBindIP := range oldBindIPs{\n\t\t_, bind := activeBindIP[oldBindIP.IP.String()]\n\t\tif !bind{\n\t\t\tglog.V(3).Infof(\"unbinded unused ipvs dummy cluster ip: %s\",oldBindIP.IP.String())\n\n\t\t\terr = proxier.ipvsInterface.DeleteDummyClusterIp(oldBindIP.IP, dummylink)\n\t\t\tif err != nil{\n\t\t\t\tglog.Errorf(\"clean unused dummy cluster ip failed: %s\",err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func testEnsureV1Beta1(fx *e2e.Framework) {\n\tt := fx.T()\n\tnamespace := \"002-ingress-ensure-v1beta1\"\n\n\tfx.CreateNamespace(namespace)\n\tdefer fx.DeleteNamespace(namespace)\n\n\tfx.Fixtures.Echo.Deploy(namespace, \"ingress-conformance-echo\")\n\n\tingressHost := \"v1beta1.projectcontour.io\"\n\ti := &v1beta1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"echo\",\n\t\t},\n\t\tSpec: v1beta1.IngressSpec{\n\t\t\tRules: 
[]v1beta1.IngressRule{\n\t\t\t\t{\n\t\t\t\t\tHost: ingressHost,\n\t\t\t\t\tIngressRuleValue: v1beta1.IngressRuleValue{\n\t\t\t\t\t\tHTTP: &v1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\t\tPaths: []v1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tBackend: v1beta1.IngressBackend{\n\t\t\t\t\t\t\t\t\t\tServiceName: \"ingress-conformance-echo\",\n\t\t\t\t\t\t\t\t\t\tServicePort: intstr.FromInt(80),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\trequire.NoError(t, fx.Client.Create(context.TODO(), i))\n\n\tres, ok := fx.HTTP.RequestUntil(&e2e.HTTPRequestOpts{\n\t\tHost: ingressHost,\n\t\tPath: \"/echo\",\n\t\tCondition: e2e.HasStatusCode(200),\n\t})\n\trequire.Truef(t, ok, \"expected 200 response code, got %d\", res.StatusCode)\n}", "func (options *ConformanceTestOptions) SkipIngress() bool {\n\treturn options.TestCase.Ingress.Skip\n}", "func TestInterPodAffinityAdmission(t *testing.T) {\n\thandler := NewInterPodAntiAffinity(nil)\n\tpod := api.Pod{\n\t\tSpec: api.PodSpec{},\n\t}\n\ttests := []struct {\n\t\taffinity map[string]string\n\t\terrorExpected bool\n\t}{\n\t\t// empty affinity its success.\n\t\t{\n\t\t\taffinity: map[string]string{},\n\t\t\terrorExpected: false,\n\t\t},\n\t\t// what ever topologyKey in preferredDuringSchedulingIgnoredDuringExecution, the admission should success.\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"preferredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"weight\": 5,\n\t\t\t\t\t\t\t\"podAffinityTerm\": {\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"namespaces\": [],\n\t\t\t\t\t\t\t\t\"topologyKey\": 
\"az\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}]\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\terrorExpected: false,\n\t\t},\n\t\t// valid topologyKey in requiredDuringSchedulingIgnoredDuringExecution,\n\t\t// plus any topologyKey in preferredDuringSchedulingIgnoredDuringExecution, then admission success.\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"preferredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"weight\": 5,\n\t\t\t\t\t\t\t\"podAffinityTerm\": {\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"namespaces\": [],\n\t\t\t\t\t\t\t\t\"topologyKey\": \"az\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}],\n\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\": [],\n\t\t\t\t\t\t\t\"topologyKey\": \"` + unversioned.LabelHostname + `\"\n\t\t\t\t\t\t}]\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\terrorExpected: false,\n\t\t},\n\t\t// valid topologyKey in requiredDuringSchedulingIgnoredDuringExecution then admission success.\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\":[],\n\t\t\t\t\t\t\t\"topologyKey\": \"` + 
unversioned.LabelHostname + `\"\n\t\t\t\t\t\t}]\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\terrorExpected: false,\n\t\t},\n\t\t// invalid topologyKey in requiredDuringSchedulingIgnoredDuringExecution then admission fails.\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\":[],\n\t\t\t\t\t\t\t\"topologyKey\": \" zone \"\n\t\t\t\t\t\t}]\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\terrorExpected: true,\n\t\t},\n\t\t// invalid topologyKey in requiredDuringSchedulingRequiredDuringExecution then admission fails.\n\t\t// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.\n\t\t// {\n\t\t// affinity: map[string]string{\n\t\t//\t\t\tapi.AffinityAnnotationKey: `\n\t\t//\t\t\t\t{\"podAntiAffinity\": {\n\t\t//\t\t\t\t\t\"requiredDuringSchedulingRequiredDuringExecution\": [{\n\t\t//\t\t\t\t\t\t\"labelSelector\": {\n\t\t//\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t//\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t//\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t//\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t//\t\t\t\t\t\t\t}]\n\t\t//\t\t\t\t\t\t},\n\t\t//\t\t\t\t\t\t\"namespaces\":[],\n\t\t//\t\t\t\t\t\t\"topologyKey\": \" zone \"\n\t\t//\t\t\t\t\t}]\n\t\t//\t\t\t\t}}`,\n\t\t//\t\t\t},\n\t\t//\t\t\terrorExpected: true,\n\t\t// }\n\t\t// list of requiredDuringSchedulingIgnoredDuringExecution middle element topologyKey is not valid.\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\"labelSelector\": 
{\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\":[],\n\t\t\t\t\t\t\t\"topologyKey\": \"` + unversioned.LabelHostname + `\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\":[],\n\t\t\t\t\t\t\t\"topologyKey\": \" zone \"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\"key\": \"security\",\n\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\"values\":[\"S2\"]\n\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"namespaces\": [],\n\t\t\t\t\t\t\t\"topologyKey\": \"` + unversioned.LabelHostname + `\"\n\t\t\t\t\t\t}]\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\terrorExpected: true,\n\t\t},\n\t\t{\n\t\t\taffinity: map[string]string{\n\t\t\t\tapi.AffinityAnnotationKey: `\n\t\t\t\t\t{\"podAntiAffinity\": {\n\t\t\t\t\t\t\"thisIsAInvalidAffinity\": [{}\n\t\t\t\t\t}}`,\n\t\t\t},\n\t\t\t// however, we should not get error here\n\t\t\terrorExpected: false,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tpod.ObjectMeta.Annotations = test.affinity\n\t\terr := handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind(\"Pod\").WithVersion(\"version\"), \"foo\", \"name\", api.Resource(\"pods\").WithVersion(\"version\"), \"\", \"ignored\", nil))\n\n\t\tif test.errorExpected && err == nil {\n\t\t\tt.Errorf(\"Expected error for Anti Affinity %+v but did not get an error\", test.affinity)\n\t\t}\n\n\t\tif !test.errorExpected && err != nil {\n\t\t\tt.Errorf(\"Unexpected error %v for AntiAffinity %+v\", err, test.affinity)\n\t\t}\n\t}\n}", "func (s) 
TestRefuseStartWithExcludeAndWildCardAll(t *testing.T) {\n\tinvalidConfig := &config{\n\t\tProjectID: \"fake\",\n\t\tCloudLogging: &cloudLogging{\n\t\t\tClientRPCEvents: []clientRPCEvents{\n\t\t\t\t{\n\t\t\t\t\tMethods: []string{\"*\"},\n\t\t\t\t\tExclude: true,\n\t\t\t\t\tMaxMetadataBytes: 30,\n\t\t\t\t\tMaxMessageBytes: 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tinvalidConfigJSON, err := json.Marshal(invalidConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to convert config to JSON: %v\", err)\n\t}\n\toldObservabilityConfig := envconfig.ObservabilityConfig\n\toldObservabilityConfigFile := envconfig.ObservabilityConfigFile\n\tenvconfig.ObservabilityConfig = string(invalidConfigJSON)\n\tenvconfig.ObservabilityConfigFile = \"\"\n\tdefer func() {\n\t\tenvconfig.ObservabilityConfig = oldObservabilityConfig\n\t\tenvconfig.ObservabilityConfigFile = oldObservabilityConfigFile\n\t}()\n\t// If there is at least one invalid pattern, which should not be silently tolerated.\n\tif err := Start(context.Background()); err == nil {\n\t\tt.Fatalf(\"Invalid patterns not triggering error\")\n\t}\n}", "func validIngress() *extensions.Ingress {\n\treturn newIngress(map[string]utils.FakeIngressRuleValueMap{\n\t\t\"foo.bar.com\": testPathMap,\n\t})\n}", "func TestNamespacePreExisting(t *testing.T) {\n\ttestName := \"TestNamespacePreExisting\"\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{\n\t\tAPPLICATION: true,\n\t\t\"Deployment\": true,\n\t\t\"Service\": true,\n\t}\n\n\t// starting resources to pre-populate\n\tvar files = []string{\n\t\t/* 0 */ KappnavConfigFile,\n\t\t/* 1 */ CrdApplication,\n\t\t/* 2 */ ns1Service,\n\t\t/* 3 */ ns1Deployment,\n\t\t/* 4 */ ns2Service,\n\t\t/* 5 */ ns2Deployment,\n\t}\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: no applications. 
No resources should have status */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\n\t// status should not be checked when there are not applications\n\titeration0IDs[2].expectedStatus = NoStatus\n\titeration0IDs[3].expectedStatus = NoStatus\n\titeration0IDs[4].expectedStatus = NoStatus\n\titeration0IDs[5].expectedStatus = NoStatus\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t// iteration 1: add application to NS_1. All in NS_1 is normal.\n\t// All in NS_2 remains NoStatus\n\tres, err := readOneResourceID(ns1App)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tarrayLength := len(iteration0IDs)\n\tvar iteration1IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration1IDs, iteration0IDs)\n\titeration1IDs = append(iteration1IDs, res)\n\tarrayLength++\n\titeration1IDs[2].expectedStatus = Normal\n\titeration1IDs[3].expectedStatus = Normal\n\titeration1IDs[6].expectedStatus = Normal\n\ttestActions.addIteration(iteration1IDs, emptyIDs)\n\n\t/* iteration 4: clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// make all trasition of testAction\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func ValidateHasNoAutoscalingAnnotation(annotations map[string]string) (errs *apis.FieldError) {\n\tfor key := range annotations {\n\t\tif strings.HasPrefix(key, autoscaling.GroupName) {\n\t\t\terrs = errs.Also(\n\t\t\t\tapis.ErrInvalidKeyName(key, apis.CurrentField, `autoscaling annotations must be put under \"spec.template.metadata.annotations\" to work`))\n\t\t}\n\t}\n\treturn errs\n}", "func loadBalancerServiceIsProgressing(ic *operatorv1.IngressController, service *corev1.Service, platform *configv1.PlatformStatus) error {\n\tvar errs []error\n\twantScope := 
ic.Status.EndpointPublishingStrategy.LoadBalancer.Scope\n\thaveScope := operatorv1.ExternalLoadBalancer\n\tif IsServiceInternal(service) {\n\t\thaveScope = operatorv1.InternalLoadBalancer\n\t}\n\tif wantScope != haveScope {\n\t\terr := fmt.Errorf(\"The IngressController scope was changed from %q to %q.\", haveScope, wantScope)\n\t\tswitch platform.Type {\n\t\tcase configv1.AWSPlatformType, configv1.IBMCloudPlatformType:\n\t\t\terr = fmt.Errorf(\"%[1]s To effectuate this change, you must delete the service: `oc -n %[2]s delete svc/%[3]s`; the service load-balancer will then be deprovisioned and a new one created. This will most likely cause the new load-balancer to have a different host name and IP address from the old one's. Alternatively, you can revert the change to the IngressController: `oc -n openshift-ingress-operator patch ingresscontrollers/%[4]s --type=merge --patch='{\\\"spec\\\":{\\\"endpointPublishingStrategy\\\":{\\\"loadBalancer\\\":{\\\"scope\\\":\\\"%[5]s\\\"}}}}'\", err.Error(), service.Namespace, service.Name, ic.Name, haveScope)\n\t\t}\n\t\terrs = append(errs, err)\n\t}\n\n\terrs = append(errs, loadBalancerSourceRangesAnnotationSet(service))\n\terrs = append(errs, loadBalancerSourceRangesMatch(ic, service))\n\n\treturn kerrors.NewAggregate(errs)\n}", "func updateIngressControllerSpecWithRetryOnConflict(t *testing.T, name types.NamespacedName, timeout time.Duration, mutateSpecFn func(*operatorv1.IngressControllerSpec)) error {\n\tic := operatorv1.IngressController{}\n\treturn wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {\n\t\tif err := kclient.Get(context.TODO(), name, &ic); err != nil {\n\t\t\tt.Logf(\"error getting ingress controller %v: %v, retrying...\", name, err)\n\t\t\treturn false, nil\n\t\t}\n\t\tmutateSpecFn(&ic.Spec)\n\t\tif err := kclient.Update(context.TODO(), &ic); err != nil {\n\t\t\tif errors.IsConflict(err) {\n\t\t\t\tt.Logf(\"conflict when updating ingress controller %v: %v, retrying...\", name, 
err)\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn false, err\n\t\t}\n\t\treturn true, nil\n\t})\n}", "func (r *IngressReconciler) reconcileAnnotations(ctx context.Context, ingress *networkingv1.Ingress) (updated bool, err error) {\n\tingressCopy := ingress.DeepCopy()\n\n\tupdated, err = r.monitorService.AnnotateIngress(ingressCopy)\n\tif err != nil || !updated {\n\t\treturn false, err\n\t}\n\n\terr = r.Update(ctx, ingressCopy)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}", "func updateIngressConfigSpecWithRetryOnConflict(t *testing.T, name types.NamespacedName, timeout time.Duration, mutateSpecFn func(*configv1.IngressSpec)) error {\n\tingressConfig := configv1.Ingress{}\n\treturn wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {\n\t\tif err := kclient.Get(context.TODO(), name, &ingressConfig); err != nil {\n\t\t\tt.Logf(\"error getting ingress config %v: %v, retrying...\", name, err)\n\t\t\treturn false, nil\n\t\t}\n\t\tmutateSpecFn(&ingressConfig.Spec)\n\t\tif err := kclient.Update(context.TODO(), &ingressConfig); err != nil {\n\t\t\tif errors.IsConflict(err) {\n\t\t\t\tt.Logf(\"conflict when updating ingress config %v: %v, retrying...\", name, err)\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn false, err\n\t\t}\n\t\treturn true, nil\n\t})\n}", "func setIngressManagedAnnotation(rollouts []v1alpha1.Rollout, refResource validation.ReferencedResources) {\n\tfor _, rollout := range rollouts {\n\t\tfor i := range refResource.Ingresses {\n\t\t\tvar serviceName string\n\n\t\t\t// Basic Canary so ingress is only pointing a single service and so no linting is needed for this case.\n\t\t\tif rollout.Spec.Strategy.Canary == nil || rollout.Spec.Strategy.Canary.TrafficRouting == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif rollout.Spec.Strategy.Canary.TrafficRouting.Nginx != nil {\n\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.StableService\n\t\t\t} else if rollout.Spec.Strategy.Canary.TrafficRouting.ALB != nil 
{\n\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.StableService\n\t\t\t\tif rollout.Spec.Strategy.Canary.TrafficRouting.ALB.RootService != \"\" {\n\t\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.TrafficRouting.ALB.RootService\n\t\t\t\t}\n\t\t\t} else if rollout.Spec.Strategy.Canary.TrafficRouting.SMI != nil {\n\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.TrafficRouting.SMI.RootService\n\t\t\t}\n\n\t\t\tif ingressutil.HasRuleWithService(&refResource.Ingresses[i], serviceName) {\n\t\t\t\tannotations := refResource.Ingresses[i].GetAnnotations()\n\t\t\t\tif annotations == nil {\n\t\t\t\t\tannotations = make(map[string]string)\n\t\t\t\t}\n\t\t\t\tannotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name\n\t\t\t\trefResource.Ingresses[i].SetAnnotations(annotations)\n\t\t\t}\n\t\t}\n\t}\n}", "func satisfyExistingPodsAntiAffinity(state *preFilterState, nodeInfo *NodeInfo) bool {\n\tif len(state.existingAntiAffinityCounts) > 0 {\n\t\t// Iterate over topology pairs to get any of the pods being affected by\n\t\t// the scheduled pod anti-affinity terms\n\t\tfor topologyKey, topologyValue := range nodeInfo.Node().Labels {\n\t\t\ttp := topologyPair{key: topologyKey, value: topologyValue}\n\t\t\tif state.existingAntiAffinityCounts[tp] > 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error {\n\t// check refs in mount pod, the corresponding pod exists or not\n\tlock := config.GetPodLock(pod.Name)\n\tlock.Lock()\n\tdefer lock.Unlock()\n\n\tdelAnnotations := []string{}\n\tvar existTargets int\n\tfor k, target := range pod.Annotations {\n\t\tif k == util.GetReferenceKey(target) {\n\t\t\t_, exists := p.mit.deletedPods[getPodUid(target)]\n\t\t\tif !exists { // only it is not in pod lists can be seen as deleted\n\t\t\t\t// target pod is deleted\n\t\t\t\tdelAnnotations = append(delAnnotations, k)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\texistTargets++\n\t\t}\n\t}\n\n\tif existTargets != 
0 && pod.Annotations[config.DeleteDelayAtKey] != \"\" {\n\t\tdelAnnotations = append(delAnnotations, config.DeleteDelayAtKey)\n\t}\n\tif len(delAnnotations) != 0 {\n\t\t// check mount pod reference key, if it is not the latest, return conflict\n\t\tnewPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(util.GetAllRefKeys(*newPod)) != len(util.GetAllRefKeys(*pod)) {\n\t\t\treturn apierrors.NewConflict(schema.GroupResource{\n\t\t\t\tGroup: pod.GroupVersionKind().Group,\n\t\t\t\tResource: pod.GroupVersionKind().Kind,\n\t\t\t}, pod.Name, fmt.Errorf(\"can not patch pod\"))\n\t\t}\n\t\tif err := util.DelPodAnnotation(ctx, p.Client, pod, delAnnotations); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif existTargets == 0 && pod.DeletionTimestamp == nil {\n\t\tvar shouldDelay bool\n\t\tshouldDelay, err := util.ShouldDelay(ctx, pod, p.Client)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !shouldDelay {\n\t\t\t// check mount pod resourceVersion, if it is not the latest, return conflict\n\t\t\tnewPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// check mount pod reference key, if it is not none, return conflict\n\t\t\tif len(util.GetAllRefKeys(*newPod)) != 0 {\n\t\t\t\treturn apierrors.NewConflict(schema.GroupResource{\n\t\t\t\t\tGroup: pod.GroupVersionKind().Group,\n\t\t\t\t\tResource: pod.GroupVersionKind().Kind,\n\t\t\t\t}, pod.Name, fmt.Errorf(\"can not delete pod\"))\n\t\t\t}\n\t\t\t// if there are no refs or after delay time, delete it\n\t\t\tklog.V(5).Infof(\"There are no refs in pod %s annotation, delete it\", pod.Name)\n\t\t\tif err := p.Client.DeletePod(ctx, pod); err != nil {\n\t\t\t\tklog.Errorf(\"Delete pod %s error: %v\", pod.Name, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// delete related secret\n\t\t\tsecretName := pod.Name + \"-secret\"\n\t\t\tklog.V(6).Infof(\"delete related secret of pod: %s\", secretName)\n\t\t\tif err 
:= p.Client.DeleteSecret(ctx, secretName, pod.Namespace); err != nil {\n\t\t\t\tklog.V(5).Infof(\"Delete secret %s error: %v\", secretName, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func checkNamespaces(podSpec PodSpecV1, result *Result) {\n\tif labelExists, reason := getPodOverrideLabelReason(result, \"allow-namespace-host-network\"); labelExists {\n\t\tif podSpec.HostNetwork {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorNamespaceHostNetworkTrueAllowed,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostNetwork to true\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t} else {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorMisconfiguredKubeauditAllow,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostNetwork to true, but it is set to false\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t}\n\t} else if podSpec.HostNetwork {\n\t\tocc := Occurrence{\n\t\t\tpodHost: podSpec.Hostname,\n\t\t\tid: ErrorNamespaceHostNetworkTrue,\n\t\t\tkind: Error,\n\t\t\tmessage: \"hostNetwork is set to true in podSpec, please set to false!\",\n\t\t}\n\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t}\n\tif labelExists, reason := getPodOverrideLabelReason(result, \"allow-namespace-host-IPC\"); labelExists {\n\t\tif podSpec.HostIPC {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorNamespaceHostIPCTrueAllowed,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostIPC to true\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t} else {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorMisconfiguredKubeauditAllow,\n\t\t\t\tkind: 
Warn,\n\t\t\t\tmessage: \"Allowed setting hostIPC to true, but it is set to false\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t}\n\t} else if podSpec.HostIPC {\n\t\tocc := Occurrence{\n\t\t\tpodHost: podSpec.Hostname,\n\t\t\tid: ErrorNamespaceHostIPCTrue,\n\t\t\tkind: Error,\n\t\t\tmessage: \"hostIPC is set to true in podSpec, please set to false!\",\n\t\t}\n\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t}\n\tif labelExists, reason := getPodOverrideLabelReason(result, \"allow-namespace-host-PID\"); labelExists {\n\t\tif podSpec.HostPID {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorNamespaceHostPIDTrueAllowed,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostPID to true\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t} else {\n\t\t\tocc := Occurrence{\n\t\t\t\tpodHost: podSpec.Hostname,\n\t\t\t\tid: ErrorMisconfiguredKubeauditAllow,\n\t\t\t\tkind: Warn,\n\t\t\t\tmessage: \"Allowed setting hostPID to true, but it is set to false\",\n\t\t\t\tmetadata: Metadata{\"Reason\": prettifyReason(reason)},\n\t\t\t}\n\t\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t\t}\n\t} else if podSpec.HostPID {\n\t\tocc := Occurrence{\n\t\t\tpodHost: podSpec.Hostname,\n\t\t\tid: ErrorNamespaceHostPIDTrue,\n\t\t\tkind: Error,\n\t\t\tmessage: \"hostPID is set to true in podSpec, please set to false!\",\n\t\t}\n\t\tresult.Occurrences = append(result.Occurrences, occ)\n\t}\n\treturn\n}", "func CheckExternalAccesss(t *testing.T, k8client client.Client, pravega *api.PravegaCluster) error {\n\n\tssSvc := &corev1.Service{}\n\tconSvc := &corev1.Service{}\n\t_ = k8client.Get(goctx.TODO(), types.NamespacedName{Namespace: pravega.Namespace, Name: pravega.ServiceNameForSegmentStore(0)}, ssSvc)\n\t_ = k8client.Get(goctx.TODO(), 
types.NamespacedName{Namespace: pravega.Namespace, Name: pravega.ServiceNameForController()}, conSvc)\n\n\tif len(conSvc.Status.LoadBalancer.Ingress) == 0 || len(ssSvc.Status.LoadBalancer.Ingress) == 0 {\n\t\treturn fmt.Errorf(\"External Access is not enabled\")\n\t}\n\tlog.Printf(\"pravega cluster External Acess Validated: %s\", pravega.Name)\n\treturn nil\n}", "func testAllowAllPrecedenceIngress() []*TestStep {\n\tbuilder := &NetworkPolicySpecBuilder{}\n\tbuilder = builder.SetName(\"x\", \"deny-all\").SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder.SetTypeIngress()\n\tbuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{}, nil, nil, nil)\n\n\tpolicy1 := builder.Get()\n\treachability1 := NewReachability(allPods, true)\n\treachability1.ExpectAllIngress(Pod(\"x/a\"), false)\n\treachability1.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\n\tbuilder2 := &NetworkPolicySpecBuilder{}\n\t// by preserving the same name, this policy will also serve to test the 'updated policy' scenario.\n\tbuilder2 = builder2.SetName(\"x\", \"allow-all\").SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder2.SetTypeIngress()\n\tbuilder2.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil)\n\n\tpolicy2 := builder2.Get()\n\treachability2 := NewReachability(allPods, true)\n\n\treturn []*TestStep{\n\t\t{\n\t\t\t\"Port 81\",\n\t\t\treachability1,\n\t\t\tpolicy1,\n\t\t\tp81,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"Port 80\",\n\t\t\treachability2,\n\t\t\tpolicy2,\n\t\t\tp80,\n\t\t\t0,\n\t\t},\n\t}\n}", "func TestK8gbRepeatedlyRecreatedFromIngress(t *testing.T) {\n\tt.Parallel()\n\t// name of ingress and gslb\n\tconst name = \"test-gslb-failover-simple\"\n\n\tassertStrategy := func(t *testing.T, options *k8s.KubectlOptions) {\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.splitBrainThresholdSeconds\", \"300\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.dnsTtlSeconds\", \"30\")\n\t\tutils.AssertGslbSpec(t, options, name, 
\"spec.strategy.primaryGeoTag\", settings.PrimaryGeoTag)\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.type\", \"failover\")\n\t}\n\n\t// Path to the Kubernetes resource config we will test\n\tingressResourcePath, err := filepath.Abs(\"../examples/ingress-annotation-failover-simple.yaml\")\n\trequire.NoError(t, err)\n\n\t// To ensure we can reuse the resource config on the same cluster to test different scenarios, we setup a unique\n\t// namespace for the resources for this test.\n\t// Note that namespaces must be lowercase.\n\tnamespaceName := fmt.Sprintf(\"k8gb-test-repeatedly-recreated-from-ingress-%s\", strings.ToLower(random.UniqueId()))\n\n\t// Here we choose to use the defaults, which is:\n\t// - HOME/.kube/config for the kubectl config file\n\t// - Current context of the kubectl config file\n\t// - Random namespace\n\toptions := k8s.NewKubectlOptions(\"\", \"\", namespaceName)\n\n\tk8s.CreateNamespace(t, options, namespaceName)\n\n\tdefer k8s.DeleteNamespace(t, options, namespaceName)\n\n\tdefer k8s.KubectlDelete(t, options, ingressResourcePath)\n\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress := k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\n\t// assert Gslb strategy has expected values\n\tassertStrategy(t, options)\n\n\tk8s.KubectlDelete(t, options, ingressResourcePath)\n\n\tutils.AssertGslbDeleted(t, options, ingress.Name)\n\n\t// recreate ingress\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress = k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\t// assert Gslb strategy has expected values\n\tassertStrategy(t, options)\n}", "func Test_desiredIngressClass(t *testing.T) {\n\tscope := \"Cluster\"\n\tmakeIngressClass := func(icName string, annotateAsDefault bool) *networkingv1.IngressClass 
{\n\t\tapiGroup := \"operator.openshift.io\"\n\t\tname := \"openshift-\" + icName\n\t\tclass := networkingv1.IngressClass{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: name,\n\t\t\t},\n\t\t\tSpec: networkingv1.IngressClassSpec{\n\t\t\t\tController: routev1.IngressToRouteIngressClassControllerName,\n\t\t\t\tParameters: &networkingv1.IngressClassParametersReference{\n\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\tKind: \"IngressController\",\n\t\t\t\t\tName: icName,\n\t\t\t\t\tScope: &scope,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif annotateAsDefault {\n\t\t\tclass.Annotations = map[string]string{\n\t\t\t\t\"ingressclass.kubernetes.io/is-default-class\": \"true\",\n\t\t\t}\n\t\t}\n\t\treturn &class\n\t}\n\ttestCases := []struct {\n\t\tdescription string\n\n\t\thaveIngressController bool\n\t\tingressControllerName string\n\t\tingressClasses []networkingv1.IngressClass\n\n\t\texpectWant bool\n\t\texpectIngressClass *networkingv1.IngressClass\n\t}{\n\t\t{\n\t\t\tdescription: \"no ingresscontroller\",\n\t\t\thaveIngressController: false,\n\t\t\texpectWant: false,\n\t\t},\n\t\t{\n\t\t\tdescription: \"custom ingresscontroller when no ingressclasses exist\",\n\t\t\thaveIngressController: true,\n\t\t\tingressControllerName: \"custom\",\n\t\t\tingressClasses: []networkingv1.IngressClass{},\n\t\t\texpectWant: true,\n\t\t\texpectIngressClass: makeIngressClass(\"custom\", false),\n\t\t},\n\t\t{\n\t\t\tdescription: \"custom ingresscontroller when its ingressclass already exists\",\n\t\t\thaveIngressController: true,\n\t\t\tingressControllerName: \"custom\",\n\t\t\tingressClasses: []networkingv1.IngressClass{\n\t\t\t\t*makeIngressClass(\"custom\", false),\n\t\t\t},\n\t\t\texpectWant: true,\n\t\t\texpectIngressClass: makeIngressClass(\"custom\", false),\n\t\t},\n\t\t{\n\t\t\tdescription: \"custom ingresscontroller when its ingressclass already exists and is annotated as default\",\n\t\t\thaveIngressController: true,\n\t\t\tingressControllerName: \"custom\",\n\t\t\tingressClasses: 
[]networkingv1.IngressClass{\n\t\t\t\t*makeIngressClass(\"custom\", true),\n\t\t\t},\n\t\t\texpectWant: true,\n\t\t\t// desired doesn't have the annotation, but that's all\n\t\t\t// right because the update logic ignores the user-set\n\t\t\t// annotation.\n\t\t\texpectIngressClass: makeIngressClass(\"custom\", false),\n\t\t},\n\t\t{\n\t\t\tdescription: \"default ingresscontroller when no default ingressclass exists\",\n\t\t\thaveIngressController: true,\n\t\t\tingressControllerName: \"default\",\n\t\t\tingressClasses: []networkingv1.IngressClass{},\n\t\t\texpectWant: true,\n\t\t\t// TODO This test case expects the default ingressclass\n\t\t\t// not to be annotated as default because doing so\n\t\t\t// breaks \"[sig-network] IngressClass [Feature:Ingress]\n\t\t\t// should not set default value if no default\n\t\t\t// IngressClass\"; we need to fix that test and then\n\t\t\t// update this test case.\n\t\t\texpectIngressClass: makeIngressClass(\"default\", false),\n\t\t},\n\t\t{\n\t\t\tdescription: \"default ingresscontroller when some custom ingressclass exists and is annotated as default\",\n\t\t\thaveIngressController: true,\n\t\t\tingressControllerName: \"default\",\n\t\t\tingressClasses: []networkingv1.IngressClass{\n\t\t\t\t*makeIngressClass(\"custom\", true),\n\t\t\t},\n\t\t\texpectWant: true,\n\t\t\texpectIngressClass: makeIngressClass(\"default\", false),\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.description, func(t *testing.T) {\n\t\t\twant, class := desiredIngressClass(tc.haveIngressController, tc.ingressControllerName, tc.ingressClasses)\n\t\t\tif want != tc.expectWant {\n\t\t\t\tt.Errorf(\"expected desiredIngressClass to return %t, got %t\", tc.expectWant, want)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(class, tc.expectIngressClass) {\n\t\t\t\tt.Errorf(\"expected desiredIngressClass to return %+v, got %+v\", tc.expectIngressClass, class)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestNodePortServiceChangedEmptyAnnotations(t *testing.T) {\n\tsvc1 := 
corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: nil,\n\t\t},\n\t}\n\tsvc2 := corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: map[string]string{},\n\t\t},\n\t}\n\ttestCases := []struct {\n\t\tdescription string\n\t\tcurrent, desired *corev1.Service\n\t}{\n\t\t{\"null to empty\", &svc1, &svc2},\n\t\t{\"empty to null\", &svc2, &svc1},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.description, func(t *testing.T) {\n\t\t\tchanged, _ := nodePortServiceChanged(tc.current, tc.desired)\n\t\t\tassert.False(t, changed)\n\t\t})\n\t}\n}", "func mutationRequired(ignoredList []string, metadata *metav1.ObjectMeta) bool {\n\n\tif metadata == nil {\n\t\treturn false\n\t}\n\n\t// If the environment variable MATCH_NAMESPACE exists,\n\t// only the namespaces in the environment variable will be matched\n\te := os.Getenv(\"MATCH_NAMESPACE\")\n\tif e != \"\" {\n\t\te = strings.Replace(e, \" \", \"\", -1)\n\t\tmatchNamespaces := strings.Split(e, \",\")\n\t\tmatchNamespacesMap := make(map[string]struct{})\n\t\tfor _, ns := range matchNamespaces {\n\t\t\tmatchNamespacesMap[ns] = struct{}{}\n\t\t}\n\t\tif _, ok := matchNamespacesMap[metadata.Namespace]; !ok {\n\t\t\tglog.Infof(\n\t\t\t\t\"Skip mutation %s/%s, it's not in the MATCH_NAMESPACE %v\",\n\t\t\t\tmetadata.Namespace, metadata.Name, matchNamespaces,\n\t\t\t)\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// skip special kubernete system namespaces\n\tfor _, namespace := range ignoredList {\n\t\tif metadata.Namespace == namespace {\n\t\t\tglog.Infof(\"Skip mutation for %v for it's in special namespace:%v\", metadata.Name, metadata.Namespace)\n\t\t\treturn false\n\t\t}\n\t}\n\n\tannotations := metadata.GetAnnotations()\n\tif annotations == nil {\n\t\tannotations = map[string]string{}\n\t}\n\n\t// ignore recreate resource for devend\n\tif annotations[\"nocalhost-dep-ignore\"] == \"true\" {\n\t\treturn false\n\t}\n\n\t//status := annotations[admissionWebhookAnnotationStatusKey]\n\n\t// determine 
whether to perform mutation based on annotation for the target resource\n\tvar required = true\n\n\t//glog.Infof(\"Mutation policy for %v/%v: status: %q required:%v\", metadata.Namespace, metadata.Name, status, required)\n\treturn required\n}", "func TestSidecarsCheckNoPods(t *testing.T) {\n\tchecker := PodChecker{Pods: []v1.Pod{}}\n\tresult := checker.Check()\n\n\tassert.Equal(t, 0, len(result))\n}", "func TestOverlayIOPExhaustiveness(t *testing.T) {\n\twellknownProviders := map[string]struct{}{\n\t\t\"prometheus\": {},\n\t\t\"envoy_file_access_log\": {},\n\t\t\"stackdriver\": {},\n\t\t\"envoy_otel_als\": {},\n\t\t\"envoy_ext_authz_http\": {},\n\t\t\"envoy_ext_authz_grpc\": {},\n\t\t\"zipkin\": {},\n\t\t\"lightstep\": {},\n\t\t\"datadog\": {},\n\t\t\"opencensus\": {},\n\t\t\"skywalking\": {},\n\t\t\"envoy_http_als\": {},\n\t\t\"envoy_tcp_als\": {},\n\t\t\"opentelemetry\": {},\n\t}\n\n\tunexpectedProviders := make([]string, 0)\n\n\tmsg := &meshconfig.MeshConfig_ExtensionProvider{}\n\tpb := msg.ProtoReflect()\n\tmd := pb.Descriptor()\n\n\tof := md.Oneofs().Get(0)\n\tfor i := 0; i < of.Fields().Len(); i++ {\n\t\to := of.Fields().Get(i)\n\t\tn := string(o.Name())\n\t\tif _, ok := wellknownProviders[n]; ok {\n\t\t\tdelete(wellknownProviders, n)\n\t\t} else {\n\t\t\tunexpectedProviders = append(unexpectedProviders, n)\n\t\t}\n\t}\n\n\tif len(wellknownProviders) != 0 || len(unexpectedProviders) != 0 {\n\t\tt.Errorf(\"unexpected provider not implemented in OverlayIOP, wellknownProviders: %v unexpectedProviders: %v\", wellknownProviders, unexpectedProviders)\n\t\tt.Fail()\n\t}\n}", "func (c *controller) shouldProcessIngressUpdate(ing *knetworking.Ingress) bool {\n\t// ingress add/update\n\tshouldProcess := c.shouldProcessIngress(c.meshWatcher.Mesh(), ing)\n\titem := config.NamespacedName(ing)\n\tif shouldProcess {\n\t\t// record processed ingress\n\t\tc.mutex.Lock()\n\t\tc.ingresses[item] = ing\n\t\tc.mutex.Unlock()\n\t\treturn true\n\t}\n\n\tc.mutex.Lock()\n\t_, 
preProcessed := c.ingresses[item]\n\t// previous processed but should not currently, delete it\n\tif preProcessed && !shouldProcess {\n\t\tdelete(c.ingresses, item)\n\t} else {\n\t\tc.ingresses[item] = ing\n\t}\n\tc.mutex.Unlock()\n\n\treturn preProcessed\n}", "func TestIngressTranslatorWithHTTPOptionDisabled(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tin *v1alpha1.Ingress\n\t\tstate []runtime.Object\n\t\twant *translatedIngress\n\t}{{\n\t\tname: \"tls redirect\",\n\t\tin: ing(\"testspace\", \"testname\", func(ing *v1alpha1.Ingress) {\n\t\t\ting.Spec.TLS = []v1alpha1.IngressTLS{{\n\t\t\t\tHosts: []string{\"foo.example.com\"},\n\t\t\t\tSecretNamespace: \"secretns\",\n\t\t\t\tSecretName: \"secretname\",\n\t\t\t}}\n\t\t\ting.Spec.HTTPOption = v1alpha1.HTTPOptionRedirected\n\t\t}),\n\t\tstate: []runtime.Object{\n\t\t\tns(\"testspace\"),\n\t\t\tsvc(\"servicens\", \"servicename\"),\n\t\t\teps(\"servicens\", \"servicename\"),\n\t\t\tsecret,\n\t\t},\n\t\twant: func() *translatedIngress {\n\t\t\tvHosts := []*route.VirtualHost{\n\t\t\t\tenvoy.NewVirtualHost(\n\t\t\t\t\t\"(testspace/testname).Rules[0]\",\n\t\t\t\t\t[]string{\"foo.example.com\", \"foo.example.com:*\"},\n\t\t\t\t\t[]*route.Route{envoy.NewRoute(\n\t\t\t\t\t\t\"(testspace/testname).Rules[0].Paths[/test]\",\n\t\t\t\t\t\t[]*route.HeaderMatcher{{\n\t\t\t\t\t\t\tName: \"testheader\",\n\t\t\t\t\t\t\tHeaderMatchSpecifier: &route.HeaderMatcher_StringMatch{\n\t\t\t\t\t\t\t\tStringMatch: &envoymatcherv3.StringMatcher{\n\t\t\t\t\t\t\t\t\tMatchPattern: &envoymatcherv3.StringMatcher_Exact{\n\t\t\t\t\t\t\t\t\t\tExact: \"foo\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t\t\"/test\",\n\t\t\t\t\t\t[]*route.WeightedCluster_ClusterWeight{\n\t\t\t\t\t\t\tenvoy.NewWeightedCluster(\"servicens/servicename\", 100, map[string]string{\"baz\": \"gna\"}),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t0,\n\t\t\t\t\t\tmap[string]string{\"foo\": 
\"bar\"},\n\t\t\t\t\t\t\"rewritten.example.com\"),\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t}\n\t\t\treturn &translatedIngress{\n\t\t\t\tname: types.NamespacedName{\n\t\t\t\t\tNamespace: \"testspace\",\n\t\t\t\t\tName: \"testname\",\n\t\t\t\t},\n\t\t\t\tsniMatches: []*envoy.SNIMatch{{\n\t\t\t\t\tHosts: []string{\"foo.example.com\"},\n\t\t\t\t\tCertSource: types.NamespacedName{\n\t\t\t\t\t\tNamespace: \"secretns\",\n\t\t\t\t\t\tName: \"secretname\",\n\t\t\t\t\t},\n\t\t\t\t\tCertificateChain: cert,\n\t\t\t\t\tPrivateKey: privateKey,\n\t\t\t\t}},\n\t\t\t\tclusters: []*v3.Cluster{\n\t\t\t\t\tenvoy.NewCluster(\n\t\t\t\t\t\t\"servicens/servicename\",\n\t\t\t\t\t\t5*time.Second,\n\t\t\t\t\t\tlbEndpoints,\n\t\t\t\t\t\tfalse,\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\tv3.Cluster_STATIC,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\texternalVirtualHosts: vHosts,\n\t\t\t\texternalTLSVirtualHosts: vHosts,\n\t\t\t\tinternalVirtualHosts: vHosts,\n\t\t\t}\n\t\t}(),\n\t}, {\n\t\t// cluster local is not affected by HTTPOption.\n\t\tname: \"tls redirect cluster local\",\n\t\tin: ing(\"testspace\", \"testname\", func(ing *v1alpha1.Ingress) {\n\t\t\ting.Spec.TLS = []v1alpha1.IngressTLS{{\n\t\t\t\tHosts: []string{\"foo.example.com\"},\n\t\t\t\tSecretNamespace: \"secretns\",\n\t\t\t\tSecretName: \"secretname\",\n\t\t\t}}\n\t\t\ting.Spec.HTTPOption = v1alpha1.HTTPOptionRedirected\n\t\t\ting.Spec.Rules[0].Visibility = v1alpha1.IngressVisibilityClusterLocal\n\t\t}),\n\t\tstate: []runtime.Object{\n\t\t\tns(\"testspace\"),\n\t\t\tsvc(\"servicens\", \"servicename\"),\n\t\t\teps(\"servicens\", \"servicename\"),\n\t\t\tsecret,\n\t\t},\n\t\twant: func() *translatedIngress {\n\t\t\tvHosts := []*route.VirtualHost{\n\t\t\t\tenvoy.NewVirtualHost(\n\t\t\t\t\t\"(testspace/testname).Rules[0]\",\n\t\t\t\t\t[]string{\"foo.example.com\", \"foo.example.com:*\"},\n\t\t\t\t\t[]*route.Route{envoy.NewRoute(\n\t\t\t\t\t\t\"(testspace/testname).Rules[0].Paths[/test]\",\n\t\t\t\t\t\t[]*route.HeaderMatcher{{\n\t\t\t\t\t\t\tName: 
\"testheader\",\n\t\t\t\t\t\t\tHeaderMatchSpecifier: &route.HeaderMatcher_StringMatch{\n\t\t\t\t\t\t\t\tStringMatch: &envoymatcherv3.StringMatcher{\n\t\t\t\t\t\t\t\t\tMatchPattern: &envoymatcherv3.StringMatcher_Exact{\n\t\t\t\t\t\t\t\t\t\tExact: \"foo\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t\t\"/test\",\n\t\t\t\t\t\t[]*route.WeightedCluster_ClusterWeight{\n\t\t\t\t\t\t\tenvoy.NewWeightedCluster(\"servicens/servicename\", 100, map[string]string{\"baz\": \"gna\"}),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t0,\n\t\t\t\t\t\tmap[string]string{\"foo\": \"bar\"},\n\t\t\t\t\t\t\"rewritten.example.com\"),\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t}\n\n\t\t\treturn &translatedIngress{\n\t\t\t\tname: types.NamespacedName{\n\t\t\t\t\tNamespace: \"testspace\",\n\t\t\t\t\tName: \"testname\",\n\t\t\t\t},\n\t\t\t\tsniMatches: []*envoy.SNIMatch{{\n\t\t\t\t\tHosts: []string{\"foo.example.com\"},\n\t\t\t\t\tCertSource: types.NamespacedName{\n\t\t\t\t\t\tNamespace: \"secretns\",\n\t\t\t\t\t\tName: \"secretname\",\n\t\t\t\t\t},\n\t\t\t\t\tCertificateChain: cert,\n\t\t\t\t\tPrivateKey: privateKey,\n\t\t\t\t}},\n\t\t\t\tclusters: []*v3.Cluster{\n\t\t\t\t\tenvoy.NewCluster(\n\t\t\t\t\t\t\"servicens/servicename\",\n\t\t\t\t\t\t5*time.Second,\n\t\t\t\t\t\tlbEndpoints,\n\t\t\t\t\t\tfalse,\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\tv3.Cluster_STATIC,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\texternalVirtualHosts: []*route.VirtualHost{},\n\t\t\t\texternalTLSVirtualHosts: []*route.VirtualHost{},\n\t\t\t\tinternalVirtualHosts: vHosts,\n\t\t\t}\n\t\t}(),\n\t}}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tt.Setenv(\"KOURIER_HTTPOPTION_DISABLED\", \"true\")\n\t\t\tcfg := defaultConfig.DeepCopy()\n\t\t\tctx := (&testConfigStore{config: cfg}).ToContext(context.Background())\n\t\t\tkubeclient := fake.NewSimpleClientset(test.state...)\n\n\t\t\ttranslator := NewIngressTranslator(\n\t\t\t\tfunc(ns, name string) (*corev1.Secret, error) {\n\t\t\t\t\treturn 
kubeclient.CoreV1().Secrets(ns).Get(ctx, name, metav1.GetOptions{})\n\t\t\t\t},\n\t\t\t\tfunc(ns, name string) (*corev1.Endpoints, error) {\n\t\t\t\t\treturn kubeclient.CoreV1().Endpoints(ns).Get(ctx, name, metav1.GetOptions{})\n\t\t\t\t},\n\t\t\t\tfunc(ns, name string) (*corev1.Service, error) {\n\t\t\t\t\treturn kubeclient.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})\n\t\t\t\t},\n\t\t\t\tfunc(name string) (*corev1.Namespace, error) {\n\t\t\t\t\treturn kubeclient.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})\n\t\t\t\t},\n\t\t\t\t&pkgtest.FakeTracker{},\n\t\t\t)\n\n\t\t\tgot, err := translator.translateIngress(ctx, test.in, false)\n\t\t\tassert.NilError(t, err)\n\t\t\tassert.DeepEqual(t, got, test.want,\n\t\t\t\tcmp.AllowUnexported(translatedIngress{}),\n\t\t\t\tprotocmp.Transform(),\n\t\t\t)\n\t\t})\n\t}\n}", "func isServiceOwnedByIngressController(service *corev1.Service, ic *operatorv1.IngressController) bool {\n\tif service != nil && service.Labels[manifests.OwningIngressControllerLabel] == ic.Name {\n\t\treturn true\n\t}\n\treturn false\n}", "func checkIfAPIExists(trafficSplitterAPIs []*userconfig.TrafficSplit, apis []userconfig.API, deployedRealtimeAPIs strset.Set) error {\n\tvar missingAPIs []string\n\t// check if apis named in trafficsplitter are either defined in same yaml or already deployed\n\tfor _, trafficSplitAPI := range trafficSplitterAPIs {\n\t\t// check if already deployed\n\t\tdeployed := deployedRealtimeAPIs.Has(trafficSplitAPI.Name)\n\n\t\t// check defined apis\n\t\tfor _, definedAPI := range apis {\n\t\t\tif trafficSplitAPI.Name == definedAPI.Name {\n\t\t\t\tdeployed = true\n\t\t\t}\n\t\t}\n\t\tif !deployed {\n\t\t\tmissingAPIs = append(missingAPIs, trafficSplitAPI.Name)\n\t\t}\n\t}\n\tif len(missingAPIs) != 0 {\n\t\treturn ErrorAPIsNotDeployed(missingAPIs)\n\t}\n\treturn nil\n\n}", "func (r *NuxeoReconciler) defaultIngress(instance *v1alpha1.Nuxeo, access v1alpha1.NuxeoAccess, forcePassthrough bool,\n\tingressName 
string, nodeSet v1alpha1.NodeSet) (*v1beta1.Ingress, error) {\n\tconst nginxPassthroughAnnotation = \"nginx.ingress.kubernetes.io/ssl-passthrough\"\n\ttargetPort := intstr.IntOrString{\n\t\tType: intstr.String,\n\t\tStrVal: \"web\",\n\t}\n\tif access.TargetPort != (intstr.IntOrString{}) {\n\t\ttargetPort = access.TargetPort\n\t}\n\tingress := v1beta1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: ingressName,\n\t\t\tNamespace: instance.Namespace,\n\t\t},\n\t\tSpec: v1beta1.IngressSpec{\n\t\t\tRules: []v1beta1.IngressRule{{\n\t\t\t\tHost: access.Hostname,\n\t\t\t\tIngressRuleValue: v1beta1.IngressRuleValue{\n\t\t\t\t\tHTTP: &v1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\tPaths: []v1beta1.HTTPIngressPath{{\n\t\t\t\t\t\t\tBackend: v1beta1.IngressBackend{\n\t\t\t\t\t\t\t\tServiceName: serviceName(instance, nodeSet),\n\t\t\t\t\t\t\t\tServicePort: targetPort,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n\tif access.Termination != \"\" || forcePassthrough {\n\t\tif access.Termination != \"\" && access.Termination != routev1.TLSTerminationPassthrough &&\n\t\t\taccess.Termination != routev1.TLSTerminationEdge {\n\t\t\treturn nil, fmt.Errorf(\"only passthrough and edge termination are supported\")\n\t\t}\n\t\tingress.Spec.TLS = []v1beta1.IngressTLS{{\n\t\t\tHosts: []string{access.Hostname},\n\t\t}}\n\t\tif access.Termination == routev1.TLSTerminationPassthrough || forcePassthrough {\n\t\t\tingress.ObjectMeta.Annotations = map[string]string{nginxPassthroughAnnotation: \"true\"}\n\t\t} else {\n\t\t\t// the Ingress will terminate TLS\n\t\t\tif access.TLSSecret == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"the Ingress was configured for TLS termination but no secret was provided\")\n\t\t\t}\n\t\t\t// secret needs keys 'tls.crt' and 'tls.key' and cert must have CN=<access.Hostname>\n\t\t\tingress.Spec.TLS[0].SecretName = access.TLSSecret\n\t\t}\n\t}\n\t_ = controllerutil.SetControllerReference(instance, &ingress, r.Scheme)\n\treturn 
&ingress, nil\n}", "func appIngressExits(\n\tctx context.Context,\n\th *helper.H,\n\tisdefault bool,\n\tdnsname string,\n) (appIngress cloudingressv1alpha1.ApplicationIngress, exists bool, index int) {\n\tPublishingStrategyInstance, _ := getPublishingStrategy(ctx, h)\n\n\t// Grab the current list of Application Ingresses from the Publishing Strategy\n\tAppIngressList := PublishingStrategyInstance.Spec.ApplicationIngress\n\n\t// Find the application ingress matching our criteria\n\tfor i, v := range AppIngressList {\n\t\tif v.Default == isdefault && strings.HasPrefix(v.DNSName, dnsname) {\n\t\t\tappIngress = v\n\t\t\texists = true\n\t\t\tindex = i\n\t\t\tbreak\n\t\t}\n\t}\n\treturn appIngress, exists, index\n}", "func TestRegisteringDuplicateAuthMethodPanics(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\trestest.AssertPanic(t, func() {\n\t\t\ts.Handle(\"model\",\n\t\t\t\tres.Auth(\"foo\", func(r res.AuthRequest) {\n\t\t\t\t\tr.OK(nil)\n\t\t\t\t}),\n\t\t\t\tres.Auth(\"bar\", func(r res.AuthRequest) {\n\t\t\t\t\tr.OK(nil)\n\t\t\t\t}),\n\t\t\t\tres.Auth(\"foo\", func(r res.AuthRequest) {\n\t\t\t\t\tr.OK(nil)\n\t\t\t\t}),\n\t\t\t)\n\t\t})\n\t}, nil, restest.WithoutReset)\n}", "func validateNoNameCollision(localConfigSvc config.Service, configName string) error {\n\texistingConfigs, err := localConfigSvc.ListConfigs()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error checking existing configs: %w\", err)\n\t}\n\tif len(existingConfigs) == 0 {\n\t\treturn nil\n\t}\n\n\t// If there are existing configs then require that a name be\n\t// specified in order to distinguish this new config from what's\n\t// there already.\n\tif configName == \"\" {\n\t\treturn errors.New(\"flag name is required if you already have existing configs\")\n\t}\n\tif _, ok := existingConfigs[configName]; ok {\n\t\treturn fmt.Errorf(\"config name %q already exists\", configName)\n\t}\n\n\treturn nil\n}", "func TestCreateRetryConflictNoTagDiff(t *testing.T) {\n\tregistry := 
registryhostname.TestingRegistryHostnameRetriever(nil, \"\", testDefaultRegistryURL)\n\tfirstUpdate := true\n\trestInstance := &REST{\n\t\tstrategy: NewStrategy(registry),\n\t\timageRegistry: &fakeImageRegistry{\n\t\t\tcreateImage: func(ctx context.Context, image *imageapi.Image) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\timageStreamRegistry: &fakeImageStreamRegistry{\n\t\t\tgetImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {\n\t\t\t\tstream := validImageStream()\n\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\tTags: map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:original\"}}},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\treturn stream, nil\n\t\t\t},\n\t\t\tupdateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first update call, return a conflict to cause a retry of an\n\t\t\t\t// image stream whose tags haven't changed.\n\t\t\t\tif firstUpdate {\n\t\t\t\t\tfirstUpdate = false\n\t\t\t\t\treturn nil, errors.NewConflict(imagegroup.Resource(\"imagestreams\"), repo.Name, fmt.Errorf(\"resource modified\"))\n\t\t\t\t}\n\t\t\t\treturn repo, nil\n\t\t\t},\n\t\t},\n\t}\n\tobj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\tif obj == nil {\n\t\tt.Fatalf(\"expected a result\")\n\t}\n}", "func (m *IoK8sAPINetworkingV1IngressSpec) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDefaultBackend(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTLS(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 
{\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func TestIntermediateNameAmbiguous(t *testing.T) {\n\tth := kusttest_test.MakeHarness(t)\n\tth.WriteK(\"gcp\", `\nnamePrefix: gcp-\nresources:\n- ../emea\npatchesStrategicMerge:\n- depPatch.yaml\n`)\n\tth.WriteF(\"gcp/depPatch.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n replicas: 999\n`)\n\tth.WriteK(\"emea\", `\nnamePrefix: emea-\nresources:\n- ../prod\n- deployment.yaml\n`)\n\tth.WriteF(\"emea/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\tth.WriteK(\"prod\", `\nnamePrefix: prod-\nresources:\n- ../base\n`)\n\tth.WriteK(\"base\", `\nresources:\n- deployment.yaml\n`)\n\tth.WriteF(\"base/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\terr := th.RunWithErr(\"gcp\", th.MakeDefaultOptions())\n\tassert.Error(t, err)\n}", "func isDefaultIngressController(o client.Object) bool {\n\treturn o.GetNamespace() == controller.DefaultOperatorNamespace && o.GetName() == manifests.DefaultIngressControllerName\n}", "func validateIpOverlap(d *db.DB, intf string, ipPref string, tblName string) (string, error) {\n log.Info(\"Checking for IP overlap ....\")\n\n ipA, ipNetA, err := net.ParseCIDR(ipPref)\n if err != nil {\n log.Info(\"Failed to parse IP address: \", ipPref)\n return \"\", err\n }\n\n var allIntfKeys []db.Key\n\n for key, _ := range IntfTypeTblMap {\n intTbl := IntfTypeTblMap[key]\n keys, err := d.GetKeys(&db.TableSpec{Name:intTbl.cfgDb.intfTN})\n if err != nil {\n log.Info(\"Failed to get keys; err=%v\", err)\n return \"\", err\n }\n allIntfKeys = append(allIntfKeys, keys...)\n }\n\n if len(allIntfKeys) > 0 {\n for _, key := range allIntfKeys {\n if len(key.Comp) < 2 {\n continue\n }\n ipB, ipNetB, perr := net.ParseCIDR(key.Get(1))\n //Check if key has 
IP, if not continue\n if ipB == nil || perr != nil {\n continue\n }\n if ipNetA.Contains(ipB) || ipNetB.Contains(ipA) {\n if log.V(3) {\n log.Info(\"IP: \", ipPref, \" overlaps with \", key.Get(1), \" of \", key.Get(0))\n }\n //Handle IP overlap across different interface, reject if in same VRF\n intfType, _, ierr := getIntfTypeByName(key.Get(0))\n if ierr != nil {\n log.Errorf(\"Extracting Interface type for Interface: %s failed!\", key.Get(0))\n return \"\", ierr\n }\n intTbl := IntfTypeTblMap[intfType]\n if intf != key.Get(0) {\n vrfNameA, _ := d.GetMap(&db.TableSpec{Name:tblName+\"|\"+intf}, \"vrf_name\")\n vrfNameB, _ := d.GetMap(&db.TableSpec{Name:intTbl.cfgDb.intfTN+\"|\"+key.Get(0)}, \"vrf_name\")\n if vrfNameA == vrfNameB {\n errStr := \"IP \" + ipPref + \" overlaps with IP \" + key.Get(1) + \" of Interface \" + key.Get(0)\n log.Error(errStr)\n return \"\", errors.New(errStr)\n }\n } else {\n //Handle IP overlap on same interface, replace\n log.Error(\"Entry \", key.Get(1), \" on \", intf, \" needs to be deleted\")\n errStr := \"IP overlap on same interface with IP \" + key.Get(1)\n return key.Get(1), errors.New(errStr)\n }\n }\n }\n }\n return \"\", nil\n}", "func checkConflictingNodes(ctx context.Context, client client.Interface, node *libapi.Node) (v4conflict, v6conflict bool, retErr error) {\n\t// Get the full set of nodes.\n\tvar nodes []libapi.Node\n\tif nodeList, err := client.Nodes().List(ctx, options.ListOptions{}); err != nil {\n\t\tlog.WithError(err).Errorf(\"Unable to query node configuration\")\n\t\tretErr = err\n\t\treturn\n\t} else {\n\t\tnodes = nodeList.Items\n\t}\n\n\tourIPv4, _, err := cnet.ParseCIDROrIP(node.Spec.BGP.IPv4Address)\n\tif err != nil && node.Spec.BGP.IPv4Address != \"\" {\n\t\tlog.WithError(err).Errorf(\"Error parsing IPv4 CIDR '%s' for node '%s'\", node.Spec.BGP.IPv4Address, node.Name)\n\t\tretErr = err\n\t\treturn\n\t}\n\tourIPv6, _, err := cnet.ParseCIDROrIP(node.Spec.BGP.IPv6Address)\n\tif err != nil && 
node.Spec.BGP.IPv6Address != \"\" {\n\t\tlog.WithError(err).Errorf(\"Error parsing IPv6 CIDR '%s' for node '%s'\", node.Spec.BGP.IPv6Address, node.Name)\n\t\tretErr = err\n\t\treturn\n\t}\n\n\tfor _, theirNode := range nodes {\n\t\tif theirNode.Spec.BGP == nil {\n\t\t\t// Skip nodes that don't have BGP configured. We know\n\t\t\t// that this node does have BGP since we only perform\n\t\t\t// this check after configuring BGP.\n\t\t\tcontinue\n\t\t}\n\n\t\ttheirIPv4, _, err := cnet.ParseCIDROrIP(theirNode.Spec.BGP.IPv4Address)\n\t\tif err != nil && theirNode.Spec.BGP.IPv4Address != \"\" {\n\t\t\tlog.WithError(err).Errorf(\"Error parsing IPv4 CIDR '%s' for node '%s'\", theirNode.Spec.BGP.IPv4Address, theirNode.Name)\n\t\t\tretErr = err\n\t\t\treturn\n\t\t}\n\n\t\ttheirIPv6, _, err := cnet.ParseCIDROrIP(theirNode.Spec.BGP.IPv6Address)\n\t\tif err != nil && theirNode.Spec.BGP.IPv6Address != \"\" {\n\t\t\tlog.WithError(err).Errorf(\"Error parsing IPv6 CIDR '%s' for node '%s'\", theirNode.Spec.BGP.IPv6Address, theirNode.Name)\n\t\t\tretErr = err\n\t\t\treturn\n\t\t}\n\n\t\t// If this is our node (based on the name), check if the IP\n\t\t// addresses have changed. If so warn the user as it could be\n\t\t// an indication of multiple nodes using the same name. This\n\t\t// is not an error condition as the IPs could actually change.\n\t\tif theirNode.Name == node.Name {\n\t\t\tif theirIPv4.IP != nil && ourIPv4.IP != nil && !theirIPv4.IP.Equal(ourIPv4.IP) {\n\t\t\t\tfields := log.Fields{\"node\": theirNode.Name, \"original\": theirIPv4.String(), \"updated\": ourIPv4.String()}\n\t\t\t\tlog.WithFields(fields).Warnf(\"IPv4 address has changed. 
This could happen if there are multiple nodes with the same name.\")\n\t\t\t}\n\t\t\tif theirIPv6.IP != nil && ourIPv6.IP != nil && !theirIPv6.IP.Equal(ourIPv6.IP) {\n\t\t\t\tfields := log.Fields{\"node\": theirNode.Name, \"original\": theirIPv6.String(), \"updated\": ourIPv6.String()}\n\t\t\t\tlog.WithFields(fields).Warnf(\"IPv6 address has changed. This could happen if there are multiple nodes with the same name.\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check that other nodes aren't using the same IP addresses.\n\t\t// This is an error condition.\n\t\tif theirIPv4.IP != nil && ourIPv4.IP != nil && theirIPv4.IP.Equal(ourIPv4.IP) {\n\t\t\tlog.Warnf(\"Calico node '%s' is already using the IPv4 address %s.\", theirNode.Name, ourIPv4.String())\n\t\t\tretErr = fmt.Errorf(\"IPv4 address conflict\")\n\t\t\tv4conflict = true\n\t\t}\n\n\t\tif theirIPv6.IP != nil && ourIPv6.IP != nil && theirIPv6.IP.Equal(ourIPv6.IP) {\n\t\t\tlog.Warnf(\"Calico node '%s' is already using the IPv6 address %s.\", theirNode.Name, ourIPv6.String())\n\t\t\tretErr = fmt.Errorf(\"IPv6 address conflict\")\n\t\t\tv6conflict = true\n\t\t}\n\t}\n\treturn\n}", "func testNamedPortWNamespace() []*TestStep {\n\tnamedPorts := \"serve-80\"\n\tbuilder := &NetworkPolicySpecBuilder{}\n\tbuilder = builder.SetName(\"x\", \"allow-client-a-via-named-port-ingress-rule\").SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder.SetTypeIngress().AddIngress(v1.ProtocolTCP, nil, &namedPorts, nil, nil, nil, map[string]string{\"ns\": \"x\"}, nil, nil)\n\n\treachability80 := func() *Reachability {\n\t\treachability := NewReachability(allPods, true)\n\t\t//reachability.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\t//reachability.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\t//reachability.Expect(Pod(\"x/b\"), Pod(\"x/a\"), true)\n\t\t//reachability.Expect(Pod(\"x/c\"), Pod(\"x/a\"), true)\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: 
false,\n\t\t})\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tFrom: Pod(\"x/a\"),\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: true,\n\t\t})\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tFrom: Pod(\"x/b\"),\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: true,\n\t\t})\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tFrom: Pod(\"x/c\"),\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: true,\n\t\t})\n\t\treturn reachability\n\t}\n\n\t// disallow port 81\n\treachability81 := func() *Reachability {\n\t\treachability := NewReachability(allPods, true)\n\t\t//reachability.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\t//reachability.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: false,\n\t\t})\n\t\treachability.ExpectConn(&Connectivity{\n\t\t\tFrom: Pod(\"x/a\"),\n\t\t\tTo: Pod(\"x/a\"),\n\t\t\tIsConnected: true,\n\t\t})\n\t\treturn reachability\n\t}\n\n\treturn []*TestStep{\n\t\t{\n\t\t\t\"Port 80\",\n\t\t\treachability80(),\n\t\t\tbuilder.Get(),\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"Port 81\",\n\t\t\treachability81(),\n\t\t\tbuilder.Get(),\n\t\t\t81,\n\t\t\t0,\n\t\t},\n\t}\n}", "func (options *ConformanceTestOptions) ShouldTestIngressOfType(t string) bool {\n\treturn indexOf(options.TestCase.Ingress.IngressConfig.Controllers, t) > -1\n}", "func (rs *RouteStatus) MarkIngressRolloutInProgress() {\n\trouteCondSet.Manage(rs).MarkUnknown(RouteConditionIngressReady,\n\t\t\"RolloutInProgress\", \"A gradual rollout of the latest revision(s) is in progress.\")\n}", "func TestRequestIPWithMismatchedLabel(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tpoolA.Spec.ServiceSelector = &slim_meta_v1.LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"color\": \"blue\",\n\t\t},\n\t}\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, 
nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"color\": \"red\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.123\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\t\tif svc.Status.Conditions[0].Reason != \"pool_selector_mismatch\" {\n\t\t\tt.Error(\"Expected service to receive 'pool_selector_mismatch' condition\")\n\t\t}\n\n\t\treturn true\n\t}, 1*time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected status update of service\")\n\t}\n}", "func TestK8gbSpecKeepsStableAfterIngressUpdates(t *testing.T) {\n\tt.Parallel()\n\t// name of ingress and gslb\n\tconst name = \"test-gslb-lifecycle\"\n\n\tassertStrategy := func(t *testing.T, options *k8s.KubectlOptions) {\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.splitBrainThresholdSeconds\", \"600\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.dnsTtlSeconds\", \"60\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.primaryGeoTag\", settings.PrimaryGeoTag)\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.type\", \"failover\")\n\t}\n\n\tkubeResourcePath, err := filepath.Abs(\"../examples/failover-lifecycle.yaml\")\n\tingressResourcePath, err := filepath.Abs(\"../examples/ingress-annotation-failover.yaml\")\n\trequire.NoError(t, err)\n\t// To ensure we can reuse the resource config on the same cluster to test different scenarios, we setup a unique\n\t// namespace for 
the resources for this test.\n\t// Note that namespaces must be lowercase.\n\tnamespaceName := fmt.Sprintf(\"k8gb-test-spec-keeps-stable-after-ingress-updates-%s\", strings.ToLower(random.UniqueId()))\n\n\t// Here we choose to use the defaults, which is:\n\t// - HOME/.kube/config for the kubectl config file\n\t// - Current context of the kubectl config file\n\t// - Random namespace\n\toptions := k8s.NewKubectlOptions(\"\", \"\", namespaceName)\n\n\tk8s.CreateNamespace(t, options, namespaceName)\n\tdefer k8s.DeleteNamespace(t, options, namespaceName)\n\n\t// create gslb\n\tutils.CreateGslb(t, options, settings, kubeResourcePath)\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tassertStrategy(t, options)\n\n\t// reapply ingress\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress := k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\t// assert Gslb strategy has initial values, ingress doesn't change it\n\tassertStrategy(t, options)\n}", "func InternalIpDiffSuppress(_, old, new string, _ *schema.ResourceData) bool {\n\treturn (net.ParseIP(old) != nil) && (net.ParseIP(new) == nil)\n}", "func TestExcludePrefixesValidation(t *testing.T) {\n\tg := gomega.NewGomegaWithT(t)\n\n\trequest := buildRequest()\n\n\trequest.Connection.Context = &connectioncontext.ConnectionContext{\n\t\tIpContext: &connectioncontext.IPContext{DstIpAddr: \"10.32.0.1/32\"},\n\t}\n\t_, err := doRequest(g, request)\n\tg.Expect(err.Error()).To(gomega.MatchRegexp(\"dstIP .* intersects excluded prefixes list\"))\n\n\trequest.Connection.Context = &connectioncontext.ConnectionContext{\n\t\tIpContext: &connectioncontext.IPContext{SrcIpAddr: \"10.32.0.1/32\"},\n\t}\n\t_, err = doRequest(g, request)\n\tg.Expect(err.Error()).To(gomega.MatchRegexp(\"srcIP .* intersects excluded prefixes list\"))\n}", "func (r *replicatorProps) needsFromAnnotationsUpdate(object 
*metav1.ObjectMeta, sourceObject *metav1.ObjectMeta) (bool, error) {\n\tupdate := false\n\t// check \"from\" annotation of the source\n\tif source, sOk := resolveAnnotation(sourceObject, ReplicateFromAnnotation); !sOk {\n\t\treturn false, fmt.Errorf(\"source %s/%s misses annotation %s\",\n\t\t\tsourceObject.Namespace, sourceObject.Name, ReplicateFromAnnotation)\n\n\t} else if !validPath.MatchString(source) ||\n\t\t\tsource == fmt.Sprintf(\"%s/%s\", sourceObject.Namespace, sourceObject.Name) {\n\t\treturn false, fmt.Errorf(\"source %s/%s has invalid annotation %s (%s)\",\n\t\t\tsourceObject.Namespace, sourceObject.Name, ReplicateFromAnnotation, source)\n\n\t// check that target has the same annotation\n\t} else if val, ok := object.Annotations[ReplicateFromAnnotation]; !ok || val != source {\n\t\tupdate = true\n\t}\n\n\tsource, sOk := sourceObject.Annotations[ReplicateOnceAnnotation]\n\t// check \"once\" annotation of the source\n\tif sOk {\n\t\tif _, err := strconv.ParseBool(source); err != nil {\n\t\t\treturn false, fmt.Errorf(\"source %s/%s has illformed annotation %s: %s\",\n\t\t\t\tsourceObject.Namespace, sourceObject.Name, ReplicateOnceAnnotation, err)\n\t\t}\n\t}\n\t// check that target has the same annotation\n\tif val, ok := object.Annotations[ReplicateOnceAnnotation]; sOk != ok || ok && val != source {\n\t\tupdate = true\n\t}\n\n\treturn update, nil\n}", "func testDefaultDenyIngressPolicy(t *testing.T, data *TestData) {\n\tserverNode := workerNodeName(1)\n\tserverNodeIP := workerNodeIP(1)\n\tserverPort := int32(80)\n\t_, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, \"test-server-\", serverNode, testNamespace, false)\n\tdefer cleanupFunc()\n\n\tservice, err := data.createService(\"nginx\", testNamespace, serverPort, serverPort, map[string]string{\"app\": \"nginx\"}, false, false, corev1.ServiceTypeNodePort, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when creating nginx NodePort service: %v\", err)\n\t}\n\tdefer 
data.deleteService(service.Name)\n\n\t// client1 is a host network Pod and is on the same node as the server Pod, simulating kubelet probe traffic.\n\tclient1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, \"test-hostnetwork-client-can-connect-\", serverNode, testNamespace, true)\n\tdefer cleanupFunc()\n\n\t// client2 is a host network Pod and is on a different node from the server Pod, accessing the server Pod via the NodePort service.\n\tclient2Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, \"test-hostnetwork-client-cannot-connect-\", controlPlaneNodeName(), testNamespace, true)\n\tdefer cleanupFunc()\n\n\tspec := &networkingv1.NetworkPolicySpec{\n\t\tPodSelector: metav1.LabelSelector{},\n\t\tPolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},\n\t\tIngress: []networkingv1.NetworkPolicyIngressRule{},\n\t}\n\tnp, err := data.createNetworkPolicy(\"test-networkpolicy-deny-all-ingress\", spec)\n\tif err != nil {\n\t\tt.Fatalf(\"Error when creating network policy: %v\", err)\n\t}\n\tdefer func() {\n\t\tif err = data.deleteNetworkpolicy(np); err != nil {\n\t\t\tt.Fatalf(\"Error when deleting network policy: %v\", err)\n\t\t}\n\t}()\n\n\tnpCheck := func(clientName, serverIP string, serverPort int32, wantErr bool) {\n\t\tif err = data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, serverPort); wantErr && err == nil {\n\t\t\tt.Fatalf(\"Pod %s should not be able to connect %s, but was able to connect\", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort)))\n\t\t} else if !wantErr && err != nil {\n\t\t\tt.Fatalf(\"Pod %s should be able to connect %s, but was not able to connect\", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort)))\n\t\t}\n\t}\n\n\t// Locally generated traffic can always access the Pods regardless of NetworkPolicy configuration.\n\tif clusterInfo.podV4NetworkCIDR != \"\" {\n\t\tnpCheck(client1Name, serverIPs.ipv4.String(), 
serverPort, false)\n\t}\n\tif clusterInfo.podV6NetworkCIDR != \"\" {\n\t\tnpCheck(client1Name, serverIPs.ipv6.String(), serverPort, false)\n\t}\n\n\tif testOptions.providerName == \"kind\" {\n\t\tt.Logf(\"Skipped testing NodePort traffic for TestDefaultDenyIngressPolicy because pkt_mark is not properly supported on OVS netdev datapath\")\n\t} else {\n\t\tif clusterInfo.podV4NetworkCIDR != \"\" {\n\t\t\tnpCheck(client2Name, serverIPs.ipv4.String(), serverPort, true)\n\t\t}\n\t\tif clusterInfo.podV6NetworkCIDR != \"\" {\n\t\t\tnpCheck(client2Name, serverIPs.ipv6.String(), serverPort, true)\n\t\t}\n\t\tnpCheck(client2Name, serverNodeIP, service.Spec.Ports[0].NodePort, true)\n\t}\n}", "func TestPoolInternalConflict(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\", \"10.0.10.64/28\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, false, nil)\n\n\tawait := fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\treturn !isPoolConflicting(pool)\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected pool to be marked conflicting\")\n\t}\n\n\tawait = fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\treturn !isPoolConflicting(pool)\n\t}, 2*time.Second)\n\n\tpool, err := fixture.poolClient.Get(context.Background(), \"pool-a\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpool.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: \"10.0.10.0/24\",\n\t\t},\n\t}\n\n\t_, err = 
fixture.poolClient.Update(context.Background(), pool, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected pool to be un-marked conflicting\")\n\t}\n}", "func TestShouldIgnoreRepeated(t *testing.T) {\n\tvar packetEvent PacketEvent = PacketEvent{\n\t\tTimeStamp: \"Sun Mar 08 20:02:59 EDT 2020\",\n\t\tDropReason: \"PolicyDrop-br-int/POL_TABLE\",\n\t\tSourceMac: \"16:39:19:fa:f8:40\",\n\t\tDestinationMac: \"62:58:da:98:01:97\",\n\t\tEtherType: \"IPv4\",\n\t\tSourceIP: \"10.1.1.1\",\n\t\tDestinationIP: \"10.1.1.2\",\n\t\tIPProto: \"UDP\",\n\t\tSourcePort: \"10023\",\n\t\tDestinationPort: \"53\",\n\t}\n\ttempdir, err := os.MkdirTemp(\"\", \"hostagent_test_\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer os.RemoveAll(tempdir)\n\tagent := testAgent()\n\tagent.config.OpFlexEndpointDir = tempdir\n\tagent.config.OpFlexServiceDir = tempdir\n\tagent.config.OpFlexSnatDir = tempdir\n\tagent.config.UplinkIface = \"eth10\"\n\tagent.config.NodeName = \"test-node\"\n\tagent.config.ServiceVlan = 4003\n\tagent.config.UplinkMacAdress = \"5a:fd:16:e5:e7:c0\"\n\tagent.config.DropLogExpiryTime = 10\n\tagent.config.DropLogRepeatIntervalTime = 2\n\tagent.run()\n\tfor i, pt := range podTests {\n\t\tif i%2 == 0 {\n\t\t\tos.WriteFile(filepath.Join(tempdir,\n\t\t\t\tpt.uuid+\"_\"+pt.cont+\"_\"+pt.veth+\".ep\"),\n\t\t\t\t[]byte(\"random gibberish\"), 0644)\n\t\t}\n\n\t\tpod := pod(pt.uuid, pt.namespace, pt.name, pt.eg, pt.sg, pt.qp)\n\t\tpod.Status.PodIP = pt.ip\n\t\tpod.Status.Phase = \"Running\"\n\t\tcnimd := cnimd(pt.namespace, pt.name, pt.ip, pt.cont, pt.veth)\n\t\tagent.epMetadata[pt.namespace+\"/\"+pt.name] =\n\t\t\tmap[string]*metadata.ContainerMetadata{\n\t\t\t\tcnimd.Id.ContId: cnimd,\n\t\t\t}\n\t\tagent.fakePodSource.Add(pod)\n\t}\n\ttime.Sleep(3000 * time.Millisecond)\n\tcurrTime, _ := time.Parse(time.UnixDate, \"Sun Mar 08 20:03:59 EDT 2020\")\n\terr = agent.processPacketEvent(&packetEvent, currTime)\n\tassert.Nil(t, 
err, \"Failed to process event\")\n\tpacketEvent.TimeStamp = \"Sun Mar 08 20:04:59 EDT 2020\"\n\tcurrTime = currTime.Add(time.Minute * 1)\n\tassert.Equal(t, true, agent.shouldIgnore(&packetEvent, currTime), \"repeated event prune test failed\")\n\tpacketEvent.TimeStamp = \"Sun Mar 08 20:06:59 EDT 2020\"\n\tcurrTime = currTime.Add(time.Minute * 5)\n\tassert.Equal(t, false, agent.shouldIgnore(&packetEvent, currTime), \"post event test failed\")\n\tfor _, pt := range podTests {\n\t\tpod := pod(pt.uuid, pt.namespace, pt.name, pt.eg, pt.sg, pt.qp)\n\t\tagent.fakePodSource.Delete(pod)\n\t}\n\tagent.stop()\n}", "func TestInterPodAffinityAnnotations(t *testing.T) {\n\tutilfeature.DefaultFeatureGate.Set(\"AffinityInAnnotations=true\")\n\tpodLabel := map[string]string{\"service\": \"securityscan\"}\n\tlabels1 := map[string]string{\n\t\t\"region\": \"r1\",\n\t\t\"zone\": \"z11\",\n\t}\n\tpodLabel2 := map[string]string{\"security\": \"S1\"}\n\tnode1 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: \"machine1\", Labels: labels1}}\n\ttests := []struct {\n\t\tpod *v1.Pod\n\t\tpods []*v1.Pod\n\t\tnode *v1.Node\n\t\tfits bool\n\t\ttest string\n\t}{\n\t\t{\n\t\t\tpod: new(v1.Pod),\n\t\t\tnode: &node1,\n\t\t\tfits: true,\n\t\t\ttest: \"A pod that has no required pod affinity scheduling rules can schedule onto a node with no existing pods\",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel2,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"podAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": 
\"region\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},\n\t\t\tnode: &node1,\n\t\t\tfits: true,\n\t\t\ttest: \"satisfies with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod\",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel2,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `{\"podAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"NotIn\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan3\", \"value3\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"region\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},\n\t\t\tnode: &node1,\n\t\t\tfits: true,\n\t\t\ttest: \"satisfies the pod with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod\",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel2,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"podAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\", 
\"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"namespaces\":[\"DiffNameSpace\"]\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel, Namespace: \"ns\"}}},\n\t\t\tnode: &node1,\n\t\t\tfits: false,\n\t\t\ttest: \"Does not satisfy the PodAffinity with labelSelector because of diff Namespace\",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"podAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"antivirusscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},\n\t\t\tnode: &node1,\n\t\t\tfits: false,\n\t\t\ttest: \"Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod\",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel2,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"podAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\t\"operator\": \"Exists\"\n\t\t\t\t\t\t\t\t\t\t}, {\n\t\t\t\t\t\t\t\t\t\t\t\"key\": \"wrongkey\",\n\t\t\t\t\t\t\t\t\t\t\t\"operator\": 
\"DoesNotExist\"\n\t\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\"topologyKey\": \"region\"\n\t\t\t\t\t\t\t\t}, {\n\t\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\"]\n\t\t\t\t\t\t\t\t\t\t}, {\n\t\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\t\"operator\": \"NotIn\",\n\t\t\t\t\t\t\t\t\t\t\t\"values\": [\"WrongValue\"]\n\t\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\"topologyKey\": \"region\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},\n\t\t\tnode: &node1,\n\t\t\tfits: true,\n\t\t\ttest: \"satisfies the PodAffinity with different label Operators in multiple RequiredDuringSchedulingIgnoredDuringExecution \",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel2,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"podAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\t\"operator\": \"Exists\"\n\t\t\t\t\t\t\t\t\t\t}, {\n\t\t\t\t\t\t\t\t\t\t\t\"key\": \"wrongkey\",\n\t\t\t\t\t\t\t\t\t\t\t\"operator\": \"DoesNotExist\"\n\t\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\"topologyKey\": \"region\"\n\t\t\t\t\t\t\t\t}, {\n\t\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan2\"]\n\t\t\t\t\t\t\t\t\t\t}, 
{\n\t\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\t\"operator\": \"NotIn\",\n\t\t\t\t\t\t\t\t\t\t\t\"values\": [\"WrongValue\"]\n\t\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\"topologyKey\": \"region\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},\n\t\t\tnode: &node1,\n\t\t\tfits: false,\n\t\t\ttest: \"The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression item don't match.\",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel2,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"podAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"region\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"podAntiAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"antivirusscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"node\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},\n\t\t\tnode: &node1,\n\t\t\tfits: true,\n\t\t\ttest: \"satisfies the 
PodAffinity and PodAntiAffinity with the existing pod\",\n\t\t},\n\t\t// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.\n\t\t//{\n\t\t//\t pod: &v1.Pod{\n\t\t//\t\tObjectMeta: metav1.ObjectMeta{\n\t\t//\t\t\tLabels: podLabel2,\n\t\t//\t\t\tAnnotations: map[string]string{\n\t\t//\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t//\t\t\t\t{\"podAffinity\": {\n\t\t//\t\t\t\t\t\"requiredDuringSchedulingRequiredDuringExecution\": [\n\t\t//\t\t\t\t\t\t{\n\t\t//\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t//\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t//\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t//\t\t\t\t\t\t\t\t\t\"operator\": \"Exists\"\n\t\t//\t\t\t\t\t\t\t\t}, {\n\t\t//\t\t\t\t\t\t\t\t\t\"key\": \"wrongkey\",\n\t\t//\t\t\t\t\t\t\t\t\t\"operator\": \"DoesNotExist\"\n\t\t//\t\t\t\t\t\t\t\t}]\n\t\t//\t\t\t\t\t\t\t},\n\t\t//\t\t\t\t\t\t\t\"topologyKey\": \"region\"\n\t\t//\t\t\t\t\t\t}, {\n\t\t//\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t//\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t//\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t//\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t//\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\"]\n\t\t//\t\t\t\t\t\t\t\t}, {\n\t\t//\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t//\t\t\t\t\t\t\t\t\t\"operator\": \"NotIn\",\n\t\t//\t\t\t\t\t\t\t\t\t\"values\": [\"WrongValue\"]\n\t\t//\t\t\t\t\t\t\t\t}]\n\t\t//\t\t\t\t\t\t\t},\n\t\t//\t\t\t\t\t\t\t\"topologyKey\": \"region\"\n\t\t//\t\t\t\t\t\t}\n\t\t//\t\t\t\t\t]\n\t\t//\t\t\t\t}}`,\n\t\t//\t\t\t},\n\t\t//\t\t},\n\t\t//\t},\n\t\t//\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"}, ObjectMeta: metav1.ObjectMeta{Labels: podlabel}}},\n\t\t//\tnode: &node1,\n\t\t//\tfits: true,\n\t\t//\ttest: \"satisfies the PodAffinity with different Label Operators in multiple RequiredDuringSchedulingRequiredDuringExecution \",\n\t\t//},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel2,\n\t\t\t\t\tAnnotations: 
map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"podAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"region\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"podAntiAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"antivirusscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"node\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"},\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Labels: podLabel,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"PodAntiAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"antivirusscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"node\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t}},\n\t\t\t}},\n\t\t\tnode: &node1,\n\t\t\tfits: true,\n\t\t\ttest: \"satisfies the PodAffinity and PodAntiAffinity and PodAntiAffinity symmetry with the existing pod\",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel2,\n\t\t\t\t\tAnnotations: 
map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"podAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"region\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"podAntiAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"zone\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},\n\t\t\tnode: &node1,\n\t\t\tfits: false,\n\t\t\ttest: \"satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod\",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"podAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"region\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"podAntiAffinity\": 
{\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"antivirusscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"node\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"},\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Labels: podLabel,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"PodAntiAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"zone\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t}},\n\t\t\t}},\n\t\t\tnode: &node1,\n\t\t\tfits: false,\n\t\t\ttest: \"satisfies the PodAffinity and PodAntiAffinity but doesn't satisfies PodAntiAffinity symmetry with the existing pod\",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"podAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"NotIn\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": 
\"region\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine2\"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},\n\t\t\tnode: &node1,\n\t\t\tfits: false,\n\t\t\ttest: \"pod matches its own Label in PodAffinity and that matches the existing pod Labels\",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel,\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"},\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Labels: podLabel,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n\t\t\t\t\t\t{\"PodAntiAffinity\": {\n\t\t\t\t\t\t\t\"requiredDuringSchedulingIgnoredDuringExecution\": [{\n\t\t\t\t\t\t\t\t\"labelSelector\": {\n\t\t\t\t\t\t\t\t\t\"matchExpressions\": [{\n\t\t\t\t\t\t\t\t\t\t\"key\": \"service\",\n\t\t\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\t\t\"values\": [\"securityscan\", \"value2\"]\n\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\"topologyKey\": \"zone\"\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t}}`,\n\t\t\t\t\t}},\n\t\t\t}},\n\t\t\tnode: &node1,\n\t\t\tfits: false,\n\t\t\ttest: \"verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. 
doesn't satisfy PodAntiAffinity symmetry with the existing pod\",\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabel,\n\t\t\t\t},\n\t\t\t},\n\t\t\tpods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: \"machine1\"},\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Labels: podLabel,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.AffinityAnnotationKey: `\n {\"PodAntiAffinity\": {\n \"requiredDuringSchedulingIgnoredDuringExecution\": [{\n \"labelSelector\": {\n \"matchExpressions\": [{\n \"key\": \"service\",\n \"operator\": \"NotIn\",\n \"values\": [\"securityscan\", \"value2\"]\n }]\n },\n \"topologyKey\": \"zone\"\n }]\n }}`,\n\t\t\t\t\t}},\n\t\t\t}},\n\t\t\tnode: &node1,\n\t\t\tfits: true,\n\t\t\ttest: \"verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. satisfy PodAntiAffinity symmetry with the existing pod\",\n\t\t},\n\t}\n\texpectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}\n\n\tfor _, test := range tests {\n\t\tnode := test.node\n\t\tvar podsOnNode []*v1.Pod\n\t\tfor _, pod := range test.pods {\n\t\t\tif pod.Spec.NodeName == node.Name {\n\t\t\t\tpodsOnNode = append(podsOnNode, pod)\n\t\t\t}\n\t\t}\n\n\t\tfit := PodAffinityChecker{\n\t\t\tinfo: FakeNodeInfo(*node),\n\t\t\tpodLister: schedulertesting.FakePodLister(test.pods),\n\t\t}\n\t\tnodeInfo := schedulercache.NewNodeInfo(podsOnNode...)\n\t\tnodeInfo.SetNode(test.node)\n\t\tnodeInfoMap := map[string]*schedulercache.NodeInfo{test.node.Name: nodeInfo}\n\t\tfits, reasons, err := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: unexpected error %v\", test.test, err)\n\t\t}\n\t\tif !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {\n\t\t\tt.Errorf(\"%s: unexpected failure reasons: %v, want: %v\", test.test, reasons, expectedFailureReasons)\n\t\t}\n\t\tif fits != test.fits 
{\n\t\t\tt.Errorf(\"%s: expected %v got %v\", test.test, test.fits, fits)\n\t\t}\n\t}\n}", "func testPortsPoliciesStackedOrUpdated() []*TestStep {\n\tblocked := func() *Reachability {\n\t\tr := NewReachability(allPods, true)\n\t\tr.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\tr.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\treturn r\n\t}\n\n\tunblocked := func() *Reachability {\n\t\treturn NewReachability(allPods, true)\n\t}\n\n\t/***\n\tInitially, only allow port 80, and verify 81 is blocked.\n\t*/\n\tpolicyName := \"policy-that-will-update-for-ports\"\n\tbuilder := &NetworkPolicySpecBuilder{}\n\tbuilder = builder.SetName(\"x\", policyName).SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder.SetTypeIngress()\n\tbuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil)\n\tpolicy1 := builder.Get()\n\n\tbuilder2 := &NetworkPolicySpecBuilder{}\n\t// by preserving the same name, this policy will also serve to test the 'updated policy' scenario.\n\tbuilder2 = builder2.SetName(\"x\", policyName).SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder2.SetTypeIngress()\n\tbuilder2.AddIngress(v1.ProtocolTCP, &p81, nil, nil, nil, nil, nil, nil, nil)\n\tpolicy2 := builder2.Get()\n\n\t// The first policy was on port 80, which was allowed, while 81 wasn't.\n\t// The second policy was on port 81, which was allowed.\n\t// At this point, if we stacked, make sure 80 is still unblocked\n\t// Whereas if we DIDNT stack, make sure 80 is blocked.\n\treturn []*TestStep{\n\t\t{\n\t\t\t\"Port 81 -- blocked\",\n\t\t\tblocked(), // 81 blocked\n\t\t\tpolicy1,\n\t\t\t81,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"Port 81 -- unblocked\",\n\t\t\tunblocked(), // 81 open now\n\t\t\tpolicy2,\n\t\t\t81,\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"Port 80 -- blocked\",\n\t\t\tblocked(),\n\t\t\tpolicy2,\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t}\n}", "func shouldPublishRouterCA(ingresses []ingressv1alpha1.ClusterIngress) bool {\n\tfor _, ci := range ingresses {\n\t\tif 
ci.Spec.DefaultCertificateSecret == nil {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (nse ErrNoSuchEndpoint) NotFound() {}", "func TestTrunkENI_getBranchInterfacesUsedByPod_MissingAnnotation(t *testing.T) {\n\ttrunkENI := getMockTrunk()\n\tbranchENIs := trunkENI.getBranchInterfacesUsedByPod(MockPod2)\n\n\tassert.Equal(t, 0, len(branchENIs))\n}", "func (adminAPIOp) SkipVerification() bool { return true }", "func (r *ReconcileLegacyHeader) reconcileIngress(ctx context.Context, instance *operatorsv1alpha1.LegacyHeader, needToRequeue *bool) error {\n\treqLogger := log.WithValues(\"func\", \"reconcileIngress\", \"instance.Name\", instance.Name)\n\t// Define a new Ingress\n\tnewNavIngress := res.IngressForLegacyUI(instance)\n\t// Set instance as the owner and controller of the ingress\n\terr := controllerutil.SetControllerReference(instance, newNavIngress, r.scheme)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Failed to set owner for Nav ingress\")\n\t\treturn nil\n\t}\n\terr = res.ReconcileIngress(ctx, r.client, instance.Namespace, res.LegacyReleaseName, newNavIngress, needToRequeue)\n\tif err != nil {\n\t\treturn err\n\t}\n\treqLogger.Info(\"got legacy header Ingress\")\n\n\treturn nil\n}", "func loadBalancerServiceAnnotationsChanged(current, expected *corev1.Service, annotations sets.String) (bool, *corev1.Service) {\n\tchanged := false\n\tfor annotation := range annotations {\n\t\tcurrentVal, have := current.Annotations[annotation]\n\t\texpectedVal, want := expected.Annotations[annotation]\n\t\tif (want && (!have || currentVal != expectedVal)) || (have && !want) {\n\t\t\tchanged = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !changed {\n\t\treturn false, nil\n\t}\n\n\tupdated := current.DeepCopy()\n\n\tif updated.Annotations == nil {\n\t\tupdated.Annotations = map[string]string{}\n\t}\n\n\tfor annotation := range annotations {\n\t\tcurrentVal, have := current.Annotations[annotation]\n\t\texpectedVal, want := expected.Annotations[annotation]\n\t\tif want && 
(!have || currentVal != expectedVal) {\n\t\t\tupdated.Annotations[annotation] = expected.Annotations[annotation]\n\t\t} else if have && !want {\n\t\t\tdelete(updated.Annotations, annotation)\n\t\t}\n\t}\n\n\treturn true, updated\n}", "func TestChangeServiceType(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\t// This existing ClusterIP service should be ignored\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeClusterIP,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"No service updates expected\")\n\n\t\treturn false\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tawait.Block()\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tvar assignedIP string\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tassignedIP = 
svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tupdatedService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Update(context.Background(), updatedService, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 0 {\n\t\t\tt.Error(\"Expected service to have no conditions\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tupdatedService = &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeNodePort,\n\t\t},\n\t}\n\n\t_, err = 
fixture.svcClient.Services(\"default\").Update(context.Background(), updatedService, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(assignedIP)) {\n\t\tt.Fatal(\"Expected assigned IP to be released\")\n\t}\n}", "func TestIntermediateNameSameNameDifferentLayer(t *testing.T) {\n\tth := kusttest_test.MakeHarness(t)\n\tth.WriteK(\"gcp\", `\nnamePrefix: gcp-\nresources:\n- ../emea\npatchesStrategicMerge:\n- depPatch.yaml\n`)\n\tth.WriteF(\"gcp/depPatch.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: prod-foo\nspec:\n replicas: 999\n`)\n\tth.WriteK(\"emea\", `\nnamePrefix: emea-\nresources:\n- ../prod\n- deployment.yaml\n`)\n\tth.WriteF(\"emea/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\tth.WriteK(\"prod\", `\nnamePrefix: prod-\nresources:\n- ../base\n`)\n\tth.WriteK(\"base\", `\nresources:\n- deployment.yaml\n`)\n\tth.WriteF(\"base/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\tm := th.Run(\"gcp\", th.MakeDefaultOptions())\n\tth.AssertActualEqualsExpected(m, `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: gcp-emea-prod-foo\nspec:\n replicas: 999\n template:\n spec:\n containers:\n - image: whatever\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: gcp-emea-foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n}", "func TestImplementationsExistForSync(srcPrefix, dstPrefix string) error {\n\t//For now sync is allowed only from S3 to K8s\n\tswitch srcPrefix {\n\tcase K8S:\n\t\tif dstPrefix != S3 {\n\t\t\treturn fmt.Errorf(srcPrefix + \"-->\" + dstPrefix + \" not implemented\")\n\t\t}\n\tcase S3:\n\t\tif dstPrefix != K8S {\n\t\t\treturn 
fmt.Errorf(srcPrefix + \"-->\" + dstPrefix + \" not implemented\")\n\t\t}\n\t//case \"abs\":\n\t//case \"gcs\":\n\tdefault:\n\t\treturn fmt.Errorf(srcPrefix + \"-->\" + dstPrefix + \" not implemented\")\n\t}\n\n\treturn nil\n}", "func testPodLabelAllowTrafficFromBToA() []*TestStep {\n\tbuilder := &NetworkPolicySpecBuilder{}\n\tbuilder = builder.SetName(\"x\", \"allow-client-a-via-pod-selector\").SetPodSelector(map[string]string{\"pod\": \"a\"})\n\tbuilder.SetTypeIngress()\n\tbuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{\"pod\": \"b\"}, map[string]string{\"ns\": \"x\"}, nil, nil)\n\tbuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{\"pod\": \"b\"}, map[string]string{\"ns\": \"y\"}, nil, nil)\n\tbuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{\"pod\": \"b\"}, map[string]string{\"ns\": \"z\"}, nil, nil)\n\n\treachability := func() *Reachability {\n\t\treachability := NewReachability(allPods, true)\n\t\treachability.ExpectAllIngress(Pod(\"x/a\"), false)\n\t\treachability.Expect(Pod(\"x/b\"), Pod(\"x/a\"), true)\n\t\treachability.Expect(Pod(\"y/b\"), Pod(\"x/a\"), true)\n\t\treachability.Expect(Pod(\"z/b\"), Pod(\"x/a\"), true)\n\t\treachability.Expect(Pod(\"x/a\"), Pod(\"x/a\"), true)\n\t\treturn reachability\n\t}\n\treturn []*TestStep{\n\t\t{\n\t\t\t\"Port 80\",\n\t\t\treachability(),\n\t\t\tbuilder.Get(),\n\t\t\t80,\n\t\t\t0,\n\t\t},\n\t}\n}", "func isOVNKubernetesChangeSafe(prev, next *operv1.NetworkSpec) []error {\n\tpn := prev.DefaultNetwork.OVNKubernetesConfig\n\tnn := next.DefaultNetwork.OVNKubernetesConfig\n\terrs := []error{}\n\n\tif next.Migration != nil && next.Migration.MTU != nil {\n\t\tmtuNet := next.Migration.MTU.Network\n\t\tmtuMach := next.Migration.MTU.Machine\n\n\t\t// For MTU values provided for migration, verify that:\n\t\t// - The current and target MTUs for the CNI are provided\n\t\t// - The machine target MTU is provided\n\t\t// - The current MTU actually matches the 
MTU known as current\n\t\t// - The machine target MTU has a valid overhead with the CNI target MTU\n\t\tif mtuNet == nil || mtuMach == nil || mtuNet.From == nil || mtuNet.To == nil || mtuMach.To == nil {\n\t\t\terrs = append(errs, errors.Errorf(\"invalid Migration.MTU, at least one of the required fields is missing\"))\n\t\t} else {\n\t\t\t// Only check next.Migration.MTU.Network.From when it changes\n\t\t\tcheckPrevMTU := prev.Migration == nil || prev.Migration.MTU == nil || prev.Migration.MTU.Network == nil || !reflect.DeepEqual(prev.Migration.MTU.Network.From, next.Migration.MTU.Network.From)\n\t\t\tif checkPrevMTU && !reflect.DeepEqual(next.Migration.MTU.Network.From, pn.MTU) {\n\t\t\t\terrs = append(errs, errors.Errorf(\"invalid Migration.MTU.Network.From(%d) not equal to the currently applied MTU(%d)\", *next.Migration.MTU.Network.From, *pn.MTU))\n\t\t\t}\n\n\t\t\tminMTU := MinMTUIPv4\n\t\t\tfor _, cn := range next.ClusterNetwork {\n\t\t\t\tif utilnet.IsIPv6CIDRString(cn.CIDR) {\n\t\t\t\t\tminMTU = MinMTUIPv6\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif *next.Migration.MTU.Network.To < minMTU || *next.Migration.MTU.Network.To > MaxMTU {\n\t\t\t\terrs = append(errs, errors.Errorf(\"invalid Migration.MTU.Network.To(%d), has to be in range: %d-%d\", *next.Migration.MTU.Network.To, minMTU, MaxMTU))\n\t\t\t}\n\t\t\tif *next.Migration.MTU.Machine.To < minMTU || *next.Migration.MTU.Machine.To > MaxMTU {\n\t\t\t\terrs = append(errs, errors.Errorf(\"invalid Migration.MTU.Machine.To(%d), has to be in range: %d-%d\", *next.Migration.MTU.Machine.To, minMTU, MaxMTU))\n\t\t\t}\n\t\t\tif (*next.Migration.MTU.Network.To + getOVNEncapOverhead(next)) > *next.Migration.MTU.Machine.To {\n\t\t\t\terrs = append(errs, errors.Errorf(\"invalid Migration.MTU.Machine.To(%d), has to be at least %d\", *next.Migration.MTU.Machine.To, *next.Migration.MTU.Network.To+getOVNEncapOverhead(next)))\n\t\t\t}\n\t\t}\n\t} else if !reflect.DeepEqual(pn.MTU, nn.MTU) {\n\t\terrs = append(errs, 
errors.Errorf(\"cannot change ovn-kubernetes MTU without migration\"))\n\t}\n\n\tif !reflect.DeepEqual(pn.GenevePort, nn.GenevePort) {\n\t\terrs = append(errs, errors.Errorf(\"cannot change ovn-kubernetes genevePort\"))\n\t}\n\tif pn.HybridOverlayConfig == nil && nn.HybridOverlayConfig != nil {\n\t\terrs = append(errs, errors.Errorf(\"cannot start a hybrid overlay network after install time\"))\n\t}\n\tif pn.HybridOverlayConfig != nil {\n\t\tif !reflect.DeepEqual(pn.HybridOverlayConfig, nn.HybridOverlayConfig) {\n\t\t\terrs = append(errs, errors.Errorf(\"cannot edit a running hybrid overlay network\"))\n\t\t}\n\t}\n\tif pn.IPsecConfig != nil && nn.IPsecConfig != nil {\n\t\tif !reflect.DeepEqual(pn.IPsecConfig, nn.IPsecConfig) {\n\t\t\terrs = append(errs, errors.Errorf(\"cannot edit IPsec configuration at runtime\"))\n\t\t}\n\t}\n\n\treturn errs\n}", "func (c *Controller) validateBackupAnnotations(key string, volumeMissing prometheus.GaugeVec, excludeAnnotation string, backupAnnotation string) error {\n\n\tobj, exists, err := c.indexer.GetByKey(key)\n\tif err != nil {\n\t\tklog.Errorf(\"fetching object with key %s from store failed with %v\", key, err)\n\t\treturn err\n\t}\n\tif !exists {\n\t\tklog.Infof(\"pod %s does not exist anymore\", key)\n\t\tif obj, exists, err = c.deletedIndexer.GetByKey(key); err == nil && exists {\n\n\t\t\tpod := obj.(*v1.Pod)\n\t\t\townerInfo := getPodOwnerInfo(pod)\n\t\t\tklog.Infof(\"disabling metric %s\", ownerInfo.name)\n\t\t\tc.disableMetric(ownerInfo)\n\t\t\t_ = c.deletedIndexer.Delete(key)\n\t\t}\n\n\t} else {\n\t\tpod := obj.(*v1.Pod)\n\t\townerInfo := getPodOwnerInfo(pod)\n\n\t\tif _, ok := c.podCache[ownerInfo.name]; !ok {\n\t\t\tc.podCache[ownerInfo.name] = map[VolumeName]bool{}\n\t\t}\n\t\tklog.Infof(\"controlling backup config for %s\", pod.GetName())\n\t\tmissings := c.getMissingBackups(pod)\n\t\tif len(missings) > 0 {\n\t\t\tklog.Infof(\"backup missing enable metric for %s\", ownerInfo.name)\n\t\t\tc.enableMetric(ownerInfo, 
missings)\n\t\t}\n\t}\n\n\treturn nil\n}", "func checkListeners(cli kube.CLIClient, namespace string) (diag.Messages, error) {\n\tpods, err := cli.Kube().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{\n\t\t// Find all running pods\n\t\tFieldSelector: \"status.phase=Running\",\n\t\t// Find all injected pods. We don't care about non-injected pods, because the new behavior\n\t\t// mirrors Kubernetes; this is only a breaking change for existing Istio users.\n\t\tLabelSelector: \"security.istio.io/tlsMode=istio\",\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar messages diag.Messages = make([]diag.Message, 0)\n\tg := errgroup.Group{}\n\n\tsem := semaphore.NewWeighted(25)\n\tfor _, pod := range pods.Items {\n\t\tpod := pod\n\t\tif !fromLegacyNetworkingVersion(pod) {\n\t\t\t// Skip check. This pod is already on a version where the change has been made; if they were going\n\t\t\t// to break they would already be broken.\n\t\t\tcontinue\n\t\t}\n\t\tg.Go(func() error {\n\t\t\t_ = sem.Acquire(context.Background(), 1)\n\t\t\tdefer sem.Release(1)\n\t\t\t// Fetch list of all clusters to get which ports we care about\n\t\t\tresp, err := cli.EnvoyDo(context.Background(), pod.Name, pod.Namespace, \"GET\", \"config_dump?resource=dynamic_active_clusters&mask=cluster.name\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"failed to get config dump: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tports, err := extractInboundPorts(resp)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"failed to get ports: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Next, look at what ports the pod is actually listening on\n\t\t\t// This requires parsing the output from ss; the version we use doesn't support JSON\n\t\t\tout, _, err := cli.PodExec(pod.Name, pod.Namespace, \"istio-proxy\", \"ss -ltnH\")\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"executable file not found\") {\n\t\t\t\t\t// Likely distroless or other custom build without ss. 
Nothing we can do here...\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfmt.Println(\"failed to get listener state: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfor _, ss := range strings.Split(out, \"\\n\") {\n\t\t\t\tif len(ss) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbind, port, err := net.SplitHostPort(getColumn(ss, 3))\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"failed to get parse state: \", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tip, _ := netip.ParseAddr(bind)\n\t\t\t\tportn, _ := strconv.Atoi(port)\n\t\t\t\tif _, f := ports[portn]; f {\n\t\t\t\t\tc := ports[portn]\n\t\t\t\t\tif bind == \"\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if bind == \"*\" || ip.IsUnspecified() {\n\t\t\t\t\t\tc.Wildcard = true\n\t\t\t\t\t} else if ip.IsLoopback() {\n\t\t\t\t\t\tc.Lo = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tc.Explicit = true\n\t\t\t\t\t}\n\t\t\t\t\tports[portn] = c\n\t\t\t\t}\n\t\t\t}\n\n\t\t\torigin := &kube3.Origin{\n\t\t\t\tType: gvk.Pod,\n\t\t\t\tFullName: resource.FullName{\n\t\t\t\t\tNamespace: resource.Namespace(pod.Namespace),\n\t\t\t\t\tName: resource.LocalName(pod.Name),\n\t\t\t\t},\n\t\t\t\tResourceVersion: resource.Version(pod.ResourceVersion),\n\t\t\t}\n\t\t\tfor port, status := range ports {\n\t\t\t\t// Binding to localhost no longer works out of the box on Istio 1.10+, give them a warning.\n\t\t\t\tif status.Lo {\n\t\t\t\t\tmessages.Add(msg.NewLocalhostListener(&resource.Instance{Origin: origin}, fmt.Sprint(port)))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn messages, nil\n}", "func SkipIfNoRoutingAPI() {\n\t// TODO: #161159794 remove this function and check a nicer error message when available\n\tvar response struct {\n\t\tRoutingEndpoint string `json:\"routing_endpoint\"`\n\t}\n\tCurl(&response, \"/v2/info\")\n\n\tif response.RoutingEndpoint == \"\" {\n\t\tSkip(\"Test requires routing endpoint on /v2/info\")\n\t}\n}", "func shouldDiscoverAPI(endpoint string, 
discoveryTags, ignoreTags, apiTags []string) bool {\n\n\tif endpoint == \"\" {\n\t\t// If the API has no exposed endpoint we're not going to discover it.\n\t\treturn false\n\t}\n\n\tif doesAPIContainAnyMatchingTag(ignoreTags, apiTags) {\n\t\treturn false\n\t}\n\n\tif len(discoveryTags) > 0 {\n\t\tif !doesAPIContainAnyMatchingTag(discoveryTags, apiTags) {\n\t\t\treturn false // ignore\n\t\t}\n\t}\n\treturn true\n}", "func (r *Reconciler) checkHTTsProxyApicastServiceReconciled(ctx context.Context, serverClient k8sclient.Client, svcName string) (integreatlyv1alpha1.StatusPhase, bool, error) {\n\tservice := &corev1.Service{}\n\terr := serverClient.Get(ctx, k8sTypes.NamespacedName{Name: svcName, Namespace: r.Config.GetNamespace()}, service)\n\tif err != nil {\n\t\tif k8serr.IsNotFound(err) {\n\t\t\treturn integreatlyv1alpha1.PhaseAwaitingComponents, false, nil\n\t\t}\n\t\treturn integreatlyv1alpha1.PhaseFailed, false, err\n\t}\n\n\tfor _, serviceName := range service.Spec.Ports {\n\t\tif serviceName.Name == \"httpsproxy\" {\n\t\t\treturn integreatlyv1alpha1.PhaseCompleted, true, err\n\t\t}\n\t}\n\n\treturn integreatlyv1alpha1.PhaseCompleted, false, nil\n}", "func (a *FixedAllocateAction) checkIPOccupied() (pbcommon.ErrCode, string) {\n\texistedIP, err := a.storeIf.GetIPObject(a.ctx, a.req.Address)\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\tblog.Infof(\"no allocated ip, try to allocate one\")\n\t\t\treturn pbcommon.ErrCode_ERROR_OK, \"\"\n\t\t}\n\t\treturn pbcommon.ErrCode_ERROR_CLOUD_NETSERVICE_STOREOPS_FAILED, \"get ip object failed\"\n\t}\n\tif !existedIP.IsFixed {\n\t\treturn pbcommon.ErrCode_ERROR_CLOUD_NETSERVICE_ALLOCATE_IP_NOT_MATCH, \"found ip, but it is not fixed ip\"\n\t}\n\t// check info\n\tif existedIP.VpcID != a.req.VpcID ||\n\t\texistedIP.Region != a.req.Region ||\n\t\texistedIP.SubnetID != a.req.SubnetID ||\n\t\texistedIP.Cluster != a.req.Cluster ||\n\t\texistedIP.Namespace != a.req.Namespace ||\n\t\texistedIP.PodName != a.req.PodName 
||\n\t\texistedIP.WorkloadName != a.req.WorkloadName ||\n\t\texistedIP.WorkloadKind != a.req.WorkloadKind {\n\n\t\treturn pbcommon.ErrCode_ERROR_CLOUD_NETSERVICE_ALLOCATE_IP_NOT_MATCH,\n\t\t\t\"found allocated fixed ip, but info not match\"\n\t}\n\t// should not happen, may be the last time release is failed\n\tif existedIP.Status == types.IP_STATUS_ACTIVE {\n\t\treturn pbcommon.ErrCode_ERROR_CLOUD_NETSERVICE_TRY_TO_ALLOCATE_ACTIVE_IP,\n\t\t\t\"dirty data, request fixed ip is active\"\n\t}\n\ta.allocatedIPObj = existedIP\n\treturn pbcommon.ErrCode_ERROR_OK, \"\"\n}", "func (c *CheckAnnotationsIsNotPresent) Check(t *testing.T, obj *metav1.ObjectMeta) error {\n\treturn checkLabelIsNotPresent(t, c.Key, obj.Annotations, \"obj.Annotations\")\n}", "func TestRangeDelete(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = 
fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, 500*time.Millisecond)\n\n\t// Add a new CIDR, this should not have any effect on the existing service.\n\tpoolA.Spec.Cidrs = append(poolA.Spec.Cidrs, cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\tCidr: \"10.0.20.0/24\",\n\t})\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif !strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.20\") {\n\t\t\tt.Error(\"Expected new ingress to be in the 10.0.20.0/24 range\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t// Remove the existing range, this should trigger the re-allocation of the existing service\n\tpoolA.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: \"10.0.20.0/24\",\n\t\t},\n\t}\n\t_, err = fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func ForwardInconsistent() RequestCallback {\n\treturn func(req *Request) {\n\t\treq.Headers.Set(HeaderInconsistent, \"forward-active-node\")\n\t}\n}", "func (c *MockAzureCloud) GetApiIngressStatus(cluster *kops.Cluster) 
([]fi.ApiIngressStatus, error) {\n\treturn nil, nil\n}", "func (err *RuleNotSatisfied) RuleNotSatisfied() {}", "func (endpointSliceStrategy) AllowUnconditionalUpdate() bool {\n\treturn true\n}", "func (s *Service) Reconcile(ctx context.Context) error {\n\tfor _, inboundNatSpec := range s.Scope.InboundNatSpecs() {\n\t\ts.Scope.V(2).Info(\"creating inbound NAT rule\", \"NAT rule\", inboundNatSpec.Name)\n\n\t\tlb, err := s.LoadBalancersClient.Get(ctx, s.Scope.ResourceGroup(), inboundNatSpec.LoadBalancerName)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to get Load Balancer %s\", inboundNatSpec.LoadBalancerName)\n\t\t}\n\n\t\tif lb.LoadBalancerPropertiesFormat == nil || lb.FrontendIPConfigurations == nil || lb.InboundNatRules == nil {\n\t\t\treturn errors.Errorf(\"Could not get existing inbound NAT rules from load balancer %s properties\", to.String(lb.Name))\n\t\t}\n\n\t\tports := make(map[int32]struct{})\n\t\tif s.natRuleExists(ports)(*lb.InboundNatRules, inboundNatSpec.Name) {\n\t\t\t// Inbound NAT Rule already exists, nothing to do here.\n\t\t\tcontinue\n\t\t}\n\n\t\tsshFrontendPort, err := s.getAvailablePort(ports)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to find available SSH Frontend port for NAT Rule %s in load balancer %s\", inboundNatSpec.Name, to.String(lb.Name))\n\t\t}\n\n\t\trule := network.InboundNatRule{\n\t\t\tName: to.StringPtr(inboundNatSpec.Name),\n\t\t\tInboundNatRulePropertiesFormat: &network.InboundNatRulePropertiesFormat{\n\t\t\t\tBackendPort: to.Int32Ptr(22),\n\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\tID: (*lb.FrontendIPConfigurations)[0].ID,\n\t\t\t\t},\n\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\tFrontendPort: &sshFrontendPort,\n\t\t\t},\n\t\t}\n\t\ts.Scope.V(3).Info(\"Creating rule %s using port %d\", \"NAT rule\", inboundNatSpec.Name, \"port\", sshFrontendPort)\n\n\t\terr = 
s.Client.CreateOrUpdate(ctx, s.Scope.ResourceGroup(), to.String(lb.Name), inboundNatSpec.Name, rule)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to create inbound NAT rule %s\", inboundNatSpec.Name)\n\t\t}\n\n\t\ts.Scope.V(2).Info(\"successfully created inbound NAT rule\", \"NAT rule\", inboundNatSpec.Name)\n\t}\n\treturn nil\n}" ]
[ "0.6973834", "0.6291292", "0.598272", "0.5893382", "0.5833556", "0.56057334", "0.55467504", "0.5459466", "0.54379237", "0.54132277", "0.5397058", "0.5396501", "0.5384227", "0.5249351", "0.52340305", "0.52306193", "0.52165854", "0.5210059", "0.51991975", "0.5189912", "0.5188358", "0.5183264", "0.5170641", "0.5166016", "0.5163699", "0.51625454", "0.5147407", "0.51443416", "0.50970834", "0.5092712", "0.5037543", "0.50337917", "0.50260186", "0.50211227", "0.501268", "0.4994278", "0.49744323", "0.49385974", "0.49362275", "0.4936132", "0.49231932", "0.4922402", "0.49197346", "0.4918952", "0.4913101", "0.4912048", "0.49106637", "0.49043623", "0.49031785", "0.4897783", "0.48905012", "0.48674974", "0.48649788", "0.48481435", "0.48477566", "0.484367", "0.48393524", "0.4835811", "0.4835319", "0.4835311", "0.48241478", "0.480564", "0.4800135", "0.4798611", "0.47904712", "0.4784375", "0.478236", "0.47817984", "0.4780744", "0.47772723", "0.4772971", "0.4772815", "0.47537252", "0.47536787", "0.47458345", "0.47416785", "0.47414452", "0.47251272", "0.47249377", "0.4723982", "0.47227064", "0.4716607", "0.47023177", "0.47009677", "0.46916598", "0.46814632", "0.46787688", "0.46769482", "0.4676827", "0.46766374", "0.46752623", "0.466969", "0.4668932", "0.4667885", "0.4664234", "0.46632835", "0.46595296", "0.46577737", "0.46561706", "0.46556512" ]
0.6990018
0
String returns the string representation
func (s StartMonitoringMembersInput) String() string { return awsutil.Prettify(s) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateCanaryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Library) String() string {\n\tres := make([]string, 5)\n\tres[0] = \"ID: \" + reform.Inspect(s.ID, true)\n\tres[1] = \"UserID: \" + reform.Inspect(s.UserID, true)\n\tres[2] = \"VolumeID: \" + reform.Inspect(s.VolumeID, true)\n\tres[3] = \"CreatedAt: \" + reform.Inspect(s.CreatedAt, true)\n\tres[4] = \"UpdatedAt: \" + reform.Inspect(s.UpdatedAt, true)\n\treturn strings.Join(res, \", \")\n}", "func (r Info) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (s ReEncryptOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateFHIRDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\toutput := output{\n\t\tRerun: Rerun,\n\t\tVariables: Variables,\n\t\tItems: Items,\n\t}\n\tvar err error\n\tvar b []byte\n\tif Indent == \"\" {\n\t\tb, err = json.Marshal(output)\n\t} else {\n\t\tb, err = json.MarshalIndent(output, \"\", Indent)\n\t}\n\tif err != nil {\n\t\tmessageErr := Errorf(\"Error in parser. 
Please report this output to https://github.com/drgrib/alfred/issues: %v\", err)\n\t\tpanic(messageErr)\n\t}\n\ts := string(b)\n\treturn s\n}", "func (s CreateQuickConnectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *Registry) String() string {\n\tout := make([]string, 0, len(r.nameToObject))\n\tfor name, object := range r.nameToObject {\n\t\tout = append(out, fmt.Sprintf(\"* %s:\\n%s\", name, object.serialization))\n\t}\n\treturn strings.Join(out, \"\\n\\n\")\n}", "func (s CreateSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSafetyRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateLanguageModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (r SendAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (r ReceiveAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (enc *simpleEncoding) String() string {\n\treturn \"simpleEncoding(\" + enc.baseName + \")\"\n}", "func (s CreateDatabaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienium) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (s CreateHITTypeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateEntityOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Addshifttraderequest) String() string {\n \n \n \n \n o.AcceptableIntervals = []string{\"\"} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func 
(s CreateUseCaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Rooms) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (i Info) String() string {\n\ts, _ := i.toJSON()\n\treturn s\n}", "func (o *Botversionsummary) String() string {\n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (e ExternalCfps) String() string {\n\tje, _ := json.Marshal(e)\n\treturn string(je)\n}", "func (s CreateTrustStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\treturn fmt.Sprintf(\n\t\t\"AppVersion = %s\\n\"+\n\t\t\t\"VCSRef = %s\\n\"+\n\t\t\t\"BuildVersion = %s\\n\"+\n\t\t\t\"BuildDate = %s\",\n\t\tAppVersion, VCSRef, BuildVersion, Date,\n\t)\n}", "func (s CreateDataLakeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSolutionVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (i NotMachine) String() string { return toString(i) }", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s StartPipelineReprocessingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSequenceStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Adjustablelivespeakerdetection) String() string {\n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateRateBasedRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Resiliency) String() string {\n\tb, _ := json.Marshal(r)\n\treturn string(b)\n}", "func (s 
RestoreFromRecoveryPointOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateWaveOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (s CreateRoomOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotLocaleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienia) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (i *Info) String() string {\n\tb, _ := json.Marshal(i)\n\treturn string(b)\n}", "func (s ProcessingFeatureStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ExportProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r RoomOccupancies) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (r *InterRecord) String() string {\n\tbuf := r.Bytes()\n\tdefer ffjson.Pool(buf)\n\n\treturn string(buf)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Coretype) String() string {\n \n \n \n \n \n o.ValidationFields = []string{\"\"} \n \n o.ItemValidationFields = []string{\"\"} \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateLayerOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelCardOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Limitchangerequestdetails) String() string {\n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", 
"func (s NetworkPathComponentDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (t Terms) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (g GetObjectOutput) String() string {\n\treturn helper.Prettify(g)\n}", "func (s StartContactEvaluationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Interactionstatsalert) String() string {\n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Digitalcondition) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (r RoomOccupancy) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (d *Diagram) String() string { return toString(d) }", "func (o *Outboundroute) String() string {\n \n \n \n \n o.ClassificationTypes = []string{\"\"} \n \n \n o.ExternalTrunkBases = []Domainentityref{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateCodeRepositoryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateActivationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolutionTechniques) String() string {\n\treturn 
awsutil.Prettify(s)\n}", "func (s CreateTrialComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c CourseCode) String() string {\n\tjc, _ := json.Marshal(c)\n\treturn string(jc)\n}", "func (p *Parms) String() string {\n\tout, _ := json.MarshalIndent(p, \"\", \"\\t\")\n\treturn string(out)\n}", "func (p polynomial) String() (str string) {\n\tfor _, m := range p.monomials {\n\t\tstr = str + \" \" + m.String() + \" +\"\n\t}\n\tstr = strings.TrimRight(str, \"+\")\n\treturn \"f(x) = \" + strings.TrimSpace(str)\n}", "func (s CreateThingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *RUT) String() string {\n\treturn r.Format(DefaultFormatter)\n}", "func (s CreatePatchBaselineOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Crossplatformpolicycreate) String() string {\n \n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s BotVersionLocaleDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s LifeCycleLastTestInitiated) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteMultiplexProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetObjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s LifeCycleLastTestReverted) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateDocumentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateIntegrationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Commonruleconditions) String() string {\n o.Clauses = []Commonruleconditions{{}} \n o.Predicates = []Commonrulepredicate{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (t 
Test1s) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (o *Directrouting) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (s CreateContactFlowOutput) String() string {\n\treturn awsutil.Prettify(s)\n}" ]
[ "0.7215058", "0.7215058", "0.72000957", "0.7199919", "0.7177383", "0.7166947", "0.7118059", "0.7087492", "0.70870787", "0.7079275", "0.70782894", "0.7067719", "0.7031721", "0.70269966", "0.7026298", "0.70251423", "0.7021565", "0.70164025", "0.701059", "0.7010184", "0.70022964", "0.6997043", "0.6996532", "0.6992619", "0.69909185", "0.69900763", "0.69862556", "0.6985364", "0.6975378", "0.69738907", "0.69624275", "0.6961772", "0.69603413", "0.69507927", "0.6946753", "0.69460964", "0.69460964", "0.6944943", "0.694029", "0.69369334", "0.69332623", "0.69287163", "0.692656", "0.6924643", "0.69216746", "0.69213074", "0.69181406", "0.6917802", "0.6911058", "0.69104654", "0.6909528", "0.690845", "0.690454", "0.6899065", "0.6896141", "0.6894107", "0.6894107", "0.6894107", "0.68921995", "0.68920684", "0.689124", "0.68893504", "0.688871", "0.6884391", "0.6882336", "0.6880731", "0.68767136", "0.68766147", "0.68766147", "0.68751997", "0.68735147", "0.68734384", "0.68731403", "0.6871602", "0.6869421", "0.68684965", "0.68677104", "0.68677104", "0.68677104", "0.68677104", "0.68673396", "0.68622416", "0.6862084", "0.6859391", "0.6857645", "0.6853781", "0.68523467", "0.6851581", "0.6846037", "0.6844023", "0.6843859", "0.68434954", "0.68419206", "0.68416274", "0.684033", "0.6839815", "0.68363225", "0.6835165", "0.68334675", "0.68327725", "0.6832733" ]
0.0
-1
Validate inspects the fields of the type to determine if they are valid.
func (s *StartMonitoringMembersInput) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "StartMonitoringMembersInput"} if s.AccountIds == nil { invalidParams.Add(aws.NewErrParamRequired("AccountIds")) } if s.AccountIds != nil && len(s.AccountIds) < 1 { invalidParams.Add(aws.NewErrParamMinLen("AccountIds", 1)) } if s.DetectorId == nil { invalidParams.Add(aws.NewErrParamRequired("DetectorId")) } if s.DetectorId != nil && len(*s.DetectorId) < 1 { invalidParams.Add(aws.NewErrParamMinLen("DetectorId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *InfoField) Validate() error {\n\tif err := f.BWCls.Validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := f.RLC.Validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := f.Idx.Validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := f.PathType.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (f FieldSpec) Validate() error {\n\tif f.Name == \"\" {\n\t\treturn errors.New(\"Field name required\")\n\t}\n\n\tif f.Type == \"\" {\n\t\treturn errors.New(\"Field type required\")\n\t}\n\n\treturn nil\n}", "func (p *Pass) FieldsValid() bool {\n\tfmt.Printf(\"validating: \")\n\tvalid := true\n\tfor k, v := range *p {\n\t\tfmt.Printf(\"%v...\", k)\n\t\tv := isFieldValid(k, v)\n\t\tvalid = valid && v\n\t\tif v {\n\t\t\tfmt.Printf(\"VALID \")\n\t\t} else {\n\t\t\tfmt.Printf(\"INVALID \")\n\t\t}\n\t}\n\n\tfmt.Println(\"\")\n\treturn valid\n}", "func (m Type) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *TestFieldsEx2) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateFieldType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProjectID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Validate(instance interface{}) string {\n\tval := unwrap(reflect.ValueOf(instance))\n\ttyp := val.Type()\n\n\tif typ.Kind() != reflect.Struct {\n\t\tcore.DefaultLogger.Panic(\"The provided instance is not a struct\")\n\t}\n\n\tvar result []string\n\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := typ.Field(i)\n\t\tfieldTag := field.Tag\n\t\tif len(fieldTag) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfieldVal := val.Field(i)\n\t\tfieldKind := fieldVal.Kind()\n\t\tif !fieldVal.CanInterface() || fieldKind == reflect.Invalid {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar toEval []evalContext\n\t\tvar requiredCtx *evalContext\n\n\t\tfor _, v := range validators {\n\t\t\tif 
param, found := fieldTag.Lookup(v.key); found {\n\t\t\t\tctx := evalContext{validator: v, param: param}\n\n\t\t\t\tif v.key == required.key {\n\t\t\t\t\trequiredCtx = &ctx\n\t\t\t\t} else {\n\t\t\t\t\ttoEval = append(toEval, ctx)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(toEval) == 0 && requiredCtx == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif requiredCtx == nil {\n\t\t\trequiredCtx = &evalContext{validator: required, param: \"true\"}\n\t\t}\n\n\t\tvar errors []string\n\t\teval := func(ctx evalContext) bool {\n\t\t\tif err := ctx.validator.fn(fieldVal, ctx.param); len(err) > 0 {\n\t\t\t\terrors = append(errors, err)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\n\t\tif eval(*requiredCtx) {\n\t\t\tfor _, ctx := range toEval {\n\t\t\t\teval(ctx)\n\t\t\t}\n\t\t}\n\n\t\tif len(errors) > 0 {\n\t\t\tresult = append(result, fmt.Sprintf(\"%s: %s\", field.Name, strings.Join(errors, \", \")))\n\t\t}\n\t}\n\n\treturn strings.Join(result, \"; \")\n}", "func (info *structInfo) fieldValid(i int, t reflect.Type) bool {\n\treturn info.field(i).isValid(i, t)\n}", "func (v *ClassValue) Valid() bool {\n\tfor _, f := range v.Fields {\n\t\tif !f.Valid() {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (self *StructFieldDef) Validate() error {\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"StructFieldDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"Identifier\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.name does not contain a valid Identifier (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"StructFieldDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid 
{\n\t\t\treturn fmt.Errorf(\"StructFieldDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Items != \"\" {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Items)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.items does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Keys != \"\" {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Keys)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.keys does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *Validator) Validate(data interface{}) (bool, []string, error) {\n\t//validate and check for any errors, reading explicit validations errors and returning\n\t//a list of fields that failed or the error\n\terr := v.Validator.Struct(data)\n\tif err != nil {\n\t\tvalidationErrs, ok := err.(validator.ValidationErrors)\n\t\tif !ok {\n\t\t\treturn false, nil, errors.Wrap(err, \"validate\")\n\t\t}\n\t\tfields := make([]string, 0)\n\t\tfor _, validationErr := range validationErrs {\n\t\t\tfields = append(fields, validationErr.Field())\n\t\t}\n\t\treturn false, fields, nil\n\t}\n\treturn true, nil, nil\n}", "func validateFields(req *logical.Request, data *framework.FieldData) error {\n\tvar unknownFields []string\n\tfor k := range req.Data {\n\t\tif _, ok := data.Schema[k]; !ok {\n\t\t\tunknownFields = append(unknownFields, k)\n\t\t}\n\t}\n\n\tif len(unknownFields) > 0 {\n\t\t// Sort since this is a human error\n\t\tsort.Strings(unknownFields)\n\n\t\treturn fmt.Errorf(\"unknown fields: %q\", unknownFields)\n\t}\n\n\treturn nil\n}", "func validateFields(req *logical.Request, data *framework.FieldData) error {\n\tvar unknownFields []string\n\tfor k := range req.Data {\n\t\tif _, ok := data.Schema[k]; !ok {\n\t\t\tunknownFields = append(unknownFields, k)\n\t\t}\n\t}\n\n\tif len(unknownFields) > 0 {\n\t\t// Sort since this is a human error\n\t\tsort.Strings(unknownFields)\n\n\t\treturn fmt.Errorf(\"unknown 
fields: %q\", unknownFields)\n\t}\n\n\treturn nil\n}", "func (s *RecordSchema) Validate(v reflect.Value) bool {\n\tv = dereference(v)\n\tif v.Kind() != reflect.Struct || !v.CanAddr() || !v.CanInterface() {\n\t\treturn false\n\t}\n\trec, ok := v.Interface().(GenericRecord)\n\tif !ok {\n\t\t// This is not a generic record and is likely a specific record. Hence\n\t\t// use the basic check.\n\t\treturn v.Kind() == reflect.Struct\n\t}\n\n\tfieldCount := 0\n\tfor key, val := range rec.fields {\n\t\tfor idx := range s.Fields {\n\t\t\t// key.Name must have rs.Fields[idx].Name as a suffix\n\t\t\tif len(s.Fields[idx].Name) <= len(key) {\n\t\t\t\tlhs := key[len(key)-len(s.Fields[idx].Name):]\n\t\t\t\tif lhs == s.Fields[idx].Name {\n\t\t\t\t\tif !s.Fields[idx].Type.Validate(reflect.ValueOf(val)) {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\tfieldCount++\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// All of the fields set must be accounted for in the union.\n\tif fieldCount < len(rec.fields) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (s StructSpec) Validate() error {\n\tfor _, f := range s.Fields {\n\t\terr := f.Validate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (cv *CustomValidator) Validate(i interface{}) error {\n\terrorRes, err := cv.Validator.Struct(i).(validator.ValidationErrors)\n\tif !err {\n\t\treturn nil\n\t}\n\terrorFields := []string{}\n\tfor _, k := range errorRes {\n\t\terrorFields = append(errorFields, k.StructField())\n\t}\n\tif len(errorFields) == 1 {\n\t\treturn errors.New(strings.Join(errorFields, \", \") + \" field is invalid or missing.\")\n\t}\n\treturn errors.New(strings.Join(errorFields, \", \") + \" fields are invalid or missing.\")\n}", "func Validate(v interface{}) error {\n\n\t// returns nil or ValidationErrors ( []FieldError )\n\terr := val.Struct(v)\n\tif err != nil {\n\n\t\t// this check is only needed when your code could produce\n\t\t// an invalid value for validation such as 
interface with nil\n\t\t// value most including myself do not usually have code like this.\n\t\tif _, ok := err.(*validator.InvalidValidationError); ok {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\treturn nil\n}", "func ValidateFields(model interface{}) error {\n\terr := validator.Validate(model)\n\tif err != nil {\n\t\terrs, ok := err.(validator.ErrorMap)\n\t\tif ok {\n\t\t\tfor f, _ := range errs {\n\t\t\t\treturn errors.New(ecodes.ValidateField, constant.ValidateFieldErr+\"-\"+f)\n\t\t\t}\n\t\t} else {\n\t\t\treturn errors.New(ecodes.ValidationUnknown, constant.ValidationUnknownErr)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (v *Validator) ValidateFields(input map[string]string) {\n\tfor field, value := range input {\n\t\t_, found := find(requiredFields, field)\n\t\tif !found {\n\t\t\tv.errors[\"errors\"] = append(v.errors[field], fmt.Sprintf(\"%+v is not valid, check docs for valid fields\", field))\n\t\t}\n\t\t(v.model)[field] = value\n\t}\n}", "func (self *TypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"TypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"TypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"TypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"TypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"TypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (mt *EasypostFieldObject) Validate() (err error) {\n\tif mt.Key == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, 
\"key\"))\n\t}\n\tif mt.Value == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"value\"))\n\t}\n\tif mt.Visibility == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"visibility\"))\n\t}\n\tif mt.Label == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"label\"))\n\t}\n\n\treturn\n}", "func (ti TypeInfo) Validate() error {\n\tif len(ti.Type) == 0 {\n\t\treturn errors.Wrap(ErrValidatingData, \"TypeInfo requires a type\")\n\t}\n\treturn nil\n}", "func ValidateStructFields(in interface{}, requiredFieldIDs []string) (err error) {\n\tvar inAsMap map[string]interface{}\n\ttemp, err := json.Marshal(in)\n\tif err != nil {\n\t\treturn errors.New(\"error validating input struct\")\n\t}\n\terr = json.Unmarshal(temp, &inAsMap)\n\tif err != nil {\n\t\treturn errors.New(\"error validating input struct\")\n\t}\n\n\tfor _, requiredFieldID := range requiredFieldIDs {\n\t\t// Make sure the field is in the data.\n\t\tif val, ok := inAsMap[requiredFieldID]; !ok || len(fmt.Sprintf(\"%v\", val)) == 0 {\n\t\t\treturn errors.New(\"required input field \" + requiredFieldID + \" not specified\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func Validate(value interface{}) error {\n\tv := reflect.Indirect(reflect.ValueOf(value))\n\tt := v.Type()\n\n\t// Look for an IsValid method on value. To check that this IsValid method\n\t// exists, we need to retrieve it with MethodByName, which returns a\n\t// reflect.Value. This reflect.Value, m, has a method that is called\n\t// IsValid as well, which tells us whether v actually represents the\n\t// function we're looking for. But they're two completely different IsValid\n\t// methods. 
Yes, this is confusing.\n\tm := reflect.ValueOf(value).MethodByName(\"IsValid\")\n\tif m.IsValid() {\n\t\te := m.Call([]reflect.Value{})\n\t\terr, ok := e[0].Interface().(error)\n\t\tif ok && err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// For non-struct values, we cannot do much, as there's no associated tags\n\t// to lookup to decide how to validate, so we have to assume they're valid.\n\tif t.Kind() != reflect.Struct {\n\t\treturn nil\n\t}\n\n\t// For struct values, iterate through the fields and use the type of field\n\t// along with its validate tags to decide next steps\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfield := v.Field(i)\n\n\t\tswitch field.Type().Kind() {\n\t\tcase reflect.Struct:\n\t\t\tdv := field.Interface()\n\t\t\tif err := Validate(dv); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tdv := reflect.ValueOf(field.Interface())\n\t\t\tif tag, ok := t.Field(i).Tag.Lookup(\"validate\"); ok {\n\t\t\t\tif err := validate(tag, t.Field(i).Name, v, v.Field(i)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor j := 0; j < dv.Len(); j++ {\n\t\t\t\tif err := Validate(dv.Index(j).Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Map:\n\t\t\tif tag, ok := t.Field(i).Tag.Lookup(\"validate\"); ok {\n\t\t\t\tif err := validate(tag, t.Field(i).Name, v, v.Field(i)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Bool, reflect.Int, reflect.Int64, reflect.Float64, reflect.String:\n\t\t\ttag, ok := t.Field(i).Tag.Lookup(\"validate\")\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := validate(tag, t.Field(i).Name, v, v.Field(i)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase reflect.Chan:\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unimplemented struct field type: %s\", t.Field(i).Name)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *Type1) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn 
errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (u *Usecase) validFields(d *Device) error {\n\tif d.Name == \"\" {\n\t\treturn &InvalidError{\"attribute `Name` must not be empty\"}\n\t}\n\n\tif d.User == 0 {\n\t\treturn &InvalidError{\"invalid user\"}\n\t}\n\n\treturn nil\n}", "func Validate(schema interface{}, errors []map[string]interface{}) {\n\t/**\n\t * create validator instance\n\t */\n\tvalidate := validator.New()\n\n\tif err := validate.Struct(schema); err != nil {\n\t\tif _, ok := err.(*validator.InvalidValidationError); ok {\n\t\t\terrors = append(errors, map[string]interface{}{\n\t\t\t\t\"message\": fmt.Sprint(err), \"flag\": \"INVALID_BODY\"},\n\t\t\t)\n\t\t}\n\n\t\tfor _, err := range err.(validator.ValidationErrors) {\n\t\t\terrors = append(errors, map[string]interface{}{\n\t\t\t\t\"message\": fmt.Sprint(err), \"flag\": \"INVALID_BODY\"},\n\t\t\t)\n\t\t}\n\t\texception.BadRequest(\"Validation error\", errors)\n\t}\n\tif errors != nil {\n\t\texception.BadRequest(\"Validation error\", errors)\n\t}\n}", "func (s *FieldStatsService) Validate() error {\n\tvar invalid []string\n\tif s.level != \"\" && (s.level != FieldStatsIndicesLevel && s.level != FieldStatsClusterLevel) {\n\t\tinvalid = append(invalid, \"Level\")\n\t}\n\tif len(invalid) != 0 {\n\t\treturn fmt.Errorf(\"missing or invalid required fields: %v\", invalid)\n\t}\n\treturn nil\n}", "func (t Type) Validate() error {\n\tswitch t {\n\tcase git:\n\t\treturn nil\n\tcase nop:\n\t\treturn nil\n\tdefault:\n\t\treturn ErrInvalidType\n\t}\n}", "func (time Time) Validate() bool {\n\tret := true\n\tif ret == true && time.hours != (Hours{}) {\n\t\tret = time.hours.Validate()\n\t}\n\n\tif ret == true && time.minutes != (Minutes{}) {\n\t\tret = time.minutes.Validate()\n\t}\n\n\tif ret == true && time.seconds != (Seconds{}) {\n\t\tret = time.seconds.Validate()\n\t}\n\n\tif ret == true && time.delay != (Delay{}) {\n\t\tret = time.delay.Validate()\n\t}\n\n\tif ret != true 
{\n\t\tlog.Println(\"Failed to validate time '\" + time.value + \"'\")\n\t}\n\treturn ret\n}", "func (p *Publication) IsValidFields() error {\n\tif p.Content != \"\" {\n\t\treturn nil\n\t}\n\treturn errorstatus.ErrorBadInfo\n\n}", "func (a Relayer) Validate() error {\n\treturn validation.ValidateStruct(&a,\n\t\tvalidation.Field(&a.Address, validation.Required),\n\t)\n}", "func (builder *Builder) ValidateFields() error {\n\tvmImageRefFields := []string{\"ImageSku\", \"ImageVersion\"}\n\tcustomVMIMageRefFields := []string{\"Image\", \"ImageResourceGroup\", \"ImageStorageAccount\", \"ImageContainer\"}\n\n\tif !builder.hasMarketplaceVMImageRef() && !builder.hasCustomVMIMageRef() {\n\t\treturn fmt.Errorf(\n\t\t\t\"missing fields: you must provide values for either %s fields or %s fields\",\n\t\t\tstrings.Join(vmImageRefFields, \", \"),\n\t\t\tstrings.Join(customVMIMageRefFields, \", \"),\n\t\t)\n\t}\n\n\tif builder.hasMarketplaceVMImageRef() && builder.hasCustomVMIMageRef() {\n\t\treturn fmt.Errorf(\n\t\t\t\"confilicting fields: you must provide values for either %s fields or %s fields\",\n\t\t\tstrings.Join(vmImageRefFields, \", \"),\n\t\t\tstrings.Join(customVMIMageRefFields, \", \"),\n\t\t)\n\t}\n\n\treturn nil\n}", "func (self *NumberTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"NumberTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"NumberTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"NumberTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"NumberTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif 
!val.Valid {\n\t\t\treturn fmt.Errorf(\"NumberTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (mt *EasypostCarrierTypes) Validate() (err error) {\n\tif mt.Type == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"type\"))\n\t}\n\tif mt.Object == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"object\"))\n\t}\n\tif mt.Fields == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"fields\"))\n\t}\n\n\tif ok := goa.ValidatePattern(`^CarrierType$`, mt.Object); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.object`, mt.Object, `^CarrierType$`))\n\t}\n\treturn\n}", "func (t *Transform) Validate() *field.Error {\n\tswitch t.Type {\n\tcase TransformTypeMath:\n\t\tif t.Math == nil {\n\t\t\treturn field.Required(field.NewPath(\"math\"), \"given transform type math requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.Math.Validate(), field.NewPath(\"math\"))\n\tcase TransformTypeMap:\n\t\tif t.Map == nil {\n\t\t\treturn field.Required(field.NewPath(\"map\"), \"given transform type map requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.Map.Validate(), field.NewPath(\"map\"))\n\tcase TransformTypeMatch:\n\t\tif t.Match == nil {\n\t\t\treturn field.Required(field.NewPath(\"match\"), \"given transform type match requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.Match.Validate(), field.NewPath(\"match\"))\n\tcase TransformTypeString:\n\t\tif t.String == nil {\n\t\t\treturn field.Required(field.NewPath(\"string\"), \"given transform type string requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.String.Validate(), field.NewPath(\"string\"))\n\tcase TransformTypeConvert:\n\t\tif t.Convert == nil {\n\t\t\treturn field.Required(field.NewPath(\"convert\"), \"given transform type convert requires configuration\")\n\t\t}\n\t\tif err := 
t.Convert.Validate(); err != nil {\n\t\t\treturn verrors.WrapFieldError(err, field.NewPath(\"convert\"))\n\t\t}\n\tdefault:\n\t\t// Should never happen\n\t\treturn field.Invalid(field.NewPath(\"type\"), t.Type, \"unknown transform type\")\n\t}\n\n\treturn nil\n}", "func (strategy UpdateScatterStrategy) FieldsValidation() error {\n\tif len(strategy) == 0 {\n\t\treturn nil\n\t}\n\n\tm := make(map[string]struct{}, len(strategy))\n\tfor _, term := range strategy {\n\t\tif term.Key == \"\" {\n\t\t\treturn fmt.Errorf(\"key should not be empty\")\n\t\t}\n\t\tid := term.Key + \":\" + term.Value\n\t\tif _, ok := m[id]; !ok {\n\t\t\tm[id] = struct{}{}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"duplicated key=%v value=%v\", term.Key, term.Value)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (self *StructTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"StructTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"StructTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Fields == nil {\n\t\treturn fmt.Errorf(\"StructTypeDef: Missing required field: fields\")\n\t}\n\treturn nil\n}", "func (self *MapTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", 
self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Keys == \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.keys is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Keys)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.keys does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Items == \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.items is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Items)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.items does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (s StructInCustom) Validate() []string {\n\tvar errs []string\n\tif s.Name == \"\" {\n\t\terrs = append(errs, \"name::is_required\")\n\t}\n\n\treturn errs\n}", "func (cv Validator) Validate(i interface{}) error {\n\treturn cv.Validator.Struct(i)\n}", "func Validate(v interface{}) (error, bool) {\n\tresult, err := govalidator.ValidateStruct(v)\n\tif err != nil {\n\t\tlog.Println(\"Invalid data\", err)\n\t}\n\treturn err, result\n}", "func validateFieldDurations(fl validator.FieldLevel) bool {\n\tv := fl.Field().Bool()\n\tif v {\n\t\t//read the parameter and extract the other fields that were specified\n\t\tparam := fl.Param()\n\t\tfields := strings.Fields(param)\n\t\tfor _, field := range 
fields {\n\t\t\t//check if the field is set\n\t\t\tstructField, _, _, ok := fl.GetStructFieldOKAdvanced2(fl.Parent(), field)\n\t\t\tif !ok || structField.IsZero() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func (h *HazardType) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\terrors := validate.Validate(\n\t\t&validators.StringIsPresent{Name: \"Label\", Field: h.Label, Message: \"A label is required.\"},\n\t\t&validators.StringIsPresent{Name: \"Description\", Field: h.Description, Message: \"Please provide a brief description.\"},\n\t)\n\n\treturn errors, nil\n}", "func (tS *testAInfo) Validate(msg actor.Msg) bool {\n\tswitch m := msg[0].(type) {\n\tcase int:\n\t\tif m > 0 && m < 10 {\n\t\t\treturn true\n\t\t}\n\tdefault:\n\t\tfor _, datum := range tS.allowed {\n\t\t\tif reflect.TypeOf(msg[0]) ==\n\t\t\t\treflect.TypeOf(datum) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\t// Does not match a valid type\n\treturn false\n}", "func (ut *RegisterPayload) Validate() (err error) {\n\tif ut.Email == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"email\"))\n\t}\n\tif ut.Password == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"password\"))\n\t}\n\tif ut.FirstName == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"first_name\"))\n\t}\n\tif ut.LastName == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"last_name\"))\n\t}\n\tif err2 := goa.ValidateFormat(goa.FormatEmail, ut.Email); err2 != nil {\n\t\terr = goa.MergeErrors(err, goa.InvalidFormatError(`type.email`, ut.Email, goa.FormatEmail, err2))\n\t}\n\tif utf8.RuneCountInString(ut.Email) < 6 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.email`, ut.Email, utf8.RuneCountInString(ut.Email), 6, true))\n\t}\n\tif utf8.RuneCountInString(ut.Email) > 150 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.email`, ut.Email, utf8.RuneCountInString(ut.Email), 150, 
false))\n\t}\n\tif utf8.RuneCountInString(ut.FirstName) < 1 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.first_name`, ut.FirstName, utf8.RuneCountInString(ut.FirstName), 1, true))\n\t}\n\tif utf8.RuneCountInString(ut.FirstName) > 200 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.first_name`, ut.FirstName, utf8.RuneCountInString(ut.FirstName), 200, false))\n\t}\n\tif utf8.RuneCountInString(ut.LastName) < 1 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.last_name`, ut.LastName, utf8.RuneCountInString(ut.LastName), 1, true))\n\t}\n\tif utf8.RuneCountInString(ut.LastName) > 200 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.last_name`, ut.LastName, utf8.RuneCountInString(ut.LastName), 200, false))\n\t}\n\tif utf8.RuneCountInString(ut.Password) < 5 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.password`, ut.Password, utf8.RuneCountInString(ut.Password), 5, true))\n\t}\n\tif utf8.RuneCountInString(ut.Password) > 100 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.password`, ut.Password, utf8.RuneCountInString(ut.Password), 100, false))\n\t}\n\treturn\n}", "func (u Phone) Validate() error {\n\treturn nil\n\t// return validation.ValidateStruct(&u,\n\t// \tvalidation.Field(&u.Name, validation.Required),\n\t// \tvalidation.Field(&u.Created, validation.Required))\n}", "func (r *InfoReq) Validate() error {\n\treturn validate.Struct(r)\n}", "func (r *RouteSpecFields) Validate(ctx context.Context) (errs *apis.FieldError) {\n\n\tif r.Domain == \"\" {\n\t\terrs = errs.Also(apis.ErrMissingField(\"domain\"))\n\t}\n\n\tif r.Hostname == \"www\" {\n\t\terrs = errs.Also(apis.ErrInvalidValue(\"hostname\", r.Hostname))\n\t}\n\n\tif _, err := BuildPathRegexp(r.Path); err != nil {\n\t\terrs = errs.Also(apis.ErrInvalidValue(\"path\", r.Path))\n\t}\n\n\treturn errs\n}", "func (mt *EasypostScanform) Validate() (err error) {\n\tif mt.Address != nil {\n\t\tif err2 := mt.Address.Validate(); err2 != 
nil {\n\t\t\terr = goa.MergeErrors(err, err2)\n\t\t}\n\t}\n\tif mt.ID != nil {\n\t\tif ok := goa.ValidatePattern(`^sf_`, *mt.ID); !ok {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.id`, *mt.ID, `^sf_`))\n\t\t}\n\t}\n\tif ok := goa.ValidatePattern(`^ScanForm$`, mt.Object); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.object`, mt.Object, `^ScanForm$`))\n\t}\n\tif mt.Status != nil {\n\t\tif !(*mt.Status == \"creating\" || *mt.Status == \"created\" || *mt.Status == \"failed\") {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(`response.status`, *mt.Status, []interface{}{\"creating\", \"created\", \"failed\"}))\n\t\t}\n\t}\n\treturn\n}", "func Validate(schema interface{}) {\n\tvalidate := validator.New()\n\n\tif err := validate.Struct(schema); err != nil {\n\t\tif _, ok := err.(*validator.InvalidValidationError); ok {\n\t\t\texception.BadRequest(fmt.Sprint(err), \"INVALID_BODY\")\n\t\t}\n\n\t\tfor _, err := range err.(validator.ValidationErrors) {\n\t\t\texception.BadRequest(fmt.Sprint(err), \"INVALID_BODY\")\n\t\t}\n\t}\n}", "func (v *Validation) Validate(i interface{}) ValidationErrors {\n\terrs := v.validate.Struct(i)\n\tif errs == nil {\n\t\treturn nil\n\t}\n\n\tvar returnErrs ValidationErrors\n\tfor _, err := range errs.(validator.ValidationErrors) {\n\t\t// cast the FieldError into our ValidationError and append to the slice\n\t\tve := ValidationError{err.(validator.FieldError)}\n\t\treturnErrs = append(returnErrs, ve)\n\t}\n\treturn returnErrs\n}", "func (s *MemberDefinition) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"MemberDefinition\"}\n\tif s.CognitoMemberDefinition != nil {\n\t\tif err := s.CognitoMemberDefinition.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"CognitoMemberDefinition\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.OidcMemberDefinition != nil {\n\t\tif err := s.OidcMemberDefinition.Validate(); err != nil 
{\n\t\t\tinvalidParams.AddNested(\"OidcMemberDefinition\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (m *MeasurementType) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (s *UnionSchema) Validate(v reflect.Value) bool {\n\tv = dereference(v)\n\tfor i := range s.Types {\n\t\tif t := s.Types[i]; t.Validate(v) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (u *User) Validate() *errors.RestError {\n\tif err := validators.ValidateStruct(u); err != nil {\n\t\treturn err\n\t}\n\t// Sanitize Structure\n\tu.FirstName = strings.TrimSpace(u.FirstName)\n\tu.LastName = strings.TrimSpace(u.LastName)\n\tu.Email = strings.TrimSpace(u.Email)\n\tu.Username = strings.TrimSpace(u.Username)\n\tu.Password = strings.TrimSpace(u.Password)\n\t// Check password\n\tif err := u.validatePassword(); err != nil {\n\t\treturn err\n\t}\n\t// Check uniqueness\n\tif err := u.checkUniqueness(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (DirectorBindStrategy) Validate(ctx request.Context, obj runtime.Object) field.ErrorList {\n\to := obj.(*bind.DirectorBind)\n\tlog.Printf(\"Validating fields for DirectorBind %s\\n\", o.Name)\n\terrors := field.ErrorList{}\n\t// perform validation here and add to errors using field.Invalid\n\treturn errors\n}", "func (t *Test1) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.Validate(\n\t\t&validators.IntIsPresent{Field: t.Field1, Name: \"Field1\"},\n\t), nil\n}", "func (t ConvertTransform) Validate() *field.Error {\n\tif !t.GetFormat().IsValid() {\n\t\treturn field.Invalid(field.NewPath(\"format\"), t.Format, \"invalid format\")\n\t}\n\tif !t.ToType.IsValid() {\n\t\treturn field.Invalid(field.NewPath(\"toType\"), t.ToType, \"invalid type\")\n\t}\n\treturn nil\n}", "func (conf TypeConfig) Validate() error {\n\tfor _, rule := range conf.Rules {\n\t\td, ok := conf.Descriptors[rule.Descriptor]\n\t\tif 
!ok {\n\t\t\treturn fmt.Errorf(\"rule %s=%s uses descriptor %s that does not exist\", rule.Name, rule.Value, rule.Descriptor)\n\t\t}\n\t\tif !hasField(rule.Name, d) {\n\t\t\treturn fmt.Errorf(\"rule %s refers to field %s that is not present in descriptor\", rule.Descriptor, rule.Name)\n\t\t}\n\n\t}\n\tfor name, desc := range conf.Descriptors {\n\t\tfor i, d := range desc {\n\t\t\tcol, ok := d.(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"descriptor %s has invalid structure in element %d\", name, i)\n\t\t\t}\n\t\t\tif col[\"name\"] == \"ts\" && col[\"type\"] != \"time\" {\n\t\t\t\treturn fmt.Errorf(\"descriptor %s has field ts with wrong type %s\", name, col[\"type\"])\n\t\t\t}\n\t\t}\n\t\tcol := desc[0].(map[string]interface{})\n\t\tif col[\"name\"] != \"_path\" {\n\t\t\treturn fmt.Errorf(\"descriptor %s does not have _path as first column\", name)\n\t\t}\n\t}\n\treturn nil\n}", "func (m APIStepType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateAPIStepTypeEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (r *Version) Validate() error {\n\n\tR := *r\n\tif len(R) > 4 {\n\t\treturn errors.New(\"Version field may not contain more than 4 fields\")\n\t}\n\tif len(R) < 3 {\n\t\treturn errors.New(\"Version field must contain at least 3 fields\")\n\t}\n\tfor i, x := range R[:3] {\n\t\tn, ok := x.(int)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Version field %d is not an integer: %d\", i, n)\n\t\t}\n\t\tif n > 99 {\n\t\t\treturn fmt.Errorf(\"Version field %d value is over 99: %d\", i, n)\n\t\t}\n\t}\n\tif len(R) > 3 {\n\t\ts, ok := R[3].(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"optional field 4 of Version is not a string\")\n\t\t} else {\n\t\t\tfor i, x := range s {\n\t\t\t\tif !(unicode.IsLetter(x) || unicode.IsDigit(x)) {\n\t\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\t\"optional 
field 4 of Version contains other than letters and numbers at position %d: '%v,\", i, x)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (s *ListAggregatedUtterancesInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"ListAggregatedUtterancesInput\"}\n\tif s.AggregationDuration == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"AggregationDuration\"))\n\t}\n\tif s.BotAliasId != nil && len(*s.BotAliasId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasId\", 10))\n\t}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.Filters != nil && len(s.Filters) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Filters\", 1))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.MaxResults != nil && *s.MaxResults < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinValue(\"MaxResults\", 1))\n\t}\n\tif s.AggregationDuration != nil {\n\t\tif err := s.AggregationDuration.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"AggregationDuration\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Filters != nil {\n\t\tfor i, v := range s.Filters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Filters\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.SortBy != nil {\n\t\tif err := s.SortBy.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SortBy\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *CreateSlotTypeInput) Validate() error 
{\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateSlotTypeInput\"}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotVersion\"))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 5 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 5))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.LocaleId != nil && len(*s.LocaleId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"LocaleId\", 1))\n\t}\n\tif s.SlotTypeName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SlotTypeName\"))\n\t}\n\tif s.SlotTypeName != nil && len(*s.SlotTypeName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeName\", 1))\n\t}\n\tif s.SlotTypeValues != nil && len(s.SlotTypeValues) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeValues\", 1))\n\t}\n\tif s.CompositeSlotTypeSetting != nil {\n\t\tif err := s.CompositeSlotTypeSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"CompositeSlotTypeSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.ExternalSourceSetting != nil {\n\t\tif err := s.ExternalSourceSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ExternalSourceSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SlotTypeValues != nil {\n\t\tfor i, v := range s.SlotTypeValues {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"SlotTypeValues\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ValueSelectionSetting != nil {\n\t\tif err := s.ValueSelectionSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ValueSelectionSetting\", 
err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *OrderBy) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"OrderBy\"}\n\tif s.PropertyName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"PropertyName\"))\n\t}\n\tif s.PropertyName != nil && len(*s.PropertyName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"PropertyName\", 1))\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (m *StripeRefundSpecificFields) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m ModelErrorDatumType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateModelErrorDatumTypeEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (s *CreateBotAliasInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateBotAliasInput\"}\n\tif s.BotAliasLocaleSettings != nil && len(s.BotAliasLocaleSettings) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasLocaleSettings\", 1))\n\t}\n\tif s.BotAliasName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotAliasName\"))\n\t}\n\tif s.BotAliasName != nil && len(*s.BotAliasName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasName\", 1))\n\t}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.BotAliasLocaleSettings != nil {\n\t\tfor i, v := range 
s.BotAliasLocaleSettings {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"BotAliasLocaleSettings\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ConversationLogSettings != nil {\n\t\tif err := s.ConversationLogSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ConversationLogSettings\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SentimentAnalysisSettings != nil {\n\t\tif err := s.SentimentAnalysisSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SentimentAnalysisSettings\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func Validate(ctx http.IContext, vld *validator.Validate, arg interface{}) bool {\n\n\tif err := ctx.GetRequest().GetBodyAs(arg); err != nil {\n\t\thttp.InternalServerException(ctx)\n\t\treturn false\n\t}\n\n\tswitch err := vld.Struct(arg); err.(type) {\n\tcase validator.ValidationErrors:\n\t\thttp.FailedValidationException(ctx, err.(validator.ValidationErrors))\n\t\treturn false\n\n\tcase nil:\n\t\tbreak\n\n\tdefault:\n\t\thttp.InternalServerException(ctx)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (a *Account) Validate() error {\n\tvalidate := validator.New()\n\treturn validate.Struct(a)\n}", "func (s *CreateMemberInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateMemberInput\"}\n\tif s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ClientRequestToken\", 1))\n\t}\n\tif s.InvitationId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"InvitationId\"))\n\t}\n\tif s.InvitationId != nil && len(*s.InvitationId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"InvitationId\", 1))\n\t}\n\tif s.MemberConfiguration == nil 
{\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"MemberConfiguration\"))\n\t}\n\tif s.NetworkId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"NetworkId\"))\n\t}\n\tif s.NetworkId != nil && len(*s.NetworkId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"NetworkId\", 1))\n\t}\n\tif s.MemberConfiguration != nil {\n\t\tif err := s.MemberConfiguration.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"MemberConfiguration\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (ut *UpdateUserPayload) Validate() (err error) {\n\tif ut.Name == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"name\"))\n\t}\n\tif ut.Email == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"email\"))\n\t}\n\tif ut.Bio == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"bio\"))\n\t}\n\tif err2 := goa.ValidateFormat(goa.FormatEmail, ut.Email); err2 != nil {\n\t\terr = goa.MergeErrors(err, goa.InvalidFormatError(`type.email`, ut.Email, goa.FormatEmail, err2))\n\t}\n\tif ok := goa.ValidatePattern(`\\S`, ut.Name); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`type.name`, ut.Name, `\\S`))\n\t}\n\tif utf8.RuneCountInString(ut.Name) > 256 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.name`, ut.Name, utf8.RuneCountInString(ut.Name), 256, false))\n\t}\n\treturn\n}", "func (o *Virtualserver) validate(dbRecord *common.DbRecord) (ok bool, err error) {\n\t////////////////////////////////////////////////////////////////////////////\n\t// Marshal data interface.\n\t////////////////////////////////////////////////////////////////////////////\n\tvar data virtualserver.Data\n\terr = shared.MarshalInterface(dbRecord.Data, &data)\n\tif err != nil {\n\t\treturn\n\t}\n\t////////////////////////////////////////////////////////////////////////////\n\t// Test required 
fields.\n\t////////////////////////////////////////////////////////////////////////////\n\tok = true\n\trequired := make(map[string]bool)\n\trequired[\"ProductCode\"] = false\n\trequired[\"IP\"] = false\n\trequired[\"Port\"] = false\n\trequired[\"LoadBalancerIP\"] = false\n\trequired[\"Name\"] = false\n\t////////////////////////////////////////////////////////////////////////////\n\tif data.ProductCode != 0 {\n\t\trequired[\"ProductCode\"] = true\n\t}\n\tif len(dbRecord.LoadBalancerIP) > 0 {\n\t\trequired[\"LoadBalancerIP\"] = true\n\t}\n\tif len(data.Ports) != 0 {\n\t\trequired[\"Port\"] = true\n\t}\n\tif data.IP != \"\" {\n\t\trequired[\"IP\"] = true\n\t}\n\tif data.Name != \"\" {\n\t\trequired[\"Name\"] = true\n\t}\n\tfor _, val := range required {\n\t\tif val == false {\n\t\t\tok = false\n\t\t}\n\t}\n\tif !ok {\n\t\terr = fmt.Errorf(\"missing required fields - %+v\", required)\n\t}\n\treturn\n}", "func Validate(t interface{}) error {\n\treturn validator.Struct(t)\n}", "func (m *ColumnDetails) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateKeyType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSortOrder(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValueType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (cv *CustomValidator) Validate(i interface{}) error {\n\treturn cv.Validator.Struct(i)\n}", "func (cv *CustomValidator) Validate(i interface{}) error {\n\treturn cv.Validator.Struct(i)\n}", "func (s *WriteRecordsInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"WriteRecordsInput\"}\n\tif s.DatabaseName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"DatabaseName\"))\n\t}\n\tif s.Records == nil 
{\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"Records\"))\n\t}\n\tif s.Records != nil && len(s.Records) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Records\", 1))\n\t}\n\tif s.TableName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"TableName\"))\n\t}\n\tif s.CommonAttributes != nil {\n\t\tif err := s.CommonAttributes.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"CommonAttributes\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Records != nil {\n\t\tfor i, v := range s.Records {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Records\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (m *HashType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateFunction(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMethod(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateModifier(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (s *CognitoMemberDefinition) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CognitoMemberDefinition\"}\n\tif s.ClientId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ClientId\"))\n\t}\n\tif s.ClientId != nil && len(*s.ClientId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ClientId\", 1))\n\t}\n\tif s.UserGroup == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"UserGroup\"))\n\t}\n\tif s.UserGroup != nil && len(*s.UserGroup) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"UserGroup\", 1))\n\t}\n\tif s.UserPool == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"UserPool\"))\n\t}\n\tif s.UserPool != nil && 
len(*s.UserPool) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"UserPool\", 1))\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (recipe *Recipe) Validate() error {\n\tvalidate := validator.New()\n\treturn validate.Struct(recipe)\n}", "func (s *CreateInferenceExperimentInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateInferenceExperimentInput\"}\n\tif s.EndpointName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"EndpointName\"))\n\t}\n\tif s.ModelVariants == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ModelVariants\"))\n\t}\n\tif s.ModelVariants != nil && len(s.ModelVariants) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ModelVariants\", 1))\n\t}\n\tif s.Name == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"Name\"))\n\t}\n\tif s.Name != nil && len(*s.Name) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Name\", 1))\n\t}\n\tif s.RoleArn == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"RoleArn\"))\n\t}\n\tif s.RoleArn != nil && len(*s.RoleArn) < 20 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"RoleArn\", 20))\n\t}\n\tif s.ShadowModeConfig == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ShadowModeConfig\"))\n\t}\n\tif s.Type == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"Type\"))\n\t}\n\tif s.DataStorageConfig != nil {\n\t\tif err := s.DataStorageConfig.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"DataStorageConfig\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.ModelVariants != nil {\n\t\tfor i, v := range s.ModelVariants {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"ModelVariants\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ShadowModeConfig != nil {\n\t\tif err := s.ShadowModeConfig.Validate(); err != nil 
{\n\t\t\tinvalidParams.AddNested(\"ShadowModeConfig\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Tags != nil {\n\t\tfor i, v := range s.Tags {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Tags\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *UpdateSlotTypeInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"UpdateSlotTypeInput\"}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotVersion\"))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 5 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 5))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.LocaleId != nil && len(*s.LocaleId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"LocaleId\", 1))\n\t}\n\tif s.SlotTypeId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SlotTypeId\"))\n\t}\n\tif s.SlotTypeId != nil && len(*s.SlotTypeId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeId\", 10))\n\t}\n\tif s.SlotTypeName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SlotTypeName\"))\n\t}\n\tif s.SlotTypeName != nil && len(*s.SlotTypeName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeName\", 1))\n\t}\n\tif s.SlotTypeValues != nil && len(s.SlotTypeValues) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeValues\", 1))\n\t}\n\tif s.CompositeSlotTypeSetting != nil {\n\t\tif err := s.CompositeSlotTypeSetting.Validate(); err != nil 
{\n\t\t\tinvalidParams.AddNested(\"CompositeSlotTypeSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.ExternalSourceSetting != nil {\n\t\tif err := s.ExternalSourceSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ExternalSourceSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SlotTypeValues != nil {\n\t\tfor i, v := range s.SlotTypeValues {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"SlotTypeValues\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ValueSelectionSetting != nil {\n\t\tif err := s.ValueSelectionSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ValueSelectionSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func Validate(obj interface{}) (map[string]interface{}, bool) {\n\n\trules := govalidator.MapData{\n\t\t\"name\": []string{\"required\", \"between:3,150\"},\n\t\t//\"email\": []string{\"required\", \"min:4\", \"max:20\", \"email\"},\n\t\t//\"web\": []string{\"url\"},\n\t\t//\"age\": []string{\"numeric_between:18,56\"},\n\t}\n\n\treturn validate.Validate(rules, obj)\n}", "func (u *User) Validate() ([]app.Invalid, error) {\n\tvar inv []app.Invalid\n\n\tif u.UserType == 0 {\n\t\tinv = append(inv, app.Invalid{Fld: \"UserType\", Err: \"The value of UserType cannot be 0.\"})\n\t}\n\n\tif u.FirstName == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"FirstName\", Err: \"A value of FirstName cannot be empty.\"})\n\t}\n\n\tif u.LastName == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"LastName\", Err: \"A value of LastName cannot be empty.\"})\n\t}\n\n\tif u.Email == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"Email\", Err: \"A value of Email cannot be empty.\"})\n\t}\n\n\tif u.Company == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"Company\", Err: \"A value of Company cannot 
be empty.\"})\n\t}\n\n\tif len(u.Addresses) == 0 {\n\t\tinv = append(inv, app.Invalid{Fld: \"Addresses\", Err: \"There must be at least one address.\"})\n\t} else {\n\t\tfor _, ua := range u.Addresses {\n\t\t\tif va, err := ua.Validate(); err != nil {\n\t\t\t\tinv = append(inv, va...)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(inv) > 0 {\n\t\treturn inv, errors.New(\"Validation failures identified\")\n\t}\n\n\treturn nil, nil\n}", "func (s *GetPropertyValueHistoryInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"GetPropertyValueHistoryInput\"}\n\tif s.ComponentName != nil && len(*s.ComponentName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ComponentName\", 1))\n\t}\n\tif s.ComponentTypeId != nil && len(*s.ComponentTypeId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ComponentTypeId\", 1))\n\t}\n\tif s.EndTime != nil && len(*s.EndTime) < 20 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"EndTime\", 20))\n\t}\n\tif s.EntityId != nil && len(*s.EntityId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"EntityId\", 1))\n\t}\n\tif s.PropertyFilters != nil && len(s.PropertyFilters) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"PropertyFilters\", 1))\n\t}\n\tif s.SelectedProperties == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SelectedProperties\"))\n\t}\n\tif s.SelectedProperties != nil && len(s.SelectedProperties) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SelectedProperties\", 1))\n\t}\n\tif s.StartTime != nil && len(*s.StartTime) < 20 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"StartTime\", 20))\n\t}\n\tif s.WorkspaceId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"WorkspaceId\"))\n\t}\n\tif s.WorkspaceId != nil && len(*s.WorkspaceId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"WorkspaceId\", 1))\n\t}\n\tif s.PropertyFilters != nil {\n\t\tfor i, v := range s.PropertyFilters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif 
err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"PropertyFilters\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (v *validator) Validate(val interface{}) (bool, *domain.NuxError) {\n\tif l, ok := val.(int); ok {\n\t\treturn v.validateInt(l)\n\t}\n\n\tif l, ok := val.(int64); ok {\n\t\treturn v.validateInt64(l)\n\t}\n\n\tif l, ok := val.(float64); ok {\n\t\treturn v.validateFloat64(l)\n\t}\n\n\tif l, ok := val.(float32); ok {\n\t\treturn v.validateFloat32(l)\n\t}\n\n\treturn true, nil\n}", "func (d *Definition) Validate() (bool, error) {\n\treturn govalidator.ValidateStruct(d)\n}", "func (s *ServiceCatalogProvisioningDetails) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"ServiceCatalogProvisioningDetails\"}\n\tif s.PathId != nil && len(*s.PathId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"PathId\", 1))\n\t}\n\tif s.ProductId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ProductId\"))\n\t}\n\tif s.ProductId != nil && len(*s.ProductId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ProductId\", 1))\n\t}\n\tif s.ProvisioningArtifactId != nil && len(*s.ProvisioningArtifactId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ProvisioningArtifactId\", 1))\n\t}\n\tif s.ProvisioningParameters != nil {\n\t\tfor i, v := range s.ProvisioningParameters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"ProvisioningParameters\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (self *AliasTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"AliasTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := 
Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"AliasTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"AliasTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"AliasTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"AliasTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *ListSlotTypesInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"ListSlotTypesInput\"}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotVersion\"))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.Filters != nil && len(s.Filters) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Filters\", 1))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.LocaleId != nil && len(*s.LocaleId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"LocaleId\", 1))\n\t}\n\tif s.MaxResults != nil && *s.MaxResults < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinValue(\"MaxResults\", 1))\n\t}\n\tif s.Filters != nil {\n\t\tfor i, v := range s.Filters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Filters\", i), 
err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.SortBy != nil {\n\t\tif err := s.SortBy.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SortBy\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *UpdateBotAliasInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"UpdateBotAliasInput\"}\n\tif s.BotAliasId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotAliasId\"))\n\t}\n\tif s.BotAliasId != nil && len(*s.BotAliasId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasId\", 10))\n\t}\n\tif s.BotAliasLocaleSettings != nil && len(s.BotAliasLocaleSettings) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasLocaleSettings\", 1))\n\t}\n\tif s.BotAliasName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotAliasName\"))\n\t}\n\tif s.BotAliasName != nil && len(*s.BotAliasName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasName\", 1))\n\t}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.BotAliasLocaleSettings != nil {\n\t\tfor i, v := range s.BotAliasLocaleSettings {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"BotAliasLocaleSettings\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ConversationLogSettings != nil {\n\t\tif err := s.ConversationLogSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ConversationLogSettings\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SentimentAnalysisSettings != nil {\n\t\tif err := 
s.SentimentAnalysisSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SentimentAnalysisSettings\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (v *Validator) Validate(i interface{}) error {\n\treturn v.validator.Struct(i)\n}", "func (s *CreateProfileInput) Validate() error {\n\tinvalidParams := aws.ErrInvalidParams{Context: \"CreateProfileInput\"}\n\n\tif s.Address == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Address\"))\n\t}\n\tif s.Address != nil && len(*s.Address) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"Address\", 1))\n\t}\n\tif s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 10 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"ClientRequestToken\", 10))\n\t}\n\tif len(s.DistanceUnit) == 0 {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"DistanceUnit\"))\n\t}\n\tif s.Locale != nil && len(*s.Locale) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"Locale\", 1))\n\t}\n\n\tif s.ProfileName == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"ProfileName\"))\n\t}\n\tif s.ProfileName != nil && len(*s.ProfileName) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"ProfileName\", 1))\n\t}\n\tif len(s.TemperatureUnit) == 0 {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"TemperatureUnit\"))\n\t}\n\n\tif s.Timezone == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Timezone\"))\n\t}\n\tif s.Timezone != nil && len(*s.Timezone) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"Timezone\", 1))\n\t}\n\tif len(s.WakeWord) == 0 {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"WakeWord\"))\n\t}\n\tif s.MeetingRoomConfiguration != nil {\n\t\tif err := s.MeetingRoomConfiguration.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"MeetingRoomConfiguration\", err.(aws.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Tags != nil {\n\t\tfor i, v := range s.Tags {\n\t\t\tif err := v.Validate(); err != nil 
{\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Tags\", i), err.(aws.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (l *logger) Validate() error {\n\tif l == nil {\n\t\treturn nil\n\t}\n\tif err := l.Console.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"`Console` field: %s\", err.Error())\n\t}\n\tif err := l.File.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"`File` field: %s\", err.Error())\n\t}\n\treturn nil\n}", "func (self *ArrayTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"ArrayTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"ArrayTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Items == \"\" {\n\t\treturn fmt.Errorf(\"ArrayTypeDef.items is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Items)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.items does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *RegexMatchTuple) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"RegexMatchTuple\"}\n\tif s.FieldToMatch == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"FieldToMatch\"))\n\t}\n\tif s.RegexPatternSetId == nil 
{\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"RegexPatternSetId\"))\n\t}\n\tif s.RegexPatternSetId != nil && len(*s.RegexPatternSetId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"RegexPatternSetId\", 1))\n\t}\n\tif s.TextTransformation == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"TextTransformation\"))\n\t}\n\tif s.FieldToMatch != nil {\n\t\tif err := s.FieldToMatch.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"FieldToMatch\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (r *RecordValidator) Validate(i interface{}) error {\r\n\treturn r.validator.Struct(i)\r\n}", "func (s *Service) Validate() error {\n\tnonEmptyFields := map[string]checker{\n\t\t\"Name\": checker{s.Name, true},\n\t\t\"Type\": checker{s.Type.String(), false}, // Type is a enum, no need to check\n\t\t\"Owner\": checker{s.Owner, true},\n\t\t\"ClusterType\": checker{s.ClusterType, true},\n\t\t\"InstanceName\": checker{s.InstanceName.String(), true},\n\t}\n\n\tfor label, field := range nonEmptyFields {\n\t\tif field.val == \"\" {\n\t\t\treturn fmt.Errorf(errorTmpl, label+\" is empty\")\n\t\t} else if field.checkSeparator && strings.Contains(field.val, keyPartSeparator) {\n\t\t\treturn fmt.Errorf(errorTmpl, label+separatorErrorMsg)\n\t\t}\n\t}\n\n\tswitch {\n\tcase len([]rune(s.Name)) > maxServiceNameLen:\n\t\treturn fmt.Errorf(errorTmpl, fmt.Sprintf(\"Name %q is too long, max len is %d symbols\", s.Name, maxServiceNameLen))\n\tcase !reRolloutType.MatchString(s.RolloutType):\n\t\treturn fmt.Errorf(errorTmpl, \"RolloutType is invalid\")\n\t}\n\treturn nil\n}", "func (t *Visibility_Visibility) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"Visibility_Visibility\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *UpdateWorkteamInput) Validate() error {\n\tinvalidParams := 
request.ErrInvalidParams{Context: \"UpdateWorkteamInput\"}\n\tif s.Description != nil && len(*s.Description) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Description\", 1))\n\t}\n\tif s.MemberDefinitions != nil && len(s.MemberDefinitions) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"MemberDefinitions\", 1))\n\t}\n\tif s.WorkteamName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"WorkteamName\"))\n\t}\n\tif s.WorkteamName != nil && len(*s.WorkteamName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"WorkteamName\", 1))\n\t}\n\tif s.MemberDefinitions != nil {\n\t\tfor i, v := range s.MemberDefinitions {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"MemberDefinitions\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *SlotTypeValue) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"SlotTypeValue\"}\n\tif s.Synonyms != nil && len(s.Synonyms) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Synonyms\", 1))\n\t}\n\tif s.SampleValue != nil {\n\t\tif err := s.SampleValue.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SampleValue\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Synonyms != nil {\n\t\tfor i, v := range s.Synonyms {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Synonyms\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}" ]
[ "0.6366166", "0.6255708", "0.62440985", "0.6219268", "0.6205969", "0.6186602", "0.61787015", "0.6151207", "0.6135345", "0.6129121", "0.61265224", "0.61265224", "0.60985357", "0.60598147", "0.60547787", "0.60132855", "0.5993056", "0.5990731", "0.59752667", "0.59422064", "0.59114707", "0.59090024", "0.5889592", "0.58741313", "0.5829609", "0.58170855", "0.58096683", "0.58095896", "0.58095545", "0.58024305", "0.5794755", "0.57862866", "0.57858443", "0.57791334", "0.5764243", "0.57606256", "0.57459706", "0.5732621", "0.5724816", "0.5721725", "0.5710794", "0.57104737", "0.5704633", "0.5703819", "0.5702953", "0.56983054", "0.56940216", "0.5690886", "0.5657812", "0.5649313", "0.56480217", "0.564582", "0.563624", "0.5627615", "0.5625255", "0.5619124", "0.5613144", "0.56088334", "0.5605432", "0.56024873", "0.55947214", "0.55911726", "0.5589795", "0.5585938", "0.55821085", "0.5582017", "0.5581614", "0.55808634", "0.5580246", "0.5574314", "0.5568627", "0.55618674", "0.5560738", "0.55515087", "0.5550786", "0.5550786", "0.5541505", "0.5539938", "0.55395836", "0.5536529", "0.5532453", "0.5530356", "0.55274034", "0.5516386", "0.55141157", "0.551397", "0.5513621", "0.5507534", "0.55044377", "0.5499806", "0.5497794", "0.5496284", "0.5494955", "0.5485755", "0.54851174", "0.5484035", "0.54840046", "0.5483409", "0.5483303", "0.5483193", "0.5481435" ]
0.0
-1
MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s StartMonitoringMembersInput) MarshalFields(e protocol.FieldEncoder) error { e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) if s.AccountIds != nil { v := s.AccountIds metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "accountIds", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) } ls0.End() } if s.DetectorId != nil { v := *s.DetectorId metadata := protocol.Metadata{} e.SetValue(protocol.PathTarget, "detectorId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s CreateApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s OutputService9TestShapeSingleStructure) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s OutputService6TestShapeSingleStructure) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != 
nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Api) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", 
protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s GetApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateRestApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.ApiKeySource) > 0 {\n\t\tv := s.ApiKeySource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySource\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.BinaryMediaTypes != nil {\n\t\tv := s.BinaryMediaTypes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"binaryMediaTypes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil 
{\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EndpointConfiguration != nil {\n\t\tv := s.EndpointConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"endpointConfiguration\", v, metadata)\n\t}\n\tif s.Id != nil {\n\t\tv := *s.Id\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"id\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MinimumCompressionSize != nil {\n\t\tv := *s.MinimumCompressionSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"minimumCompressionSize\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Policy != nil {\n\t\tv := *s.Policy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"policy\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Warnings != nil {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s CreateCanaryInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ArtifactS3Location != nil {\n\t\tv := *s.ArtifactS3Location\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ArtifactS3Location\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Code != nil {\n\t\tv := s.Code\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Code\", v, metadata)\n\t}\n\tif s.ExecutionRoleArn != nil {\n\t\tv := *s.ExecutionRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ExecutionRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FailureRetentionPeriodInDays != nil {\n\t\tv := *s.FailureRetentionPeriodInDays\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FailureRetentionPeriodInDays\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RunConfig != nil {\n\t\tv := s.RunConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RunConfig\", v, metadata)\n\t}\n\tif s.RuntimeVersion != nil {\n\t\tv := *s.RuntimeVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"RuntimeVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schedule != nil {\n\t\tv := s.Schedule\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Schedule\", v, metadata)\n\t}\n\tif s.SuccessRetentionPeriodInDays != nil {\n\t\tv := *s.SuccessRetentionPeriodInDays\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"SuccessRetentionPeriodInDays\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"Tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.VpcConfig != nil {\n\t\tv := s.VpcConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"VpcConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualGatewayRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualGatewayName != nil {\n\t\tv := *s.VirtualGatewayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualGatewayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateAccessPointInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ClientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FileSystemId != nil {\n\t\tv := *s.FileSystemId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FileSystemId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PosixUser != nil {\n\t\tv := s.PosixUser\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"PosixUser\", v, metadata)\n\t}\n\tif s.RootDirectory != nil {\n\t\tv := s.RootDirectory\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RootDirectory\", v, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Tags\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateRouteInput) MarshalFields(e protocol.FieldEncoder) error 
{\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := 
s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteInput) MarshalFields(e protocol.FieldEncoder) error 
{\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := 
s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Api) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CorsConfiguration != nil {\n\t\tv := 
s.CorsConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"corsConfiguration\", v, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImportInfo != nil {\n\t\tv := s.ImportInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"importInfo\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v 
{\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Warnings != nil {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s OutputService15TestShapeItemDetailShape) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ID != nil {\n\t\tv := *s.ID\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ID\", protocol.StringValue(v), metadata)\n\t}\n\t// Skipping Type XML Attribute.\n\treturn nil\n}", "func (s CreateProxySessionInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Capabilities != nil {\n\t\tv := s.Capabilities\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Capabilities\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.ExpiryMinutes != nil {\n\t\tv := *s.ExpiryMinutes\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ExpiryMinutes\", protocol.Int64Value(v), metadata)\n\t}\n\tif len(s.GeoMatchLevel) > 0 {\n\t\tv := s.GeoMatchLevel\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"GeoMatchLevel\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.GeoMatchParams != nil {\n\t\tv := s.GeoMatchParams\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"GeoMatchParams\", v, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.NumberSelectionBehavior) > 0 {\n\t\tv := s.NumberSelectionBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"NumberSelectionBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.ParticipantPhoneNumbers != nil {\n\t\tv := s.ParticipantPhoneNumbers\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"ParticipantPhoneNumbers\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.VoiceConnectorId != nil {\n\t\tv := *s.VoiceConnectorId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"voiceConnectorId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif 
s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.AccessLogSettings != nil {\n\t\tv := s.AccessLogSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accessLogSettings\", v, metadata)\n\t}\n\tif s.ClientCertificateId != nil {\n\t\tv := *s.ClientCertificateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientCertificateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DefaultRouteSettings != nil {\n\t\tv := s.DefaultRouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"defaultRouteSettings\", v, metadata)\n\t}\n\tif s.DeploymentId != nil {\n\t\tv := *s.DeploymentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deploymentId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RouteSettings) > 0 {\n\t\tv := s.RouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"routeSettings\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.StageVariables) > 0 {\n\t\tv := s.StageVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := 
e.Map(protocol.BodyTarget, \"stageVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualGatewaySpec) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.BackendDefaults != nil {\n\t\tv := s.BackendDefaults\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"backendDefaults\", v, metadata)\n\t}\n\tif s.Listeners != nil {\n\t\tv := s.Listeners\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"listeners\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Logging != nil {\n\t\tv := s.Logging\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"logging\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil 
{\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s DescribeDetectorInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.DetectorModelName != nil {\n\t\tv := *s.DetectorModelName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorModelName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.KeyValue != nil {\n\t\tv := *s.KeyValue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"keyValue\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateSignalingChannelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ChannelARN != 
nil {\n\t\tv := *s.ChannelARN\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ChannelARN\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CurrentVersion != nil {\n\t\tv := *s.CurrentVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CurrentVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SingleMasterConfiguration != nil {\n\t\tv := s.SingleMasterConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"SingleMasterConfiguration\", v, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiMappingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DomainName != nil {\n\t\tv := *s.DomainName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"domainName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateThingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AttributePayload != nil {\n\t\tv := 
s.AttributePayload\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"attributePayload\", v, metadata)\n\t}\n\tif s.BillingGroupName != nil {\n\t\tv := *s.BillingGroupName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"billingGroupName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ThingTypeName != nil {\n\t\tv := *s.ThingTypeName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"thingTypeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ThingName != nil {\n\t\tv := *s.ThingName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"thingName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func encodeSchema(w io.Writer, s *schema.Schema) (err error) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\tew := errWriter{w: w}\n\tif s.Description != \"\" {\n\t\tew.writeFormat(`\"description\": %q, `, s.Description)\n\t}\n\tew.writeString(`\"type\": \"object\", `)\n\tew.writeString(`\"additionalProperties\": false, `)\n\tew.writeString(`\"properties\": {`)\n\tvar required []string\n\tvar notFirst bool\n\tfor _, key := range sortedFieldNames(s.Fields) {\n\t\tfield := s.Fields[key]\n\t\tif notFirst {\n\t\t\tew.writeString(\", \")\n\t\t}\n\t\tnotFirst = true\n\t\tif field.Required {\n\t\t\trequired = append(required, fmt.Sprintf(\"%q\", key))\n\t\t}\n\t\tew.err = encodeField(ew, key, field)\n\t\tif ew.err != nil {\n\t\t\treturn ew.err\n\t\t}\n\t}\n\tew.writeString(\"}\")\n\tif s.MinLen > 0 {\n\t\tew.writeFormat(`, \"minProperties\": %s`, strconv.FormatInt(int64(s.MinLen), 10))\n\t}\n\tif s.MaxLen > 0 {\n\t\tew.writeFormat(`, \"maxProperties\": %s`, strconv.FormatInt(int64(s.MaxLen), 10))\n\t}\n\n\tif len(required) > 0 {\n\t\tew.writeFormat(`, \"required\": [%s]`, strings.Join(required, \", \"))\n\t}\n\treturn ew.err\n}", "func (s 
OutputService15TestShapeItemShape) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ItemDetail != nil {\n\t\tv := s.ItemDetail\n\t\tattrs := make([]protocol.Attribute, 0, 1)\n\n\t\tif len(s.ItemDetail.Type) > 0 {\n\n\t\t\tv := s.ItemDetail.Type\n\t\t\tattrs = append(attrs, protocol.Attribute{Name: \"xsi:type\", Value: v, Meta: protocol.Metadata{}})\n\t\t}\n\t\tmetadata := protocol.Metadata{Attributes: attrs, XMLNamespacePrefix: \"xsi\", XMLNamespaceURI: \"http://www.w3.org/2001/XMLSchema-instance\"}\n\t\te.SetFields(protocol.BodyTarget, \"ItemDetail\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiMappingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DomainName != nil {\n\t\tv := *s.DomainName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"domainName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualServiceRef) MarshalFields(e protocol.FieldEncoder) error 
{\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualServiceName != nil {\n\t\tv := *s.VirtualServiceName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualServiceName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIPSetInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Activate != nil {\n\t\tv := *s.Activate\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"activate\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Location != nil {\n\t\tv := *s.Location\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"location\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DetectorId != nil {\n\t\tv := *s.DetectorId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IpSetId != nil {\n\t\tv := *s.IpSetId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"ipSetId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.AccessLogSettings != nil {\n\t\tv := s.AccessLogSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accessLogSettings\", v, metadata)\n\t}\n\tif s.ClientCertificateId != nil {\n\t\tv := *s.ClientCertificateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientCertificateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DefaultRouteSettings != nil {\n\t\tv := s.DefaultRouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"defaultRouteSettings\", v, metadata)\n\t}\n\tif s.DeploymentId != nil {\n\t\tv := *s.DeploymentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deploymentId\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RouteSettings) > 0 {\n\t\tv := s.RouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"routeSettings\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.StageVariables) > 0 {\n\t\tv := s.StageVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"stageVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetSigningPlatformInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PlatformId != nil {\n\t\tv := *s.PlatformId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"platformId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Robot) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Architecture) > 0 {\n\t\tv := s.Architecture\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"architecture\", 
protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.FleetArn != nil {\n\t\tv := *s.FleetArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"fleetArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.GreenGrassGroupId != nil {\n\t\tv := *s.GreenGrassGroupId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"greenGrassGroupId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastDeploymentJob != nil {\n\t\tv := *s.LastDeploymentJob\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastDeploymentJob\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastDeploymentTime != nil {\n\t\tv := *s.LastDeploymentTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastDeploymentTime\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Status) > 0 {\n\t\tv := s.Status\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"status\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIntegrationInput) MarshalFields(e protocol.FieldEncoder) error 
{\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) 
> 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeInputDeviceOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionState) > 0 {\n\t\tv := s.ConnectionState\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionState\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.DeviceSettingsSyncState) > 0 {\n\t\tv := s.DeviceSettingsSyncState\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deviceSettingsSyncState\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.HdDeviceSettings != nil {\n\t\tv := s.HdDeviceSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"hdDeviceSettings\", v, metadata)\n\t}\n\tif s.Id != nil {\n\t\tv := *s.Id\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"id\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MacAddress != nil {\n\t\tv := *s.MacAddress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"macAddress\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.NetworkSettings != nil {\n\t\tv := s.NetworkSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"networkSettings\", v, metadata)\n\t}\n\tif s.SerialNumber != nil {\n\t\tv := *s.SerialNumber\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"serialNumber\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: v}, 
metadata)\n\t}\n\treturn nil\n}", "func (s CreateAliasInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FunctionVersion != nil {\n\t\tv := *s.FunctionVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FunctionVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RoutingConfig != nil {\n\t\tv := s.RoutingConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RoutingConfig\", v, metadata)\n\t}\n\tif s.FunctionName != nil {\n\t\tv := *s.FunctionName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"FunctionName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateImageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DistributionConfigurationArn != nil {\n\t\tv := *s.DistributionConfigurationArn\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"distributionConfigurationArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnhancedImageMetadataEnabled != nil {\n\t\tv := *s.EnhancedImageMetadataEnabled\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enhancedImageMetadataEnabled\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImageRecipeArn != nil {\n\t\tv := *s.ImageRecipeArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"imageRecipeArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ImageTestsConfiguration != nil {\n\t\tv := s.ImageTestsConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"imageTestsConfiguration\", v, metadata)\n\t}\n\tif s.InfrastructureConfigurationArn != nil {\n\t\tv := *s.InfrastructureConfigurationArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"infrastructureConfigurationArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s VpcLink) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SecurityGroupIds != nil {\n\t\tv := 
s.SecurityGroupIds\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"securityGroupIds\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.SubnetIds != nil {\n\t\tv := s.SubnetIds\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"subnetIds\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.VpcLinkId != nil {\n\t\tv := *s.VpcLinkId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.VpcLinkStatus) > 0 {\n\t\tv := s.VpcLinkStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.VpcLinkStatusMessage != nil {\n\t\tv := *s.VpcLinkStatusMessage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkStatusMessage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.VpcLinkVersion) > 0 {\n\t\tv := s.VpcLinkVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkVersion\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s Route) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiGatewayManaged != nil {\n\t\tv := *s.ApiGatewayManaged\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiGatewayManaged\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.AuthorizationScopes != nil {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RequestModels != nil {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RequestParameters != nil {\n\t\tv := 
s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Integration) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiGatewayManaged != nil {\n\t\tv := *s.ApiGatewayManaged\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiGatewayManaged\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.PayloadFormatVersion != nil {\n\t\tv := *s.PayloadFormatVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"payloadFormatVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RequestParameters != nil {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RequestTemplates != nil {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.TlsConfig != nil {\n\t\tv := s.TlsConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"tlsConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"contentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateOTAUpdateInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AdditionalParameters != nil {\n\t\tv := s.AdditionalParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"additionalParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.AwsJobExecutionsRolloutConfig != nil {\n\t\tv := s.AwsJobExecutionsRolloutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobExecutionsRolloutConfig\", v, metadata)\n\t}\n\tif s.AwsJobPresignedUrlConfig != nil {\n\t\tv := 
s.AwsJobPresignedUrlConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobPresignedUrlConfig\", v, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Files != nil {\n\t\tv := s.Files\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"files\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Protocols != nil {\n\t\tv := s.Protocols\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"protocols\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.RoleArn != nil {\n\t\tv := *s.RoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"roleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"tags\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.TargetSelection) > 0 {\n\t\tv := s.TargetSelection\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"targetSelection\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Targets != nil {\n\t\tv := s.Targets\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targets\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.OtaUpdateId != nil {\n\t\tv := *s.OtaUpdateId\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"otaUpdateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateIntegrationInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif 
s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiKeyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", 
protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Expires != nil {\n\t\tv := *s.Expires\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"expires\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.BackendEnvironmentArn != nil {\n\t\tv := *s.BackendEnvironmentArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"backendEnvironmentArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisplayName != nil {\n\t\tv := *s.DisplayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"displayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnablePullRequestPreview != nil {\n\t\tv := *s.EnablePullRequestPreview\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enablePullRequestPreview\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnvironmentVariables != nil {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PullRequestEnvironmentName != nil {\n\t\tv := *s.PullRequestEnvironmentName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"pullRequestEnvironmentName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv 
:= s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateRouteResponseInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ResponseModels) > 0 {\n\t\tv := s.ResponseModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.ResponseParameters) > 0 {\n\t\tv := s.ResponseParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif 
s.RouteResponseKey != nil {\n\t\tv := *s.RouteResponseKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseId != nil {\n\t\tv := *s.RouteResponseId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeResponseId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreatePackageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PackageDescription != nil {\n\t\tv := *s.PackageDescription\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageDescription\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PackageName != nil {\n\t\tv := *s.PackageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PackageSource != nil {\n\t\tv := s.PackageSource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"PackageSource\", v, metadata)\n\t}\n\tif len(s.PackageType) > 0 {\n\t\tv := s.PackageType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s 
UpdatePipelineOutput) MarshalFields(e protocol.FieldEncoder) error {\n\treturn nil\n}", "func (s OutputService11TestShapeOutputService11TestCaseOperation1Output) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-false-bool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Integer != nil {\n\t\tv := *s.Integer\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-int\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.RFC822TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-true-bool\", protocol.BoolValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteResponseInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, 
\"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ResponseModels) > 0 {\n\t\tv := s.ResponseModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.ResponseParameters) > 0 {\n\t\tv := s.ResponseParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteResponseKey != nil {\n\t\tv := *s.RouteResponseKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateAppInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.CustomRules) > 0 {\n\t\tv := s.CustomRules\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"customRules\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBranchAutoBuild != nil {\n\t\tv := *s.EnableBranchAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBranchAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.IamServiceRoleArn != nil {\n\t\tv := *s.IamServiceRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"iamServiceRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OauthToken != nil {\n\t\tv := *s.OauthToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"oauthToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Repository != nil {\n\t\tv := *s.Repository\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"repository\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Tags) > 0 {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s GetApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := 
*s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv := s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif 
len(s.Tags) > 0 {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiMappingOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualNodeSpec) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.BackendDefaults != nil {\n\t\tv := s.BackendDefaults\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"backendDefaults\", v, metadata)\n\t}\n\tif s.Backends != nil {\n\t\tv := 
s.Backends\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"backends\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Listeners != nil {\n\t\tv := s.Listeners\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"listeners\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Logging != nil {\n\t\tv := s.Logging\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"logging\", v, metadata)\n\t}\n\tif s.ServiceDiscovery != nil {\n\t\tv := s.ServiceDiscovery\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"serviceDiscovery\", v, metadata)\n\t}\n\treturn nil\n}", "func (s Integration) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, 
\"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s NetworkPathComponent) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ComponentId != nil {\n\t\tv := *s.ComponentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ComponentId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ComponentType != nil {\n\t\tv := *s.ComponentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ComponentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Egress != nil {\n\t\tv := s.Egress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Egress\", v, metadata)\n\t}\n\tif s.Ingress != nil {\n\t\tv := s.Ingress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Ingress\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv := s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OutputService13TestShapeTimeContainer) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Bar != nil {\n\t\tv := *s.Bar\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"bar\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"unixTimestamp\", QuotedFormatTime: false}, metadata)\n\t}\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"foo\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\treturn nil\n}", "func (s ImportComponentInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ChangeDescription != nil {\n\t\tv := *s.ChangeDescription\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"changeDescription\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Data != nil {\n\t\tv := *s.Data\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"data\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Format) > 0 {\n\t\tv := s.Format\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"format\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.KmsKeyId != nil {\n\t\tv := *s.KmsKeyId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"kmsKeyId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.SemanticVersion != nil {\n\t\tv := *s.SemanticVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"semanticVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Uri != nil {\n\t\tv := *s.Uri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"uri\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Route) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := 
s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CodeReview) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CodeReviewArn != nil {\n\t\tv := *s.CodeReviewArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeReviewArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedTimeStamp != nil {\n\t\tv := *s.CreatedTimeStamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CreatedTimeStamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedTimeStamp != nil {\n\t\tv := *s.LastUpdatedTimeStamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastUpdatedTimeStamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, 
QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Metrics != nil {\n\t\tv := s.Metrics\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Metrics\", v, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Owner != nil {\n\t\tv := *s.Owner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Owner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProviderType) > 0 {\n\t\tv := s.ProviderType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProviderType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.PullRequestId != nil {\n\t\tv := *s.PullRequestId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PullRequestId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RepositoryName != nil {\n\t\tv := *s.RepositoryName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"RepositoryName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SourceCodeType != nil {\n\t\tv := s.SourceCodeType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"SourceCodeType\", v, metadata)\n\t}\n\tif len(s.State) > 0 {\n\t\tv := s.State\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"State\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.StateReason != nil {\n\t\tv := *s.StateReason\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StateReason\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Type\", 
protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s PutObjectOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ContentSHA256 != nil {\n\t\tv := *s.ContentSHA256\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ContentSHA256\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ETag != nil {\n\t\tv := *s.ETag\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ETag\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.StorageClass) > 0 {\n\t\tv := s.StorageClass\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StorageClass\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s GetRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s 
OutputService1TestShapeOutputService1TestCaseOperation1Output) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FalseBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Float64s != nil {\n\t\tv := s.Float64s\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Float64s\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.Float64Value(v1))\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Num != nil {\n\t\tv := *s.Num\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FooNum\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"TrueBool\", protocol.BoolValue(v), 
metadata)\n\t}\n\tif s.ImaHeader != nil {\n\t\tv := *s.ImaHeader\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"ImaHeader\", protocol.StringValue(v), metadata)\n\t}\n\tif s.ImaHeaderLocation != nil {\n\t\tv := *s.ImaHeaderLocation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"X-Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s GetStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateAppInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.CustomRules) > 0 {\n\t\tv := s.CustomRules\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"customRules\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v 
{\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBranchAutoBuild != nil {\n\t\tv := *s.EnableBranchAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBranchAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.IamServiceRoleArn != nil {\n\t\tv := *s.IamServiceRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"iamServiceRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OutputService1TestShapeOutputService1TestCaseOperation2Output) 
MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FalseBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Float64s != nil {\n\t\tv := s.Float64s\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Float64s\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.Float64Value(v1))\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Num != nil {\n\t\tv := *s.Num\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FooNum\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"TrueBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImaHeader != nil {\n\t\tv := *s.ImaHeader\n\n\t\tmetadata 
:= protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"ImaHeader\", protocol.StringValue(v), metadata)\n\t}\n\tif s.ImaHeaderLocation != nil {\n\t\tv := *s.ImaHeaderLocation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"X-Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s GetIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif 
s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s VirtualServiceBackend) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ClientPolicy != nil {\n\t\tv := s.ClientPolicy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"clientPolicy\", v, metadata)\n\t}\n\tif s.VirtualServiceName != nil {\n\t\tv := *s.VirtualServiceName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualServiceName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreatePolicyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PolicyDocument != nil {\n\t\tv := *s.PolicyDocument\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"policyDocument\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PolicyName != nil {\n\t\tv := *s.PolicyName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"policyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateBucketInput) MarshalFields(e protocol.FieldEncoder) error {\n\n\tif len(s.ACL) > 0 {\n\t\tv := s.ACL\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-acl\", v, metadata)\n\t}\n\tif s.GrantFullControl != nil {\n\t\tv := *s.GrantFullControl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-full-control\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantRead != nil {\n\t\tv := *s.GrantRead\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-read\", 
protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantReadACP != nil {\n\t\tv := *s.GrantReadACP\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-read-acp\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantWrite != nil {\n\t\tv := *s.GrantWrite\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-write\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantWriteACP != nil {\n\t\tv := *s.GrantWriteACP\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-write-acp\", protocol.StringValue(v), metadata)\n\t}\n\tif s.ObjectLockEnabledForBucket != nil {\n\t\tv := *s.ObjectLockEnabledForBucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-bucket-object-lock-enabled\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Bucket != nil {\n\t\tv := *s.Bucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"Bucket\", protocol.StringValue(v), metadata)\n\t}\n\tif s.CreateBucketConfiguration != nil {\n\t\tv := s.CreateBucketConfiguration\n\n\t\tmetadata := protocol.Metadata{XMLNamespaceURI: \"http://s3.amazonaws.com/doc/2006-03-01/\"}\n\t\te.SetFields(protocol.PayloadTarget, \"CreateBucketConfiguration\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiMappingOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Pipeline) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Activities) > 0 {\n\t\tv := s.Activities\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"activities\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreationTime != nil {\n\t\tv := *s.CreationTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"creationTime\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.LastUpdateTime != nil {\n\t\tv := *s.LastUpdateTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdateTime\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ReprocessingSummaries) > 0 {\n\t\tv := s.ReprocessingSummaries\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"reprocessingSummaries\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s VirtualNodeRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualNodeName != nil {\n\t\tv := *s.VirtualNodeName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualNodeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetIntrospectionSchemaInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := 
*s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Format) > 0 {\n\t\tv := s.Format\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"format\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IncludeDirectives != nil {\n\t\tv := *s.IncludeDirectives\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"includeDirectives\", protocol.BoolValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s Source) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Architecture) > 0 {\n\t\tv := s.Architecture\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"architecture\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Etag != nil {\n\t\tv := *s.Etag\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"etag\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.S3Bucket != nil {\n\t\tv := *s.S3Bucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"s3Bucket\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.S3Key != nil {\n\t\tv := *s.S3Key\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"s3Key\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OTAUpdateInfo) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.AdditionalParameters != nil {\n\t\tv := s.AdditionalParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"additionalParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.AwsIotJobArn != nil {\n\t\tv := *s.AwsIotJobArn\n\n\t\tmetadata 
:= protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"awsIotJobArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AwsIotJobId != nil {\n\t\tv := *s.AwsIotJobId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"awsIotJobId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AwsJobExecutionsRolloutConfig != nil {\n\t\tv := s.AwsJobExecutionsRolloutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobExecutionsRolloutConfig\", v, metadata)\n\t}\n\tif s.AwsJobPresignedUrlConfig != nil {\n\t\tv := s.AwsJobPresignedUrlConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobPresignedUrlConfig\", v, metadata)\n\t}\n\tif s.CreationDate != nil {\n\t\tv := *s.CreationDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"creationDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ErrorInfo != nil {\n\t\tv := s.ErrorInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"errorInfo\", v, metadata)\n\t}\n\tif s.LastModifiedDate != nil {\n\t\tv := *s.LastModifiedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastModifiedDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.OtaUpdateArn != nil {\n\t\tv := *s.OtaUpdateArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OtaUpdateFiles != nil {\n\t\tv := 
s.OtaUpdateFiles\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"otaUpdateFiles\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.OtaUpdateId != nil {\n\t\tv := *s.OtaUpdateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.OtaUpdateStatus) > 0 {\n\t\tv := s.OtaUpdateStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Protocols != nil {\n\t\tv := s.Protocols\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"protocols\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.TargetSelection) > 0 {\n\t\tv := s.TargetSelection\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"targetSelection\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Targets != nil {\n\t\tv := s.Targets\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targets\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s CreateModelOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateJobInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.HopDestinations != nil {\n\t\tv := s.HopDestinations\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"hopDestinations\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\t}\n\tif s.AccelerationSettings != nil {\n\t\tv := s.AccelerationSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accelerationSettings\", v, metadata)\n\t}\n\tif len(s.BillingTagsSource) > 0 {\n\t\tv := s.BillingTagsSource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"billingTagsSource\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tvar ClientRequestToken string\n\tif s.ClientRequestToken != nil {\n\t\tClientRequestToken = *s.ClientRequestToken\n\t} else {\n\t\tClientRequestToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientRequestToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientRequestToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.JobTemplate != nil {\n\t\tv := *s.JobTemplate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobTemplate\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Priority != nil {\n\t\tv := *s.Priority\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"priority\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Queue != nil {\n\t\tv := *s.Queue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"queue\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Role != nil {\n\t\tv := *s.Role\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"role\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Settings != nil {\n\t\tv := s.Settings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"settings\", v, metadata)\n\t}\n\tif len(s.SimulateReservedQueue) > 0 {\n\t\tv := s.SimulateReservedQueue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"simulateReservedQueue\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.StatusUpdateInterval) > 0 {\n\t\tv := s.StatusUpdateInterval\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"statusUpdateInterval\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.UserMetadata != nil {\n\t\tv := s.UserMetadata\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"userMetadata\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateBrokerStorageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.CurrentVersion != nil {\n\t\tv := *s.CurrentVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"currentVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TargetBrokerEBSVolumeInfo != nil {\n\t\tv := s.TargetBrokerEBSVolumeInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targetBrokerEBSVolumeInfo\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.ClusterArn != nil {\n\t\tv := *s.ClusterArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"clusterArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetMacieSessionInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\treturn nil\n}", "func (s CustomCodeSigning) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CertificateChain != nil {\n\t\tv := s.CertificateChain\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"certificateChain\", v, metadata)\n\t}\n\tif s.HashAlgorithm != nil {\n\t\tv := *s.HashAlgorithm\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"hashAlgorithm\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Signature != nil {\n\t\tv := s.Signature\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signature\", v, metadata)\n\t}\n\tif s.SignatureAlgorithm != nil 
{\n\t\tv := *s.SignatureAlgorithm\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"signatureAlgorithm\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s MeshRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s Resource) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Attributes) > 0 {\n\t\tv := s.Attributes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"attributes\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Feature != nil {\n\t\tv := *s.Feature\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"feature\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Type != nil {\n\t\tv := *s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GatewayRouteRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.GatewayRouteName != nil {\n\t\tv := *s.GatewayRouteName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"gatewayRouteName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualGatewayName != nil {\n\t\tv := *s.VirtualGatewayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualGatewayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s RouteRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: 
protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteName != nil {\n\t\tv := *s.RouteName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualRouterName != nil {\n\t\tv := *s.VirtualRouterName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualRouterName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Pipeline != nil {\n\t\tv := s.Pipeline\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"pipeline\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Pipeline != nil {\n\t\tv := s.Pipeline\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"pipeline\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeDetectorModelInput) MarshalFields(e protocol.FieldEncoder) error 
{\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.DetectorModelName != nil {\n\t\tv := *s.DetectorModelName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorModelName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DetectorModelVersion != nil {\n\t\tv := *s.DetectorModelVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s AttachPolicyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PolicyName != nil {\n\t\tv := *s.PolicyName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"policyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (v *Service) Encode(sw stream.Writer) error {\n\tif err := sw.WriteStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 7, Type: wire.TBinary}); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteString(v.Name); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 1, Type: wire.TBinary}); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteString(v.ThriftName); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.ParentID != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 4, 
Type: wire.TI32}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.ParentID.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 5, Type: wire.TList}); err != nil {\n\t\treturn err\n\t}\n\tif err := _List_Function_Encode(v.Functions, sw); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 6, Type: wire.TI32}); err != nil {\n\t\treturn err\n\t}\n\tif err := v.ModuleID.Encode(sw); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.Annotations != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 8, Type: wire.TMap}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := _Map_String_String_Encode(v.Annotations, sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn sw.WriteStructEnd()\n}", "func (s GetSigningPlatformOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Category) > 0 {\n\t\tv := s.Category\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"category\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.DisplayName != nil {\n\t\tv := *s.DisplayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"displayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MaxSizeInMB != nil {\n\t\tv := *s.MaxSizeInMB\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"maxSizeInMB\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Partner != nil {\n\t\tv := *s.Partner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"partner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif 
s.PlatformId != nil {\n\t\tv := *s.PlatformId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platformId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SigningConfiguration != nil {\n\t\tv := s.SigningConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signingConfiguration\", v, metadata)\n\t}\n\tif s.SigningImageFormat != nil {\n\t\tv := s.SigningImageFormat\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signingImageFormat\", v, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Product) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ActivationUrl != nil {\n\t\tv := *s.ActivationUrl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ActivationUrl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Categories != nil {\n\t\tv := s.Categories\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Categories\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.CompanyName != nil {\n\t\tv := *s.CompanyName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CompanyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationTypes != nil {\n\t\tv := s.IntegrationTypes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 
:= e.List(protocol.BodyTarget, \"IntegrationTypes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.MarketplaceUrl != nil {\n\t\tv := *s.MarketplaceUrl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MarketplaceUrl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductArn != nil {\n\t\tv := *s.ProductArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductName != nil {\n\t\tv := *s.ProductName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductSubscriptionResourcePolicy != nil {\n\t\tv := *s.ProductSubscriptionResourcePolicy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductSubscriptionResourcePolicy\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s AwsLambdaFunctionLayer) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CodeSize != nil {\n\t\tv := *s.CodeSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeSize\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s HttpAuthorization) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Sigv4 != nil {\n\t\tv := s.Sigv4\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"sigv4\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineInput) MarshalFields(e 
protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PipelineName != nil {\n\t\tv := *s.PipelineName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"pipelineName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}" ]
[ "0.6348559", "0.6248897", "0.6195551", "0.6180413", "0.61684614", "0.61208856", "0.6100623", "0.6070698", "0.606548", "0.6012609", "0.60108584", "0.5961231", "0.5953864", "0.5934938", "0.5929273", "0.5922881", "0.5910155", "0.59065044", "0.59003735", "0.58856225", "0.58790755", "0.5875828", "0.5874432", "0.5872241", "0.58716893", "0.5858529", "0.5851449", "0.58507365", "0.5844942", "0.5837428", "0.5833994", "0.58214754", "0.5820069", "0.58158517", "0.58149785", "0.58125305", "0.580506", "0.5803897", "0.5803712", "0.578714", "0.57817864", "0.5777309", "0.57750463", "0.57733595", "0.5765925", "0.5761909", "0.57602566", "0.5759691", "0.5752248", "0.5750325", "0.57488763", "0.5747476", "0.57432723", "0.5740528", "0.5733633", "0.5733325", "0.57294506", "0.5721927", "0.5720165", "0.571053", "0.570292", "0.5702867", "0.57019335", "0.56969184", "0.56954247", "0.5693915", "0.5686742", "0.56838375", "0.5679277", "0.56771725", "0.56763923", "0.5665559", "0.5661512", "0.56612706", "0.5660435", "0.56588036", "0.56538945", "0.5650536", "0.5646301", "0.56446356", "0.5644526", "0.5639443", "0.5637914", "0.56363106", "0.5635751", "0.5635708", "0.5631697", "0.5626627", "0.56260794", "0.56240636", "0.5623764", "0.5623764", "0.56213856", "0.56168675", "0.561474", "0.56128246", "0.56114304", "0.561078", "0.5609868", "0.56065774", "0.5589743" ]
0.0
-1
String returns the string representation
func (s StartMonitoringMembersOutput) String() string { return awsutil.Prettify(s) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateCanaryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Library) String() string {\n\tres := make([]string, 5)\n\tres[0] = \"ID: \" + reform.Inspect(s.ID, true)\n\tres[1] = \"UserID: \" + reform.Inspect(s.UserID, true)\n\tres[2] = \"VolumeID: \" + reform.Inspect(s.VolumeID, true)\n\tres[3] = \"CreatedAt: \" + reform.Inspect(s.CreatedAt, true)\n\tres[4] = \"UpdatedAt: \" + reform.Inspect(s.UpdatedAt, true)\n\treturn strings.Join(res, \", \")\n}", "func (r Info) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (s ReEncryptOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateFHIRDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\toutput := output{\n\t\tRerun: Rerun,\n\t\tVariables: Variables,\n\t\tItems: Items,\n\t}\n\tvar err error\n\tvar b []byte\n\tif Indent == \"\" {\n\t\tb, err = json.Marshal(output)\n\t} else {\n\t\tb, err = json.MarshalIndent(output, \"\", Indent)\n\t}\n\tif err != nil {\n\t\tmessageErr := Errorf(\"Error in parser. 
Please report this output to https://github.com/drgrib/alfred/issues: %v\", err)\n\t\tpanic(messageErr)\n\t}\n\ts := string(b)\n\treturn s\n}", "func (s CreateQuickConnectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *Registry) String() string {\n\tout := make([]string, 0, len(r.nameToObject))\n\tfor name, object := range r.nameToObject {\n\t\tout = append(out, fmt.Sprintf(\"* %s:\\n%s\", name, object.serialization))\n\t}\n\treturn strings.Join(out, \"\\n\\n\")\n}", "func (s CreateSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSafetyRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateLanguageModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (r SendAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (r ReceiveAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (enc *simpleEncoding) String() string {\n\treturn \"simpleEncoding(\" + enc.baseName + \")\"\n}", "func (s CreateDatabaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienium) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (s CreateHITTypeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateEntityOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Addshifttraderequest) String() string {\n \n \n \n \n o.AcceptableIntervals = []string{\"\"} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func 
(s CreateUseCaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Rooms) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (i Info) String() string {\n\ts, _ := i.toJSON()\n\treturn s\n}", "func (o *Botversionsummary) String() string {\n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (e ExternalCfps) String() string {\n\tje, _ := json.Marshal(e)\n\treturn string(je)\n}", "func (s CreateTrustStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\treturn fmt.Sprintf(\n\t\t\"AppVersion = %s\\n\"+\n\t\t\t\"VCSRef = %s\\n\"+\n\t\t\t\"BuildVersion = %s\\n\"+\n\t\t\t\"BuildDate = %s\",\n\t\tAppVersion, VCSRef, BuildVersion, Date,\n\t)\n}", "func (s CreateDataLakeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSolutionVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (i NotMachine) String() string { return toString(i) }", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s StartPipelineReprocessingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSequenceStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Adjustablelivespeakerdetection) String() string {\n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateRateBasedRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Resiliency) String() string {\n\tb, _ := json.Marshal(r)\n\treturn string(b)\n}", "func (s 
RestoreFromRecoveryPointOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateWaveOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (s CreateRoomOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotLocaleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienia) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (i *Info) String() string {\n\tb, _ := json.Marshal(i)\n\treturn string(b)\n}", "func (s ProcessingFeatureStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ExportProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r RoomOccupancies) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (r *InterRecord) String() string {\n\tbuf := r.Bytes()\n\tdefer ffjson.Pool(buf)\n\n\treturn string(buf)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Coretype) String() string {\n \n \n \n \n \n o.ValidationFields = []string{\"\"} \n \n o.ItemValidationFields = []string{\"\"} \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateLayerOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelCardOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Limitchangerequestdetails) String() string {\n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", 
"func (s NetworkPathComponentDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (t Terms) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (g GetObjectOutput) String() string {\n\treturn helper.Prettify(g)\n}", "func (s StartContactEvaluationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Interactionstatsalert) String() string {\n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Digitalcondition) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (r RoomOccupancy) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (d *Diagram) String() string { return toString(d) }", "func (o *Outboundroute) String() string {\n \n \n \n \n o.ClassificationTypes = []string{\"\"} \n \n \n o.ExternalTrunkBases = []Domainentityref{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateCodeRepositoryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateActivationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolutionTechniques) String() string {\n\treturn 
awsutil.Prettify(s)\n}", "func (s CreateTrialComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c CourseCode) String() string {\n\tjc, _ := json.Marshal(c)\n\treturn string(jc)\n}", "func (p *Parms) String() string {\n\tout, _ := json.MarshalIndent(p, \"\", \"\\t\")\n\treturn string(out)\n}", "func (p polynomial) String() (str string) {\n\tfor _, m := range p.monomials {\n\t\tstr = str + \" \" + m.String() + \" +\"\n\t}\n\tstr = strings.TrimRight(str, \"+\")\n\treturn \"f(x) = \" + strings.TrimSpace(str)\n}", "func (s CreateThingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *RUT) String() string {\n\treturn r.Format(DefaultFormatter)\n}", "func (s CreatePatchBaselineOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Crossplatformpolicycreate) String() string {\n \n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s BotVersionLocaleDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s LifeCycleLastTestInitiated) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteMultiplexProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetObjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s LifeCycleLastTestReverted) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateDocumentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateIntegrationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Commonruleconditions) String() string {\n o.Clauses = []Commonruleconditions{{}} \n o.Predicates = []Commonrulepredicate{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (t 
Test1s) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (o *Directrouting) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (s CreateContactFlowOutput) String() string {\n\treturn awsutil.Prettify(s)\n}" ]
[ "0.7215058", "0.7215058", "0.72000957", "0.7199919", "0.7177383", "0.7166947", "0.7118059", "0.7087492", "0.70870787", "0.7079275", "0.70782894", "0.7067719", "0.7031721", "0.70269966", "0.7026298", "0.70251423", "0.7021565", "0.70164025", "0.701059", "0.7010184", "0.70022964", "0.6997043", "0.6996532", "0.6992619", "0.69909185", "0.69900763", "0.69862556", "0.6985364", "0.6975378", "0.69738907", "0.69624275", "0.6961772", "0.69603413", "0.69507927", "0.6946753", "0.69460964", "0.69460964", "0.6944943", "0.694029", "0.69369334", "0.69332623", "0.69287163", "0.692656", "0.6924643", "0.69216746", "0.69213074", "0.69181406", "0.6917802", "0.6911058", "0.69104654", "0.6909528", "0.690845", "0.690454", "0.6899065", "0.6896141", "0.6894107", "0.6894107", "0.6894107", "0.68921995", "0.68920684", "0.689124", "0.68893504", "0.688871", "0.6884391", "0.6882336", "0.6880731", "0.68767136", "0.68766147", "0.68766147", "0.68751997", "0.68735147", "0.68734384", "0.68731403", "0.6871602", "0.6869421", "0.68684965", "0.68677104", "0.68677104", "0.68677104", "0.68677104", "0.68673396", "0.68622416", "0.6862084", "0.6859391", "0.6857645", "0.6853781", "0.68523467", "0.6851581", "0.6846037", "0.6844023", "0.6843859", "0.68434954", "0.68419206", "0.68416274", "0.684033", "0.6839815", "0.68363225", "0.6835165", "0.68334675", "0.68327725", "0.6832733" ]
0.0
-1
MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s StartMonitoringMembersOutput) MarshalFields(e protocol.FieldEncoder) error { if s.UnprocessedAccounts != nil { v := s.UnprocessedAccounts metadata := protocol.Metadata{} ls0 := e.List(protocol.BodyTarget, "unprocessedAccounts", metadata) ls0.Start() for _, v1 := range v { ls0.ListAddFields(v1) } ls0.End() } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s CreateApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s OutputService9TestShapeSingleStructure) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s OutputService6TestShapeSingleStructure) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != 
nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Api) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", 
protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s GetApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateRestApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.ApiKeySource) > 0 {\n\t\tv := s.ApiKeySource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySource\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.BinaryMediaTypes != nil {\n\t\tv := s.BinaryMediaTypes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"binaryMediaTypes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil 
{\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EndpointConfiguration != nil {\n\t\tv := s.EndpointConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"endpointConfiguration\", v, metadata)\n\t}\n\tif s.Id != nil {\n\t\tv := *s.Id\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"id\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MinimumCompressionSize != nil {\n\t\tv := *s.MinimumCompressionSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"minimumCompressionSize\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Policy != nil {\n\t\tv := *s.Policy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"policy\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Warnings != nil {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s CreateCanaryInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ArtifactS3Location != nil {\n\t\tv := *s.ArtifactS3Location\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ArtifactS3Location\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Code != nil {\n\t\tv := s.Code\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Code\", v, metadata)\n\t}\n\tif s.ExecutionRoleArn != nil {\n\t\tv := *s.ExecutionRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ExecutionRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FailureRetentionPeriodInDays != nil {\n\t\tv := *s.FailureRetentionPeriodInDays\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FailureRetentionPeriodInDays\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RunConfig != nil {\n\t\tv := s.RunConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RunConfig\", v, metadata)\n\t}\n\tif s.RuntimeVersion != nil {\n\t\tv := *s.RuntimeVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"RuntimeVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schedule != nil {\n\t\tv := s.Schedule\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Schedule\", v, metadata)\n\t}\n\tif s.SuccessRetentionPeriodInDays != nil {\n\t\tv := *s.SuccessRetentionPeriodInDays\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"SuccessRetentionPeriodInDays\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"Tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.VpcConfig != nil {\n\t\tv := s.VpcConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"VpcConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualGatewayRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualGatewayName != nil {\n\t\tv := *s.VirtualGatewayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualGatewayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateAccessPointInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ClientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FileSystemId != nil {\n\t\tv := *s.FileSystemId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FileSystemId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PosixUser != nil {\n\t\tv := s.PosixUser\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"PosixUser\", v, metadata)\n\t}\n\tif s.RootDirectory != nil {\n\t\tv := s.RootDirectory\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RootDirectory\", v, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Tags\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateRouteInput) MarshalFields(e protocol.FieldEncoder) error 
{\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := 
s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteInput) MarshalFields(e protocol.FieldEncoder) error 
{\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := 
s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Api) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CorsConfiguration != nil {\n\t\tv := 
s.CorsConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"corsConfiguration\", v, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImportInfo != nil {\n\t\tv := s.ImportInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"importInfo\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v 
{\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Warnings != nil {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s OutputService15TestShapeItemDetailShape) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ID != nil {\n\t\tv := *s.ID\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ID\", protocol.StringValue(v), metadata)\n\t}\n\t// Skipping Type XML Attribute.\n\treturn nil\n}", "func (s CreateProxySessionInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Capabilities != nil {\n\t\tv := s.Capabilities\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Capabilities\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.ExpiryMinutes != nil {\n\t\tv := *s.ExpiryMinutes\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ExpiryMinutes\", protocol.Int64Value(v), metadata)\n\t}\n\tif len(s.GeoMatchLevel) > 0 {\n\t\tv := s.GeoMatchLevel\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"GeoMatchLevel\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.GeoMatchParams != nil {\n\t\tv := s.GeoMatchParams\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"GeoMatchParams\", v, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.NumberSelectionBehavior) > 0 {\n\t\tv := s.NumberSelectionBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"NumberSelectionBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.ParticipantPhoneNumbers != nil {\n\t\tv := s.ParticipantPhoneNumbers\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"ParticipantPhoneNumbers\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.VoiceConnectorId != nil {\n\t\tv := *s.VoiceConnectorId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"voiceConnectorId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif 
s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.AccessLogSettings != nil {\n\t\tv := s.AccessLogSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accessLogSettings\", v, metadata)\n\t}\n\tif s.ClientCertificateId != nil {\n\t\tv := *s.ClientCertificateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientCertificateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DefaultRouteSettings != nil {\n\t\tv := s.DefaultRouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"defaultRouteSettings\", v, metadata)\n\t}\n\tif s.DeploymentId != nil {\n\t\tv := *s.DeploymentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deploymentId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RouteSettings) > 0 {\n\t\tv := s.RouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"routeSettings\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.StageVariables) > 0 {\n\t\tv := s.StageVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := 
e.Map(protocol.BodyTarget, \"stageVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualGatewaySpec) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.BackendDefaults != nil {\n\t\tv := s.BackendDefaults\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"backendDefaults\", v, metadata)\n\t}\n\tif s.Listeners != nil {\n\t\tv := s.Listeners\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"listeners\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Logging != nil {\n\t\tv := s.Logging\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"logging\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil 
{\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s DescribeDetectorInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.DetectorModelName != nil {\n\t\tv := *s.DetectorModelName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorModelName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.KeyValue != nil {\n\t\tv := *s.KeyValue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"keyValue\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateSignalingChannelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ChannelARN != 
nil {\n\t\tv := *s.ChannelARN\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ChannelARN\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CurrentVersion != nil {\n\t\tv := *s.CurrentVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CurrentVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SingleMasterConfiguration != nil {\n\t\tv := s.SingleMasterConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"SingleMasterConfiguration\", v, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiMappingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DomainName != nil {\n\t\tv := *s.DomainName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"domainName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateThingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AttributePayload != nil {\n\t\tv := 
s.AttributePayload\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"attributePayload\", v, metadata)\n\t}\n\tif s.BillingGroupName != nil {\n\t\tv := *s.BillingGroupName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"billingGroupName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ThingTypeName != nil {\n\t\tv := *s.ThingTypeName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"thingTypeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ThingName != nil {\n\t\tv := *s.ThingName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"thingName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func encodeSchema(w io.Writer, s *schema.Schema) (err error) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\tew := errWriter{w: w}\n\tif s.Description != \"\" {\n\t\tew.writeFormat(`\"description\": %q, `, s.Description)\n\t}\n\tew.writeString(`\"type\": \"object\", `)\n\tew.writeString(`\"additionalProperties\": false, `)\n\tew.writeString(`\"properties\": {`)\n\tvar required []string\n\tvar notFirst bool\n\tfor _, key := range sortedFieldNames(s.Fields) {\n\t\tfield := s.Fields[key]\n\t\tif notFirst {\n\t\t\tew.writeString(\", \")\n\t\t}\n\t\tnotFirst = true\n\t\tif field.Required {\n\t\t\trequired = append(required, fmt.Sprintf(\"%q\", key))\n\t\t}\n\t\tew.err = encodeField(ew, key, field)\n\t\tif ew.err != nil {\n\t\t\treturn ew.err\n\t\t}\n\t}\n\tew.writeString(\"}\")\n\tif s.MinLen > 0 {\n\t\tew.writeFormat(`, \"minProperties\": %s`, strconv.FormatInt(int64(s.MinLen), 10))\n\t}\n\tif s.MaxLen > 0 {\n\t\tew.writeFormat(`, \"maxProperties\": %s`, strconv.FormatInt(int64(s.MaxLen), 10))\n\t}\n\n\tif len(required) > 0 {\n\t\tew.writeFormat(`, \"required\": [%s]`, strings.Join(required, \", \"))\n\t}\n\treturn ew.err\n}", "func (s 
OutputService15TestShapeItemShape) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ItemDetail != nil {\n\t\tv := s.ItemDetail\n\t\tattrs := make([]protocol.Attribute, 0, 1)\n\n\t\tif len(s.ItemDetail.Type) > 0 {\n\n\t\t\tv := s.ItemDetail.Type\n\t\t\tattrs = append(attrs, protocol.Attribute{Name: \"xsi:type\", Value: v, Meta: protocol.Metadata{}})\n\t\t}\n\t\tmetadata := protocol.Metadata{Attributes: attrs, XMLNamespacePrefix: \"xsi\", XMLNamespaceURI: \"http://www.w3.org/2001/XMLSchema-instance\"}\n\t\te.SetFields(protocol.BodyTarget, \"ItemDetail\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiMappingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DomainName != nil {\n\t\tv := *s.DomainName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"domainName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualServiceRef) MarshalFields(e protocol.FieldEncoder) error 
{\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualServiceName != nil {\n\t\tv := *s.VirtualServiceName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualServiceName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIPSetInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Activate != nil {\n\t\tv := *s.Activate\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"activate\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Location != nil {\n\t\tv := *s.Location\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"location\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DetectorId != nil {\n\t\tv := *s.DetectorId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IpSetId != nil {\n\t\tv := *s.IpSetId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"ipSetId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.AccessLogSettings != nil {\n\t\tv := s.AccessLogSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accessLogSettings\", v, metadata)\n\t}\n\tif s.ClientCertificateId != nil {\n\t\tv := *s.ClientCertificateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientCertificateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DefaultRouteSettings != nil {\n\t\tv := s.DefaultRouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"defaultRouteSettings\", v, metadata)\n\t}\n\tif s.DeploymentId != nil {\n\t\tv := *s.DeploymentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deploymentId\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RouteSettings) > 0 {\n\t\tv := s.RouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"routeSettings\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.StageVariables) > 0 {\n\t\tv := s.StageVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"stageVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetSigningPlatformInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PlatformId != nil {\n\t\tv := *s.PlatformId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"platformId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Robot) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Architecture) > 0 {\n\t\tv := s.Architecture\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"architecture\", 
protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.FleetArn != nil {\n\t\tv := *s.FleetArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"fleetArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.GreenGrassGroupId != nil {\n\t\tv := *s.GreenGrassGroupId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"greenGrassGroupId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastDeploymentJob != nil {\n\t\tv := *s.LastDeploymentJob\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastDeploymentJob\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastDeploymentTime != nil {\n\t\tv := *s.LastDeploymentTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastDeploymentTime\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Status) > 0 {\n\t\tv := s.Status\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"status\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIntegrationInput) MarshalFields(e protocol.FieldEncoder) error 
{\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) 
> 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateAliasInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), 
protocol.Metadata{})\n\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FunctionVersion != nil {\n\t\tv := *s.FunctionVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FunctionVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RoutingConfig != nil {\n\t\tv := s.RoutingConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RoutingConfig\", v, metadata)\n\t}\n\tif s.FunctionName != nil {\n\t\tv := *s.FunctionName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"FunctionName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeInputDeviceOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionState) > 0 {\n\t\tv := s.ConnectionState\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionState\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.DeviceSettingsSyncState) > 0 {\n\t\tv := s.DeviceSettingsSyncState\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deviceSettingsSyncState\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.HdDeviceSettings != nil {\n\t\tv := s.HdDeviceSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, 
\"hdDeviceSettings\", v, metadata)\n\t}\n\tif s.Id != nil {\n\t\tv := *s.Id\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"id\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MacAddress != nil {\n\t\tv := *s.MacAddress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"macAddress\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.NetworkSettings != nil {\n\t\tv := s.NetworkSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"networkSettings\", v, metadata)\n\t}\n\tif s.SerialNumber != nil {\n\t\tv := *s.SerialNumber\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"serialNumber\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateImageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DistributionConfigurationArn != nil {\n\t\tv := *s.DistributionConfigurationArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"distributionConfigurationArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnhancedImageMetadataEnabled != nil {\n\t\tv := *s.EnhancedImageMetadataEnabled\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enhancedImageMetadataEnabled\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImageRecipeArn != nil {\n\t\tv := *s.ImageRecipeArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"imageRecipeArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ImageTestsConfiguration != nil {\n\t\tv := s.ImageTestsConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"imageTestsConfiguration\", v, metadata)\n\t}\n\tif s.InfrastructureConfigurationArn != nil {\n\t\tv := *s.InfrastructureConfigurationArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"infrastructureConfigurationArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s VpcLink) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SecurityGroupIds != nil {\n\t\tv := s.SecurityGroupIds\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"securityGroupIds\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.SubnetIds != nil {\n\t\tv := s.SubnetIds\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"subnetIds\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.VpcLinkId != nil {\n\t\tv := *s.VpcLinkId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.VpcLinkStatus) > 0 {\n\t\tv := s.VpcLinkStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.VpcLinkStatusMessage != nil {\n\t\tv := *s.VpcLinkStatusMessage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkStatusMessage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.VpcLinkVersion) > 0 {\n\t\tv := s.VpcLinkVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkVersion\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s Route) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiGatewayManaged != nil {\n\t\tv := *s.ApiGatewayManaged\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"apiGatewayManaged\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.AuthorizationScopes != nil {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RequestModels != nil {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RequestParameters != nil {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := 
e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Integration) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiGatewayManaged != nil {\n\t\tv := *s.ApiGatewayManaged\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiGatewayManaged\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, 
metadata)\n\t}\n\tif s.PayloadFormatVersion != nil {\n\t\tv := *s.PayloadFormatVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"payloadFormatVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RequestParameters != nil {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RequestTemplates != nil {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.TlsConfig != nil {\n\t\tv := s.TlsConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"tlsConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentType\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateOTAUpdateInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AdditionalParameters != nil {\n\t\tv := s.AdditionalParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"additionalParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.AwsJobExecutionsRolloutConfig != nil {\n\t\tv := s.AwsJobExecutionsRolloutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobExecutionsRolloutConfig\", v, metadata)\n\t}\n\tif s.AwsJobPresignedUrlConfig != nil {\n\t\tv := s.AwsJobPresignedUrlConfig\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobPresignedUrlConfig\", v, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Files != nil {\n\t\tv := s.Files\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"files\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Protocols != nil {\n\t\tv := s.Protocols\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"protocols\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.RoleArn != nil {\n\t\tv := *s.RoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"roleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"tags\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.TargetSelection) > 0 {\n\t\tv := s.TargetSelection\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"targetSelection\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Targets != nil {\n\t\tv := s.Targets\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targets\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.OtaUpdateId != nil {\n\t\tv := *s.OtaUpdateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, 
\"otaUpdateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateIntegrationInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := 
*s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiKeyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), 
protocol.Metadata{})\n\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Expires != nil {\n\t\tv := *s.Expires\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"expires\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.BackendEnvironmentArn != nil {\n\t\tv := *s.BackendEnvironmentArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"backendEnvironmentArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisplayName != nil {\n\t\tv := *s.DisplayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"displayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnablePullRequestPreview != nil {\n\t\tv := *s.EnablePullRequestPreview\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enablePullRequestPreview\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnvironmentVariables != nil {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PullRequestEnvironmentName != nil {\n\t\tv := *s.PullRequestEnvironmentName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"pullRequestEnvironmentName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv := s.Stage\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateRouteResponseInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ResponseModels) > 0 {\n\t\tv := s.ResponseModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.ResponseParameters) > 0 {\n\t\tv := s.ResponseParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteResponseKey != nil {\n\t\tv := 
*s.RouteResponseKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseId != nil {\n\t\tv := *s.RouteResponseId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeResponseId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreatePackageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PackageDescription != nil {\n\t\tv := *s.PackageDescription\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageDescription\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PackageName != nil {\n\t\tv := *s.PackageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PackageSource != nil {\n\t\tv := s.PackageSource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"PackageSource\", v, metadata)\n\t}\n\tif len(s.PackageType) > 0 {\n\t\tv := s.PackageType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdatePipelineOutput) MarshalFields(e 
protocol.FieldEncoder) error {\n\treturn nil\n}", "func (s OutputService11TestShapeOutputService11TestCaseOperation1Output) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-false-bool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Integer != nil {\n\t\tv := *s.Integer\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-int\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.RFC822TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-true-bool\", protocol.BoolValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteResponseInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", 
protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ResponseModels) > 0 {\n\t\tv := s.ResponseModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.ResponseParameters) > 0 {\n\t\tv := s.ResponseParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteResponseKey != nil {\n\t\tv := *s.RouteResponseKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateAppInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.CustomRules) > 0 {\n\t\tv := s.CustomRules\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"customRules\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBranchAutoBuild != nil {\n\t\tv := *s.EnableBranchAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBranchAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.IamServiceRoleArn != nil {\n\t\tv := *s.IamServiceRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"iamServiceRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OauthToken != nil {\n\t\tv := *s.OauthToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"oauthToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Repository != nil {\n\t\tv := *s.Repository\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"repository\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Tags) > 0 {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s GetApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv := s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.Tags) > 0 {\n\t\tv := 
s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiMappingOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualNodeSpec) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.BackendDefaults != nil {\n\t\tv := s.BackendDefaults\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"backendDefaults\", v, metadata)\n\t}\n\tif s.Backends != nil {\n\t\tv := s.Backends\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"backends\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Listeners != nil {\n\t\tv := s.Listeners\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"listeners\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Logging != nil {\n\t\tv := s.Logging\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"logging\", v, metadata)\n\t}\n\tif s.ServiceDiscovery != nil {\n\t\tv := s.ServiceDiscovery\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"serviceDiscovery\", v, metadata)\n\t}\n\treturn nil\n}", "func (s Integration) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", 
metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s NetworkPathComponent) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ComponentId != nil {\n\t\tv := *s.ComponentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ComponentId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ComponentType != nil {\n\t\tv := *s.ComponentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ComponentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Egress != nil {\n\t\tv := s.Egress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Egress\", v, metadata)\n\t}\n\tif s.Ingress != nil {\n\t\tv := s.Ingress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Ingress\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec 
!= nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv := s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId 
!= nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OutputService13TestShapeTimeContainer) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Bar != nil {\n\t\tv := *s.Bar\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"bar\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"unixTimestamp\", QuotedFormatTime: false}, metadata)\n\t}\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"foo\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\treturn nil\n}", "func (s ImportComponentInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ChangeDescription != nil {\n\t\tv := *s.ChangeDescription\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"changeDescription\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Data != nil {\n\t\tv := *s.Data\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"data\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Format) > 0 {\n\t\tv := s.Format\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"format\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.KmsKeyId != nil {\n\t\tv := *s.KmsKeyId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"kmsKeyId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.SemanticVersion != nil {\n\t\tv := *s.SemanticVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"semanticVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Uri != nil {\n\t\tv := *s.Uri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"uri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s 
Route) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := 
e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CodeReview) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CodeReviewArn != nil {\n\t\tv := *s.CodeReviewArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeReviewArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedTimeStamp != nil {\n\t\tv := *s.CreatedTimeStamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CreatedTimeStamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedTimeStamp != nil {\n\t\tv := *s.LastUpdatedTimeStamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastUpdatedTimeStamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Metrics != nil {\n\t\tv 
:= s.Metrics\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Metrics\", v, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Owner != nil {\n\t\tv := *s.Owner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Owner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProviderType) > 0 {\n\t\tv := s.ProviderType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProviderType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.PullRequestId != nil {\n\t\tv := *s.PullRequestId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PullRequestId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RepositoryName != nil {\n\t\tv := *s.RepositoryName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"RepositoryName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SourceCodeType != nil {\n\t\tv := s.SourceCodeType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"SourceCodeType\", v, metadata)\n\t}\n\tif len(s.State) > 0 {\n\t\tv := s.State\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"State\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.StateReason != nil {\n\t\tv := *s.StateReason\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StateReason\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Type\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s 
PutObjectOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ContentSHA256 != nil {\n\t\tv := *s.ContentSHA256\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ContentSHA256\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ETag != nil {\n\t\tv := *s.ETag\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ETag\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.StorageClass) > 0 {\n\t\tv := s.StorageClass\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StorageClass\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s GetRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OutputService1TestShapeOutputService1TestCaseOperation1Output) MarshalFields(e protocol.FieldEncoder) error {\n\tif 
s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FalseBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Float64s != nil {\n\t\tv := s.Float64s\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Float64s\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.Float64Value(v1))\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Num != nil {\n\t\tv := *s.Num\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FooNum\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"TrueBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImaHeader != nil {\n\t\tv := *s.ImaHeader\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"ImaHeader\", protocol.StringValue(v), metadata)\n\t}\n\tif s.ImaHeaderLocation != nil {\n\t\tv := *s.ImaHeaderLocation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"X-Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s GetStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateAppInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.CustomRules) > 0 {\n\t\tv := s.CustomRules\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"customRules\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBranchAutoBuild != nil {\n\t\tv := *s.EnableBranchAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBranchAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.IamServiceRoleArn != nil {\n\t\tv := *s.IamServiceRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"iamServiceRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OutputService1TestShapeOutputService1TestCaseOperation2Output) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FalseBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Float64s != nil {\n\t\tv := s.Float64s\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Float64s\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.Float64Value(v1))\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Num != nil {\n\t\tv := *s.Num\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FooNum\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"TrueBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImaHeader != nil {\n\t\tv := *s.ImaHeader\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"ImaHeader\", protocol.StringValue(v), 
metadata)\n\t}\n\tif s.ImaHeaderLocation != nil {\n\t\tv := *s.ImaHeaderLocation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"X-Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s GetIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := 
*s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s VirtualServiceBackend) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ClientPolicy != nil {\n\t\tv := s.ClientPolicy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"clientPolicy\", v, metadata)\n\t}\n\tif s.VirtualServiceName != nil {\n\t\tv := *s.VirtualServiceName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualServiceName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreatePolicyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PolicyDocument != nil {\n\t\tv := *s.PolicyDocument\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"policyDocument\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PolicyName != nil {\n\t\tv := *s.PolicyName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"policyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateBucketInput) MarshalFields(e protocol.FieldEncoder) error {\n\n\tif len(s.ACL) > 0 {\n\t\tv := s.ACL\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-acl\", v, metadata)\n\t}\n\tif s.GrantFullControl != nil {\n\t\tv := *s.GrantFullControl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-full-control\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantRead != nil {\n\t\tv := *s.GrantRead\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-read\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantReadACP != nil {\n\t\tv := 
*s.GrantReadACP\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-read-acp\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantWrite != nil {\n\t\tv := *s.GrantWrite\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-write\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantWriteACP != nil {\n\t\tv := *s.GrantWriteACP\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-write-acp\", protocol.StringValue(v), metadata)\n\t}\n\tif s.ObjectLockEnabledForBucket != nil {\n\t\tv := *s.ObjectLockEnabledForBucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-bucket-object-lock-enabled\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Bucket != nil {\n\t\tv := *s.Bucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"Bucket\", protocol.StringValue(v), metadata)\n\t}\n\tif s.CreateBucketConfiguration != nil {\n\t\tv := s.CreateBucketConfiguration\n\n\t\tmetadata := protocol.Metadata{XMLNamespaceURI: \"http://s3.amazonaws.com/doc/2006-03-01/\"}\n\t\te.SetFields(protocol.PayloadTarget, \"CreateBucketConfiguration\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiMappingOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Pipeline) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Activities) > 0 {\n\t\tv := s.Activities\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"activities\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreationTime != nil {\n\t\tv := *s.CreationTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"creationTime\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.LastUpdateTime != nil {\n\t\tv := *s.LastUpdateTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdateTime\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ReprocessingSummaries) > 0 {\n\t\tv := s.ReprocessingSummaries\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"reprocessingSummaries\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s VirtualNodeRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualNodeName != nil {\n\t\tv := *s.VirtualNodeName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualNodeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetIntrospectionSchemaInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Format) > 0 {\n\t\tv := s.Format\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"format\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IncludeDirectives != nil {\n\t\tv := *s.IncludeDirectives\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"includeDirectives\", protocol.BoolValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s Source) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Architecture) > 0 {\n\t\tv := s.Architecture\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"architecture\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Etag != nil {\n\t\tv := *s.Etag\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"etag\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.S3Bucket != nil {\n\t\tv := *s.S3Bucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"s3Bucket\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.S3Key != nil {\n\t\tv := *s.S3Key\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"s3Key\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OTAUpdateInfo) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.AdditionalParameters != nil {\n\t\tv := s.AdditionalParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"additionalParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.AwsIotJobArn != nil {\n\t\tv := *s.AwsIotJobArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"awsIotJobArn\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AwsIotJobId != nil {\n\t\tv := *s.AwsIotJobId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"awsIotJobId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AwsJobExecutionsRolloutConfig != nil {\n\t\tv := s.AwsJobExecutionsRolloutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobExecutionsRolloutConfig\", v, metadata)\n\t}\n\tif s.AwsJobPresignedUrlConfig != nil {\n\t\tv := s.AwsJobPresignedUrlConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobPresignedUrlConfig\", v, metadata)\n\t}\n\tif s.CreationDate != nil {\n\t\tv := *s.CreationDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"creationDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ErrorInfo != nil {\n\t\tv := s.ErrorInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"errorInfo\", v, metadata)\n\t}\n\tif s.LastModifiedDate != nil {\n\t\tv := *s.LastModifiedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastModifiedDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.OtaUpdateArn != nil {\n\t\tv := *s.OtaUpdateArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OtaUpdateFiles != nil {\n\t\tv := s.OtaUpdateFiles\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"otaUpdateFiles\", 
metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.OtaUpdateId != nil {\n\t\tv := *s.OtaUpdateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.OtaUpdateStatus) > 0 {\n\t\tv := s.OtaUpdateStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Protocols != nil {\n\t\tv := s.Protocols\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"protocols\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.TargetSelection) > 0 {\n\t\tv := s.TargetSelection\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"targetSelection\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Targets != nil {\n\t\tv := s.Targets\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targets\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s CreateModelOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata 
:= protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateJobInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.HopDestinations != nil {\n\t\tv := s.HopDestinations\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"hopDestinations\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\t}\n\tif s.AccelerationSettings != nil {\n\t\tv := s.AccelerationSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accelerationSettings\", v, metadata)\n\t}\n\tif len(s.BillingTagsSource) > 0 {\n\t\tv := s.BillingTagsSource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"billingTagsSource\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tvar ClientRequestToken string\n\tif s.ClientRequestToken != nil {\n\t\tClientRequestToken = *s.ClientRequestToken\n\t} else {\n\t\tClientRequestToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientRequestToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientRequestToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobTemplate != nil {\n\t\tv := *s.JobTemplate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"jobTemplate\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Priority != nil {\n\t\tv := *s.Priority\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"priority\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Queue != nil {\n\t\tv := *s.Queue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"queue\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Role != nil {\n\t\tv := *s.Role\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"role\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Settings != nil {\n\t\tv := s.Settings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"settings\", v, metadata)\n\t}\n\tif len(s.SimulateReservedQueue) > 0 {\n\t\tv := s.SimulateReservedQueue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"simulateReservedQueue\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.StatusUpdateInterval) > 0 {\n\t\tv := s.StatusUpdateInterval\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"statusUpdateInterval\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.UserMetadata != nil {\n\t\tv := s.UserMetadata\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"userMetadata\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateBrokerStorageInput) 
MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.CurrentVersion != nil {\n\t\tv := *s.CurrentVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"currentVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TargetBrokerEBSVolumeInfo != nil {\n\t\tv := s.TargetBrokerEBSVolumeInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targetBrokerEBSVolumeInfo\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.ClusterArn != nil {\n\t\tv := *s.ClusterArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"clusterArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CustomCodeSigning) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CertificateChain != nil {\n\t\tv := s.CertificateChain\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"certificateChain\", v, metadata)\n\t}\n\tif s.HashAlgorithm != nil {\n\t\tv := *s.HashAlgorithm\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"hashAlgorithm\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Signature != nil {\n\t\tv := s.Signature\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signature\", v, metadata)\n\t}\n\tif s.SignatureAlgorithm != nil {\n\t\tv := *s.SignatureAlgorithm\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"signatureAlgorithm\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetMacieSessionInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", 
protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\treturn nil\n}", "func (s MeshRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s Resource) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif 
len(s.Attributes) > 0 {\n\t\tv := s.Attributes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"attributes\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Feature != nil {\n\t\tv := *s.Feature\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"feature\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Type != nil {\n\t\tv := *s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GatewayRouteRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.GatewayRouteName != nil {\n\t\tv := *s.GatewayRouteName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"gatewayRouteName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName 
!= nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualGatewayName != nil {\n\t\tv := *s.VirtualGatewayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualGatewayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s RouteRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteName != nil {\n\t\tv := *s.RouteName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualRouterName != nil {\n\t\tv := *s.VirtualRouterName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualRouterName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Pipeline != nil {\n\t\tv := s.Pipeline\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"pipeline\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Pipeline != nil {\n\t\tv := s.Pipeline\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"pipeline\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeDetectorModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.DetectorModelName != nil {\n\t\tv := *s.DetectorModelName\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorModelName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DetectorModelVersion != nil {\n\t\tv := *s.DetectorModelVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s AttachPolicyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PolicyName != nil {\n\t\tv := *s.PolicyName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"policyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetSigningPlatformOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Category) > 0 {\n\t\tv := s.Category\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"category\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.DisplayName != nil {\n\t\tv := *s.DisplayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"displayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MaxSizeInMB != nil {\n\t\tv := *s.MaxSizeInMB\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"maxSizeInMB\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Partner != nil {\n\t\tv := *s.Partner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"partner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PlatformId != nil 
{\n\t\tv := *s.PlatformId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platformId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SigningConfiguration != nil {\n\t\tv := s.SigningConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signingConfiguration\", v, metadata)\n\t}\n\tif s.SigningImageFormat != nil {\n\t\tv := s.SigningImageFormat\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signingImageFormat\", v, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (v *Service) Encode(sw stream.Writer) error {\n\tif err := sw.WriteStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 7, Type: wire.TBinary}); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteString(v.Name); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 1, Type: wire.TBinary}); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteString(v.ThriftName); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.ParentID != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 4, Type: wire.TI32}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.ParentID.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 5, Type: wire.TList}); err != nil {\n\t\treturn err\n\t}\n\tif err := _List_Function_Encode(v.Functions, sw); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn 
err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 6, Type: wire.TI32}); err != nil {\n\t\treturn err\n\t}\n\tif err := v.ModuleID.Encode(sw); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.Annotations != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 8, Type: wire.TMap}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := _Map_String_String_Encode(v.Annotations, sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn sw.WriteStructEnd()\n}", "func (s Product) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ActivationUrl != nil {\n\t\tv := *s.ActivationUrl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ActivationUrl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Categories != nil {\n\t\tv := s.Categories\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Categories\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.CompanyName != nil {\n\t\tv := *s.CompanyName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CompanyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationTypes != nil {\n\t\tv := s.IntegrationTypes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"IntegrationTypes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.MarketplaceUrl != nil {\n\t\tv := *s.MarketplaceUrl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MarketplaceUrl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductArn != nil {\n\t\tv := *s.ProductArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductName != nil {\n\t\tv := *s.ProductName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductSubscriptionResourcePolicy != nil {\n\t\tv := *s.ProductSubscriptionResourcePolicy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductSubscriptionResourcePolicy\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s AwsLambdaFunctionLayer) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CodeSize != nil {\n\t\tv := *s.CodeSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeSize\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s HttpAuthorization) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Sigv4 != nil {\n\t\tv := s.Sigv4\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"sigv4\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PipelineName != nil 
{\n\t\tv := *s.PipelineName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"pipelineName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}" ]
[ "0.63479704", "0.6247795", "0.6195172", "0.61806905", "0.61683095", "0.61200184", "0.6099719", "0.6069325", "0.6064495", "0.60124534", "0.6009836", "0.59606516", "0.59526104", "0.5935234", "0.5928707", "0.59224474", "0.5910614", "0.59063023", "0.58990103", "0.5884719", "0.58780664", "0.58758223", "0.58740026", "0.5870821", "0.58704156", "0.58583754", "0.5851065", "0.5847724", "0.584571", "0.5837251", "0.583382", "0.5820691", "0.5819901", "0.58150333", "0.5815032", "0.58111376", "0.5804827", "0.5802364", "0.5802092", "0.578717", "0.5780785", "0.57765293", "0.5774867", "0.5773512", "0.57650334", "0.5761848", "0.5759235", "0.5758919", "0.5750916", "0.5749155", "0.5746686", "0.574606", "0.5741942", "0.5740067", "0.57334137", "0.57329136", "0.57288283", "0.5720536", "0.57199717", "0.5709697", "0.57026315", "0.57025653", "0.57011104", "0.56961375", "0.5694661", "0.569277", "0.56852376", "0.568256", "0.5678673", "0.56765914", "0.5675795", "0.56648564", "0.5661323", "0.5660928", "0.56593114", "0.5657954", "0.56536096", "0.565022", "0.5645035", "0.56447184", "0.56441903", "0.56391424", "0.56385344", "0.56368303", "0.5635701", "0.5635248", "0.56317043", "0.5625839", "0.5625511", "0.56232154", "0.56216896", "0.56216896", "0.56204224", "0.5616888", "0.56134516", "0.5612859", "0.56120634", "0.56104606", "0.56095034", "0.56052333", "0.5590397" ]
0.0
-1
Send marshals and sends the StartMonitoringMembers API request.
func (r StartMonitoringMembersRequest) Send(ctx context.Context) (*StartMonitoringMembersResponse, error) { r.Request.SetContext(ctx) err := r.Request.Send() if err != nil { return nil, err } resp := &StartMonitoringMembersResponse{ StartMonitoringMembersOutput: r.Request.Data.(*StartMonitoringMembersOutput), response: &aws.Response{Request: r.Request}, } return resp, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r CreateMembersRequest) Send(ctx context.Context) (*CreateMembersResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CreateMembersResponse{\n\t\tCreateMembersOutput: r.Request.Data.(*CreateMembersOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (r CreateMembersRequest) Send(ctx context.Context) (*CreateMembersResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CreateMembersResponse{\n\t\tCreateMembersOutput: r.Request.Data.(*CreateMembersOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (o *AssignUserToCustomerGroupUsingPATCH1Params) SetMembers(members *models.MemberListWsDTO) {\n\to.Members = members\n}", "func (s StartMonitoringMembersInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AccountIds != nil {\n\t\tv := s.AccountIds\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"accountIds\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.DetectorId != nil {\n\t\tv := *s.DetectorId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (g *Group) RequestMemberlist() {\n\tjoinMessage := &JoinMessage{\n\t\tMember: g.Member().ProtoMember(),\n\t}\n\tprotoJoinMessage, _ := ptypes.MarshalAny(joinMessage)\n\n\tm := &Message{\n\t\tMessageType: Message_INTRO_SYN,\n\t\tAddr: g.Addr,\n\t\tDetails: protoJoinMessage,\n\t}\n\n\t// Only join using primary coordinator\n\tfor _, coordinator 
:= range g.coordinators {\n\t\tif coordinator == g.Addr {\n\t\t\tcontinue\n\t\t}\n\t\tSendTCP(coordinator, m)\n\t}\n}", "func (uc *UserMember) Start() {\n\n\tvar err error\n\n\tmn := &uc.MemberMaker\n\terr = mn.OpenAcc() // runs the node, opening acceptors\n\tif err == nil {\n\t\tgo func() {\n\t\t\tvar (\n\t\t\t\tcnx *xt.TcpConnection\n\t\t\t\tversion1 uint32\n\t\t\t\tversion2 uint32\n\t\t\t)\n\t\t\tcnx, version2, err = mn.SessionSetup(version1)\n\t\t\tif cnx != nil {\n\t\t\t\tdefer cnx.Close()\n\t\t\t}\n\t\t\t_ = version2 // not yet used\n\t\t\tif err == nil {\n\t\t\t\terr = mn.MemberAndOK()\n\t\t\t}\n\t\t\t// XXX MODIFY TO USE CLUSTER_ID PASSED TO UserMember\n\t\t\t// 2013-10-12 this is a join by cluster name\n\t\t\tif err == nil {\n\t\t\t\terr = mn.JoinAndReply()\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\terr = mn.GetAndMembers()\n\t\t\t}\n\t\t\t// DEBUG ====================================================\n\t\t\tvar nilMembers []int\n\t\t\tfor i := 0; i < len(uc.Members); i++ {\n\t\t\t\tif uc.Members[i] == nil {\n\t\t\t\t\tnilMembers = append(nilMembers, i)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(nilMembers) > 0 {\n\t\t\t\tfmt.Printf(\"UserMember.Start() after Get finds nil members: %v\\n\",\n\t\t\t\t\tnilMembers)\n\t\t\t}\n\t\t\t// END ======================================================\n\n\t\t\tif err == nil {\n\t\t\t\terr = mn.ByeAndAck()\n\t\t\t}\n\n\t\t\tmn.DoneCh <- err\n\t\t}()\n\t} else {\n\t\tmn.DoneCh <- err\n\t}\n}", "func (o *DashboardAllOfLinks) SetMembers(v string) {\n\to.Members = &v\n}", "func (cm *ClusterMember) Start() error {\n\tif cm.started {\n\t\treturn ErrClusterJoined\n\t}\n\n\tcm.started = true\n\n\tmi, err := cm.cmKVS.RegisterAgent(cm.name)\n\tif err != nil {\n\t\treturn &RegisterError{err: err, name: cm.name}\n\t}\n\n\tcm.modifiedIndex = mi\n\n\tgo cm.schedule(cm, \"poll\", poll, time.Second)\n\tgo cm.schedule(cm, \"refresh\", refresh, cm.cmKVS.CheckTTL/2)\n\n\treturn nil\n}", "func (b *ChatRequestBuilder) Members() 
*ChatMembersCollectionRequestBuilder {\n\tbb := &ChatMembersCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/members\"\n\treturn bb\n}", "func (o *PostAPI24PoliciesNfsMembersParams) SetMembers(members *models.PolicyMemberExportPost) {\n\to.Members = members\n}", "func (s *Status) Members(args *structs.GenericRequest, reply *structs.ServerMembersResponse) error {\n\t// Check node read permissions\n\tif aclObj, err := s.srv.ResolveToken(args.AuthToken); err != nil {\n\t\treturn err\n\t} else if aclObj != nil && !aclObj.AllowNodeRead() {\n\t\treturn structs.ErrPermissionDenied\n\t}\n\n\tserfMembers := s.srv.Members()\n\tmembers := make([]*structs.ServerMember, len(serfMembers))\n\tfor i, mem := range serfMembers {\n\t\tmembers[i] = &structs.ServerMember{\n\t\t\tName: mem.Name,\n\t\t\tAddr: mem.Addr,\n\t\t\tPort: mem.Port,\n\t\t\tTags: mem.Tags,\n\t\t\tStatus: mem.Status.String(),\n\t\t\tProtocolMin: mem.ProtocolMin,\n\t\t\tProtocolMax: mem.ProtocolMax,\n\t\t\tProtocolCur: mem.ProtocolCur,\n\t\t\tDelegateMin: mem.DelegateMin,\n\t\t\tDelegateMax: mem.DelegateMax,\n\t\t\tDelegateCur: mem.DelegateCur,\n\t\t}\n\t}\n\t*reply = structs.ServerMembersResponse{\n\t\tServerName: s.srv.config.NodeName,\n\t\tServerRegion: s.srv.config.Region,\n\t\tServerDC: s.srv.config.Datacenter,\n\t\tMembers: members,\n\t}\n\treturn nil\n}", "func (s *ListMembersOutput) SetMembers(v []*MemberSummary) *ListMembersOutput {\n\ts.Members = v\n\treturn s\n}", "func (o *UpdateRole) SetMembers(v []int32) {\n\to.Members = &v\n}", "func (a *Agent) Start() error {\n\tgo func() {\n\t\tfor range a.peerUpdateChan {\n\t\t\tif err := a.members.UpdateNode(nodeUpdateTimeout); err != nil {\n\t\t\t\tlogrus.Errorf(\"error updating node metadata: %s\", err)\n\t\t\t}\n\t\t}\n\t}()\n\tif len(a.config.Peers) > 0 {\n\t\tif _, err := a.members.Join(a.config.Peers); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (m *TeamItemRequestBuilder) 
Members()(*ifc923048eff969ef232f17cdaf6c11e18676c5c9e2918bc19001d30cbdc4b5c1.MembersRequestBuilder) {\n return ifc923048eff969ef232f17cdaf6c11e18676c5c9e2918bc19001d30cbdc4b5c1.NewMembersRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (r CreateMemberRequest) Send(ctx context.Context) (*CreateMemberResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CreateMemberResponse{\n\t\tCreateMemberOutput: r.Request.Data.(*CreateMemberOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (e *PrometheusExporter) Start() {\n\tpromoExposer := NewPrometheusHTTPExposer(e)\n\tgo promoExposer.Run(PromoHTTPEndpoint, PromoHTTPPort)\n}", "func AllMembers(req routes.MemberRequest) {\n\tdefer connLog.FuncTimeTrack(time.Now(), \"Member All Full Function\")\n\tfmt.Println(\"Endpoint Hit: All Members Endpoint\")\n\n\t//TODO: Run Debug, gets stuck in infinite loop\n\t//connLog.ConnTimeGet(config.MemberAll)\n\tresults, err := config.SqlDb().Query(\"SELECT * FROM members\")\n\tdefer config.SqlDb().Close()\n\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tbuildResponse(results, err)\n}", "func (r *SpacesMembersService) List(parent string) *SpacesMembersListCall {\n\tc := &SpacesMembersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\treturn c\n}", "func (mem *Member) joinRequest() {\n\tSend(Configuration.Service.introducerIP+\":\"+fmt.Sprint(Configuration.Service.port), JoinMsg, nil)\n}", "func (s StartMonitoringMembersInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func ExampleMembersClient_BeginCreate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclient, err := armblockchain.NewMembersClient(\"51766542-3ed7-4a72-a187-0c8ab644ddab\", cred, nil)\n\tif err != nil 
{\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := client.BeginCreate(ctx,\n\t\t\"contosemember1\",\n\t\t\"mygroup\",\n\t\t&armblockchain.MembersClientBeginCreateOptions{BlockchainMember: &armblockchain.Member{\n\t\t\tLocation: to.Ptr(\"southeastasia\"),\n\t\t\tProperties: &armblockchain.MemberProperties{\n\t\t\t\tConsortium: to.Ptr(\"ContoseConsortium\"),\n\t\t\t\tConsortiumManagementAccountPassword: to.Ptr(\"<consortiumManagementAccountPassword>\"),\n\t\t\t\tPassword: to.Ptr(\"<password>\"),\n\t\t\t\tValidatorNodesSKU: &armblockchain.MemberNodesSKU{\n\t\t\t\t\tCapacity: to.Ptr[int32](2),\n\t\t\t\t},\n\t\t\t\tProtocol: to.Ptr(armblockchain.BlockchainProtocolQuorum),\n\t\t\t},\n\t\t},\n\t\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// TODO: use response item\n\t_ = res\n}", "func (m *ChatItemRequestBuilder) Members()(*i29250e65d3047fac7086c3da168f1dc97b716e57040772b108c92eb7e41e5082.MembersRequestBuilder) {\n return i29250e65d3047fac7086c3da168f1dc97b716e57040772b108c92eb7e41e5082.NewMembersRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (s CreateMembersInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Accounts != nil {\n\t\tv := s.Accounts\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Accounts\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.GraphArn != nil {\n\t\tv := *s.GraphArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"GraphArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Message != nil {\n\t\tv := *s.Message\n\n\t\tmetadata 
:= protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Message\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s StartMonitoringMembersOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.UnprocessedAccounts != nil {\n\t\tv := s.UnprocessedAccounts\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"unprocessedAccounts\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func startGroupMember(f HarvestTriggerFunc, trigger chan HarvestType) chan bool {\n\tcancel := make(chan bool)\n\tgo f(trigger, cancel)\n\treturn cancel\n}", "func (r *Registration) Start(ctx context.Context) {\n\tif r.apiKey != \"\" {\n\t\tctx = metadata.AppendToOutgoingContext(ctx, auth.APIKeyHeader, r.apiKey)\n\t}\n\n\tgo func() {\n\t\tr.maintainRegistrationAndStreamWork(ctx)\n\t}()\n}", "func (r *ProjectsGroupsMembersService) List(name string) *ProjectsGroupsMembersListCall {\n\tc := &ProjectsGroupsMembersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (m Member) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tpopulate(objectMap, \"id\", m.ID)\n\tpopulate(objectMap, \"location\", m.Location)\n\tpopulate(objectMap, \"name\", m.Name)\n\tpopulate(objectMap, \"properties\", m.Properties)\n\tpopulate(objectMap, \"sku\", m.SKU)\n\tpopulate(objectMap, \"tags\", m.Tags)\n\tpopulate(objectMap, \"type\", m.Type)\n\treturn json.Marshal(objectMap)\n}", "func (o *AssignUserToCustomerGroupUsingPATCH1Params) WithMembers(members *models.MemberListWsDTO) *AssignUserToCustomerGroupUsingPATCH1Params {\n\to.SetMembers(members)\n\treturn o\n}", "func (c *Client) StartMonitoringGroups(ctx context.Context, partnerID string, groupIDs []string, taskID gocql.UUID) error {\n\tfor _, id := range groupIDs {\n\t\tpayload := m.MonitoringDG{\n\t\t\tPartnerID: 
partnerID,\n\t\t\tDynamicGroupID: id,\n\t\t\tServiceID: TaskingServiceIDPrefix + taskID.String(),\n\t\t\tOperation: MessageTypeDynamicGroupStartMonitoring,\n\t\t}\n\n\t\tif err := c.client.Push(ctx, payload); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func ExampleSyncMembersClient_BeginRefreshMemberSchema() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclient, err := armsql.NewSyncMembersClient(\"00000000-1111-2222-3333-444444444444\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := client.BeginRefreshMemberSchema(ctx,\n\t\t\"syncgroupcrud-65440\",\n\t\t\"syncgroupcrud-8475\",\n\t\t\"syncgroupcrud-4328\",\n\t\t\"syncgroupcrud-3187\",\n\t\t\"syncgroupcrud-4879\",\n\t\tnil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t_, err = poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n}", "func (c *Client) NewListMemberRequest(ctx context.Context, path string) (*http.Request, error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"https\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif c.JWTSigner != nil {\n\t\tif err := c.JWTSigner.Sign(req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn req, nil\n}", "func (a *Agent) Start() error {\n\tgo func() {\n\t\tfor range a.peerUpdateChan {\n\t\t\tif err := a.members.UpdateNode(nodeUpdateTimeout); err != nil {\n\t\t\t\tlogrus.Errorf(\"error updating node metadata: %s\", err)\n\t\t\t}\n\t\t}\n\t}()\n\tif len(a.config.Peers) > 0 {\n\t\tdoneCh := make(chan bool)\n\t\tgo func() {\n\t\t\ttimeout := time.Now().Add(a.config.LeaderPromotionTimeout)\n\t\t\tfor {\n\t\t\t\tif 
time.Now().After(timeout) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif _, err := a.members.Join(a.config.Peers); err != nil {\n\t\t\t\t\tlogrus.WithError(err).Warn(\"unable to join\")\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdoneCh <- true\n\t\t}()\n\n\t\tselect {\n\t\tcase <-time.After(a.config.LeaderPromotionTimeout):\n\t\t\tlogrus.Infof(\"timeout (%s) trying to join peers; self-electing as leader\", a.config.LeaderPromotionTimeout)\n\t\tcase <-doneCh:\n\t\t\tlogrus.Infof(\"joined peers %s\", a.config.Peers)\n\t\t}\n\t}\n\treturn nil\n}", "func (c *RequestGuildMembersCommand) MarshalJSON() ([]byte, error) {\n\ttype raw RequestGuildMembersCommand\n\n\tif c.UserIDs != nil && c.Query != nil {\n\t\treturn nil, errors.New(\"neither UserIDs nor Query can be filled\")\n\t}\n\n\tvar marshaling interface{} = (*raw)(c)\n\tif c.Query != nil {\n\t\t// Force the Limit field to be present if Query is present.\n\t\tmarshaling = struct {\n\t\t\t*raw\n\t\t\tLimit uint `json:\"limit\"`\n\t\t}{\n\t\t\traw: (*raw)(c),\n\t\t\tLimit: c.Limit,\n\t\t}\n\t}\n\n\treturn json.Marshal(marshaling)\n}", "func (m *DeviceRequestBuilder) GetMemberObjects()(*i07bc89ff8c55d06b076d4f0a8ad636a90ed70691edcc9301ad5f2b5508af5d88.GetMemberObjectsRequestBuilder) {\n return i07bc89ff8c55d06b076d4f0a8ad636a90ed70691edcc9301ad5f2b5508af5d88.NewGetMemberObjectsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func SearchMember(w http.ResponseWriter, r *http.Request) {\n\tvar data []SearchRequest\n var payload ResponseValues\n\n enc := json.NewEncoder(w)\n enc.SetIndent(\"\", \" \")\n\n // Decodes the request body\n\terr := json.NewDecoder(r.Body).Decode(&data)\n\tif err != nil {\n writeError(w, &enc, &payload, http.StatusBadRequest, \"Invalid request data format\")\n return\n\t}\n\n // Validates the incoming request data\n validated := validateRequestData(data)\n if !validated {\n writeError(w, &enc, &payload, http.StatusBadRequest, 
\"Invalid request data format\")\n return\n }\n\n // Uses request body data to build the elastic query body\n\telasticQueryBody, err := buildElasticQuery(data)\n\tif err != nil {\n writeError(w, &enc, &payload, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n // Queries the elastic service\n payload, err = elasticService.QueryElasticService(elasticQueryBody)\n if err != nil {\n writeError(w, &enc, &payload, http.StatusBadRequest, payload.Error)\n return\n }\n\n w.Header().Set(\"Content-Type\", \"application/json\")\n w.WriteHeader(http.StatusOK)\n enc.Encode(payload)\n\n\treturn\n}", "func (s CreateMembersOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Members != nil {\n\t\tv := s.Members\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Members\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.UnprocessedAccounts != nil {\n\t\tv := s.UnprocessedAccounts\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"UnprocessedAccounts\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func ExampleMembers() {\n\ta := fixture.ReplicaSets[0].Members\n\tconst x = \"myReplicaSet_2\"\n\ti, found := search.Members(a, func(m opsmngr.Member) bool { return m.Host == x })\n\tif i < len(a) && found {\n\t\tfmt.Printf(\"found %v at index %d\\n\", x, i)\n\t} else {\n\t\tfmt.Printf(\"%s not found\\n\", x)\n\t}\n\t// Output:\n\t// found myReplicaSet_2 at index 1\n}", "func (s Serf) Start() error {\n\teventCh := make(chan serf.Event, 64)\n\n\t// Get the bind address\n\tbindAddr := os.Getenv(\"SERVUS_BIND_ADDRESS\")\n\tif len(bindAddr) == 0 {\n\t\tbindAddr = defaultBindAddr + \":\" + defaultBindPort\n\t}\n\taddr, strPort, err := net.SplitHostPort(bindAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(strPort) == 0 {\n\t\tstrPort = 
defaultBindPort\n\t}\n\tport, err := strconv.Atoi(strPort)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconf := serf.DefaultConfig()\n\t// Get tags from config settigns\n\tconf.Tags = map[string]string{\n\t\t\"role\": \"servus\",\n\t\t\"tag1\": \"foo\",\n\t\t\"tag2\": \"bar\",\n\t}\n\n\tconf.Init()\n\n\t// Get these parameters from config settings\n\tconf.MemberlistConfig.BindAddr = addr\n\tconf.MemberlistConfig.BindPort = port\n\tconf.NodeName = bindAddr\n\n\tconf.EventCh = eventCh\n\ts.eventCh = eventCh\n\n\tsrf, err := serf.Create(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.serf = srf\n\n\t// Join, move to func\n\t// Get members in the clusters to join\n\tleader := os.Getenv(\"SERVUS_LEADER_ADDRESS\")\n\tinitMembers := []string{leader}\n\n\tif len(leader) != 0 {\n\t\ts.initMembers = initMembers\n\t\tnum, err := s.serf.Join(s.initMembers, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Log?:\n\t\tfmt.Printf(\"Node join to the cluster with %d nodes\", num)\n\t} else {\n\t\t// Log?:\n\t\tfmt.Print(\"First node in the cluster\\n\")\n\t}\n\n\tgo s.serfEventHandlerLoop()\n\n\treturn nil\n}", "func (client ConversationsClient) GetActivityMembersMethodSender(req *http.Request) (*http.Response, error) {\n sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n return autorest.SendWithSender(client, req, sd...)\n }", "func (i *Info) Start() {\n\ti.Parent = i.Port\n\ti.Children = make(map[string]bool)\n\ti.ExpectedMsg = len(i.Neighbours)\n\n\tfor _, neighbor := range i.Neighbours {\n\t\tmsgOut := Models.Message{\n\t\t\tSource: i.Port,\n\t\t\tIntent: constants.IntentSendGo,\n\t\t\tData: \"Some starting message\",\n\t\t}\n\n\t\tif err := i.SendMsg(msgOut, neighbor); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n}", "func (_AuthContract *AuthContractCaller) Members(opts *bind.CallOpts, arg0 *big.Int) (common.Address, error) {\n\tvar (\n\t\tret0 = 
new(common.Address)\n\t)\n\tout := ret0\n\terr := _AuthContract.contract.Call(opts, out, \"members\", arg0)\n\treturn *ret0, err\n}", "func (k Keeper) SetMembers(ctx sdk.Context, Members types.Members) {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.MembersKey))\n\tb := k.cdc.MustMarshalBinaryBare(&Members)\n\tstore.Set(GetMembersIDBytes(Members.Id), b)\n}", "func (m *Group) SetMembers(value []DirectoryObjectable)() {\n m.members = value\n}", "func (s CreateMembersInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AccountDetails != nil {\n\t\tv := s.AccountDetails\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"AccountDetails\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (m *AdministrativeUnit) SetMembers(value []DirectoryObjectable)() {\n m.members = value\n}", "func (c *Client) ClanMembers(tag string, params url.Values) (members ClanMemberList, err error) {\n\tvar b []byte\n\tpath := \"/clans/%23\" + strings.ToUpper(tag) + \"/members\"\n\tif b, err = c.get(path, params); err == nil {\n\t\terr = json.Unmarshal(b, &members)\n\t}\n\treturn\n}", "func (c *Client) NewListIDMemberRequest(ctx context.Context, path string) (*http.Request, error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"https\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif c.JWTSigner != nil {\n\t\tif err := c.JWTSigner.Sign(req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn req, nil\n}", "func Start(numInstances int) error {\n\t// Ideally we should let the socket choose the port, but then\n\t// some things like the logger will not be set correctly.\n\tvar peers 
[]gubernator.PeerInfo\n\tport := 1111\n\tfor i := 0; i < numInstances; i++ {\n\t\tpeers = append(peers, gubernator.PeerInfo{\n\t\t\tHTTPAddress: fmt.Sprintf(\"localhost:%d\", port),\n\t\t\tGRPCAddress: fmt.Sprintf(\"localhost:%d\", port+1),\n\t\t})\n\t\tport += 2\n\t}\n\treturn StartWith(peers)\n}", "func (s *BaselimboListener) EnterMod_member_list(ctx *Mod_member_listContext) {}", "func startChat(arguments []string) bool {\n\n\t// Get rid off warning\n\t_ = arguments\n\n\tif conf == nil {\n\t\tdisplayError(\"/chatjoin: could not start listener without memberlist configuration\")\n\t\treturn false\n\t}\n\n\tif mlist == nil {\n\t\tdisplayError(\"/chatjoin: could not start listener without created memberlist\")\n\t\treturn false\n\t}\n\n\tif broadcasts == nil {\n\t\tdisplayError(\"/chatjoin: could not start listener without broadcasting memberlist\")\n\t}\n\n\tchatStop = false\n\n\tgo func() {\n\n\t\t// listen for TCP connections on localhost\n\t\tlistener, err := net.Listen(\"tcp\", \"localhost:0\")\n\t\tif err != nil {\n\t\t\tdisplayError(\"could not listen to localhost:0\", err)\n\t\t\treturn\n\t\t}\n\t\tdefer listener.Close()\n\n\t\tdisplayText(strings.Trim(fmt.Sprintf(\"listener.Addr(): %v\\n%s\", listener.Addr(),\n\t\t\tprompt), \"\\n\"))\n\n\t\tchatSelf.MsgType = chatmember.Member_JOIN\n\t\tchatSelf.Name = conf.Name\n\t\tchatSelf.Sender = listener.Addr().String()\n\n\t\t// Set timestamp for joined peer\n\t\tjointime := ptypes.TimestampNow()\n\t\tchatSelf.Timestamp = jointime\n\n\t\t//fmt.Printf(\"chatSelf: %v\\n\", chatSelf)\n\t\t//fmt.Printf(\"chatSelf.MsgType: %v\\n\", chatSelf.MsgType)\n\n\t\terr = joiningChat(chatSelf)\n\t\tif err != nil {\n\t\t\tdisplayError(\"could not join the chat\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// wait for connections\n\t\tfor {\n\t\t\t// accept connection\n\t\t\tconn, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\tdisplayError(\"could not accept connection\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif chatStop 
{\n\t\t\t\t_ = conn.Close()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// create a goroutine for connection\n\t\t\tgo func(conn net.Conn) {\n\n\t\t\t\t// read and print the message\n\t\t\t\tmsg, err := bufio.NewReader(conn).ReadString('\\n')\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == io.EOF {\n\t\t\t\t\t\t_ = conn.Close()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tdisplayError(\"could not read message\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif strings.Fields(msg)[0] == \"<left>\" {\n\t\t\t\t\tif bootstrapApi == nil {\n\t\t\t\t\t\tinitializeBootstrapApi()\n\t\t\t\t\t\tif bootstrapApi != nil {\n\t\t\t\t\t\t\tbootstrapData = bootstrapApi.Refill()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tdisplayColoredMessages(msg)\n\n\t\t\t\t// close connection\n\t\t\t\t_ = conn.Close()\n\t\t\t}(conn)\n\t\t}\n\t}()\n\treturn true\n}", "func (m MemberProperties) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tpopulate(objectMap, \"consortium\", m.Consortium)\n\tpopulate(objectMap, \"consortiumManagementAccountAddress\", m.ConsortiumManagementAccountAddress)\n\tpopulate(objectMap, \"consortiumManagementAccountPassword\", m.ConsortiumManagementAccountPassword)\n\tpopulate(objectMap, \"consortiumMemberDisplayName\", m.ConsortiumMemberDisplayName)\n\tpopulate(objectMap, \"consortiumRole\", m.ConsortiumRole)\n\tpopulate(objectMap, \"dns\", m.DNS)\n\tpopulate(objectMap, \"firewallRules\", m.FirewallRules)\n\tpopulate(objectMap, \"password\", m.Password)\n\tpopulate(objectMap, \"protocol\", m.Protocol)\n\tpopulate(objectMap, \"provisioningState\", m.ProvisioningState)\n\tpopulate(objectMap, \"publicKey\", m.PublicKey)\n\tpopulate(objectMap, \"rootContractAddress\", m.RootContractAddress)\n\tpopulate(objectMap, \"userName\", m.UserName)\n\tpopulate(objectMap, \"validatorNodesSku\", m.ValidatorNodesSKU)\n\treturn json.Marshal(objectMap)\n}", "func (m *AdministrativeUnitsAdministrativeUnitItemRequestBuilder) 
Members()(*AdministrativeUnitsItemMembersRequestBuilder) {\n return NewAdministrativeUnitsItemMembersRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}", "func (s StartMonitoringMembersOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (m *DeviceRequestBuilder) CheckMemberObjects()(*i553b650c39e50880eca4870435aea1e0efeb032bd90fd0433cb73afdaee8deea.CheckMemberObjectsRequestBuilder) {\n return i553b650c39e50880eca4870435aea1e0efeb032bd90fd0433cb73afdaee8deea.NewCheckMemberObjectsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (_BondedECDSAKeep *BondedECDSAKeepCaller) Members(opts *bind.CallOpts, arg0 *big.Int) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _BondedECDSAKeep.contract.Call(opts, out, \"members\", arg0)\n\treturn *ret0, err\n}", "func (m *Value) WithMembers(v map[string]xmlrpc.Value) *Value {\n\tm.KindMock = func() xmlrpc.Kind { return xmlrpc.Struct }\n\tm.MembersMock = func() []xmlrpc.Member { return m.membersFromMap(v) }\n\treturn m\n}", "func ListMembers(client *clientv3.Client) (*clientv3.MemberListResponse, error) {\n\tctx, cancel := context.WithTimeout(client.Ctx(), DefaultRequestTimeout)\n\tdefer cancel()\n\treturn client.MemberList(ctx)\n}", "func (s StaticMember) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"etag\", s.Etag)\n\tpopulate(objectMap, \"id\", s.ID)\n\tpopulate(objectMap, \"name\", s.Name)\n\tpopulate(objectMap, \"properties\", s.Properties)\n\tpopulate(objectMap, \"systemData\", s.SystemData)\n\tpopulate(objectMap, \"type\", s.Type)\n\treturn json.Marshal(objectMap)\n}", "func (s *BaselimboListener) EnterAdt_member_list(ctx *Adt_member_listContext) {}", "func StartPrometheusExporter(ctx context.Context, promAddr string, getRsc ResourceAvailable, update time.Duration, logger *log.Logger) {\n\n\tgo monitoringExporter(ctx, getRsc, update, logger)\n\n\t// 
start the prometheus http server for metrics\n\tgo func() {\n\t\tif err := runPrometheus(ctx, promAddr, logger); err != nil {\n\t\t\tlogger.Warn(fmt.Sprint(err, stack.Trace().TrimRuntime()))\n\t\t}\n\t}()\n\n}", "func (a *Agent) WANMembers() []serf.Member {\n\tif srv, ok := a.delegate.(*consul.Server); ok {\n\t\treturn srv.WANMembers()\n\t}\n\treturn nil\n}", "func (s Member) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.AccountId != nil {\n\t\tv := *s.AccountId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"AccountId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Email != nil {\n\t\tv := *s.Email\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Email\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.InvitedAt != nil {\n\t\tv := *s.InvitedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"InvitedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MasterId != nil {\n\t\tv := *s.MasterId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MasterId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MemberStatus != nil {\n\t\tv := *s.MemberStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MemberStatus\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.UpdatedAt != nil {\n\t\tv := *s.UpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"UpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\treturn nil\n}", "func CreateListClusterMembersRequest() (request *ListClusterMembersRequest) {\n\trequest = &ListClusterMembersRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Edas\", 
\"2017-08-01\", \"ListClusterMembers\", \"/pop/v5/resource/cluster_member_list\", \"Edas\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (m *Memberlist) Setup(t *Toystore) {\n\tmemberConfig := memberlist.DefaultLocalConfig()\n\tmemberConfig.BindAddr = t.Host\n\tmemberConfig.Name = t.Host\n\t// Set IndirectChecks to 0 so we see a local view of membership.\n\t// I.e. we don't care about nodes hidden by partitions.\n\tmemberConfig.IndirectChecks = 0\n\t// This is set really low for testing purposes. Should be ~100ms.\n\tmemberConfig.GossipInterval = time.Millisecond * 20\n\t// Sets delegate to handle membership change events.\n\tmemberConfig.Events = &MemberlistEvents{t}\n\n\tlist, err := memberlist.Create(memberConfig)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tm.list = list\n\tn := m.list.LocalNode()\n\tn.Meta = []byte(t.rpcAddress())\n\n\tif err != nil {\n\t\tpanic(\"Failed to create memberlist: \" + err.Error())\n\t}\n}", "func (c Routes) StartGin() {\n\tr := gin.Default()\n\tapi := r.Group(\"/api\")\n\t{\n\t\tapi.GET(\"/\", welcome)\n\t\tapi.GET(\"/users\", user.GetAllUsers)\n\t\tapi.POST(\"/users\", user.CreateUser)\n\t}\n\tr.Run(\":8000\")\n}", "func (o *DeleteAPI24PoliciesSmbMembersParams) SetMemberNames(memberNames []string) {\n\to.MemberNames = memberNames\n}", "func GiveMemberList(pc net.PacketConn, addr net.Addr, memberList *[]MemberID) {\n\treply, _ := json.Marshal(*memberList)\n\tpc.WriteTo(reply, addr)\n}", "func (m *DeviceRequestBuilder) CheckMemberGroups()(*ic7d58e9cd8eac2848f5fd58ee8fb59715aa65a362fdcf803751512a46c6834bf.CheckMemberGroupsRequestBuilder) {\n return ic7d58e9cd8eac2848f5fd58ee8fb59715aa65a362fdcf803751512a46c6834bf.NewCheckMemberGroupsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (in *MembersStatus) DeepCopy() *MembersStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MembersStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (mp MemberProperties) MarshalJSON() ([]byte, 
error) {\n\tobjectMap := make(map[string]interface{})\n\tif mp.Protocol != \"\" {\n\t\tobjectMap[\"protocol\"] = mp.Protocol\n\t}\n\tif mp.ValidatorNodesSku != nil {\n\t\tobjectMap[\"validatorNodesSku\"] = mp.ValidatorNodesSku\n\t}\n\tif mp.Password != nil {\n\t\tobjectMap[\"password\"] = mp.Password\n\t}\n\tif mp.Consortium != nil {\n\t\tobjectMap[\"consortium\"] = mp.Consortium\n\t}\n\tif mp.ConsortiumManagementAccountPassword != nil {\n\t\tobjectMap[\"consortiumManagementAccountPassword\"] = mp.ConsortiumManagementAccountPassword\n\t}\n\tif mp.ConsortiumRole != nil {\n\t\tobjectMap[\"consortiumRole\"] = mp.ConsortiumRole\n\t}\n\tif mp.ConsortiumMemberDisplayName != nil {\n\t\tobjectMap[\"consortiumMemberDisplayName\"] = mp.ConsortiumMemberDisplayName\n\t}\n\tif mp.FirewallRules != nil {\n\t\tobjectMap[\"firewallRules\"] = mp.FirewallRules\n\t}\n\treturn json.Marshal(objectMap)\n}", "func (v VirtualNetworkGatewayPolicyGroupMember) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"attributeType\", v.AttributeType)\n\tpopulate(objectMap, \"attributeValue\", v.AttributeValue)\n\tpopulate(objectMap, \"name\", v.Name)\n\treturn json.Marshal(objectMap)\n}", "func MembersAdd(w http.ResponseWriter, r *http.Request) {\n\tauth := service.GetSessionMember(r)\n\t// Parse form\n\tif err := r.ParseMultipartForm(setting.FileMaxSize); err != nil {\n\t\tLogger.Error(err.Error())\n\t}\n\n\t// Default avatar\n\tavatar := setting.SetDefaultAvatar().MemberAvatar\n\n\tmember := model.Member{}\n\tvar convertErr error\n\tvar teamList []int\n\n\tmember.LoginID = r.FormValue(\"loginID\")\n\tmember.Password = r.FormValue(\"password\")\n\tmember.Name = r.FormValue(\"name\")\n\tmember.RoleType, _ = strconv.Atoi(r.FormValue(\"role\"))\n\tmember.GenderType, convertErr = strconv.Atoi(r.FormValue(\"gender\"))\n\tmember.Comment = sql.NullString{String: r.FormValue(\"comment\"), Valid: true}\n\tmember.CompanyID, convertErr = 
strconv.Atoi(r.FormValue(\"company\"))\n\tmember.DepartmentID, convertErr = strconv.Atoi(r.FormValue(\"department\"))\n\tgithubAccount := r.FormValue(\"githubAccount\")\n\n\t// timestamp is time if user not input date\n\ttimestamp, _ := time.Parse(\"2006-01-02 15:04:05\", \"0001-01-01 00:00:00\")\n\tbirthdayInput, _ := time.Parse(\"01-02-2006\", r.FormValue(\"birthday\"))\n\tif birthdayInput == timestamp {\n\t\tmember.Birthday = mysql.NullTime{Time: birthdayInput, Valid: false}\n\t} else {\n\t\tmember.Birthday = mysql.NullTime{Time: birthdayInput, Valid: true}\n\t}\n\n\tfor _, value := range r.PostForm[\"team\"] {\n\t\tteamID, _ := strconv.Atoi(value)\n\t\tif teamID != 0 {\n\t\t\tteamList = append(teamList, teamID)\n\t\t}\n\t}\n\n\tif convertErr != nil {\n\t\tLogger.Error(convertErr.Error())\n\t}\n\n\tpasswordData := map[string]string{\n\t\t\"password\": member.Password,\n\t}\n\n\t// validate member data\n\tvalidateErr := model.ValidateMember(member, r.ContentLength)\n\n\tpasswordErr := model.ValidatePassword(passwordData)\n\tsnsAccountErr := model.ValidateSNSAccount(githubAccount)\n\n\t// if validate are ok\n\tif len(validateErr) == 0 && len(passwordErr) == 0 && len(snsAccountErr) == 0 {\n\n\t\t// get data of file uploaded\n\t\tfile, handler, err := r.FormFile(\"avatar\")\n\n\t\tif err == nil {\n\t\t\tavatar = handler.Filename\n\n\t\t\t//create temporary file to save image\n\t\t\ttempFile, err := ioutil.TempFile(setting.ImageBaseURL, \"*\"+avatar)\n\n\t\t\t//get path of image\n\t\t\tavatar = tempFile.Name()\n\t\t\tif err != nil {\n\t\t\t\tLogger.Error(err.Error())\n\t\t\t}\n\t\t\tdefer tempFile.Close()\n\n\t\t\t// read all of the contents of our uploaded file into a byte array\n\t\t\tfileBytes, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tLogger.Error(err.Error())\n\t\t\t}\n\n\t\t\t// write this byte array to our temporary file\n\t\t\ttempFile.Write(fileBytes)\n\n\t\t\t// if app running on EC2\n\t\t\tif setting.UseS3Service() {\n\t\t\t\t// upload 
file to aws S3\n\t\t\t\tservice.UploadImageToS3(avatar, setting.MemberFolderType)\n\t\t\t\t// save path on S3 bucket\n\t\t\t\tavatar = setting.S3BucketURL + setting.S3MemberFolder + filepath.Base(avatar)\n\t\t\t}\n\t\t} else if err != http.ErrMissingFile {\n\t\t\tLogger.Error(err.Error())\n\t\t}\n\n\t\tmember.PictureURL = sql.NullString{String: avatar, Valid: true}\n\n\t\tmemberID := model.SaveMember(member)\n\t\tsns := model.SNS{\n\t\t\tMemberID: memberID,\n\t\t\tGithub: githubAccount,\n\t\t}\n\t\tmodel.SaveSNSAccount(sns)\n\n\t\t// get array of team of user\n\t\tfor _, teamID := range teamList {\n\t\t\tif !model.CheckTeamMemberExist(teamID, memberID) {\n\t\t\t\tmodel.SaveTeamMember(teamID, memberID)\n\t\t\t}\n\t\t}\n\t\thttp.Redirect(w, r, \"/admin/members\", 301)\n\t}\n\n\t// get all team, company and department to select\n\tallTeams := model.GetAllTeam()\n\tcompanies := model.GetAllCompany()\n\tdepartments := model.GetAllDepartment()\n\n\ttemplateData := map[string]interface{}{\n\t\t\"allTeams\": allTeams.List,\n\t\t\"companies\": companies,\n\t\t\"departments\": departments,\n\t\t\"validateError\": validateErr,\n\t\t\"passwordError\": passwordErr,\n\t\t\"snsAccountError\": snsAccountErr,\n\t\t\"member\": member,\n\t\t\"teamList\": teamList,\n\t\t\"snsAccount\": githubAccount,\n\t\t\"title\": \"Add Member\",\n\t\t\"auth\": auth,\n\t\t\"tab\": setting.MembersTab,\n\t}\n\n\ttmpl := template.Must(template.ParseFiles(\"template/admin_members/member_add.tmpl\", setting.AdminTemplate))\n\tif err := tmpl.ExecuteTemplate(w, \"base\", templateData); err != nil {\n\t\tLogger.Error(err.Error())\n\t}\n}", "func (m *OrganizationManager) AddMembers(id string, memberIDs []string, opts ...RequestOption) (err error) {\n\tbody := struct {\n\t\tMembers []string `json:\"members\"`\n\t}{\n\t\tMembers: memberIDs,\n\t}\n\terr = m.Request(\"POST\", m.URI(\"organizations\", id, \"members\"), &body, opts...)\n\treturn\n}", "func (p *FakePeer) StartRequest() {\n\tp.lock.Lock()\n\tdefer 
p.lock.Unlock()\n\n\tp.status.PendingRequestCount++\n}", "func (m *member) Launch() error {\n\tvar err error\n\tif m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil {\n\t\treturn fmt.Errorf(\"failed to initialize the etcd server: %v\", err)\n\t}\n\tm.s.SyncTicker = time.Tick(500 * time.Millisecond)\n\tm.s.Start()\n\n\tm.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s.Cluster, m.s.RaftHandler())}\n\n\tfor _, ln := range m.PeerListeners {\n\t\ths := &httptest.Server{\n\t\t\tListener: ln,\n\t\t\tConfig: &http.Server{Handler: m.raftHandler},\n\t\t}\n\t\ths.Start()\n\t\tm.hss = append(m.hss, hs)\n\t}\n\tfor _, ln := range m.ClientListeners {\n\t\ths := &httptest.Server{\n\t\t\tListener: ln,\n\t\t\tConfig: &http.Server{Handler: etcdhttp.NewClientHandler(m.s)},\n\t\t}\n\t\ths.Start()\n\t\tm.hss = append(m.hss, hs)\n\t}\n\treturn nil\n}", "func (b *ChatMembersCollectionRequestBuilder) Request() *ChatMembersCollectionRequest {\n\treturn &ChatMembersCollectionRequest{\n\t\tBaseRequest: BaseRequest{baseURL: b.baseURL, client: b.client},\n\t}\n}", "func AllListMembers(w http.ResponseWriter, r *http.Request) {\r\n\tvar listMembers []ListMember\r\n\r\n\tdb.Find(&listMembers)\r\n\tjson.NewEncoder(w).Encode(listMembers)\r\n\t// fmt.Fprintf(w, \"All listMembers Endpoint Hit\")\r\n}", "func (v VPNServerConfigurationPolicyGroupMember) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"attributeType\", v.AttributeType)\n\tpopulate(objectMap, \"attributeValue\", v.AttributeValue)\n\tpopulate(objectMap, \"name\", v.Name)\n\treturn json.Marshal(objectMap)\n}", "func (r StartReplicationRequest) Send(ctx context.Context) (*StartReplicationResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &StartReplicationResponse{\n\t\tStartReplicationOutput: r.Request.Data.(*StartReplicationOutput),\n\t\tresponse: &aws.Response{Request: 
r.Request},\n\t}\n\n\treturn resp, nil\n}", "func ExampleSyncMembersClient_BeginCreateOrUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclient, err := armsql.NewSyncMembersClient(\"00000000-1111-2222-3333-444444444444\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := client.BeginCreateOrUpdate(ctx,\n\t\t\"syncgroupcrud-65440\",\n\t\t\"syncgroupcrud-8475\",\n\t\t\"syncgroupcrud-4328\",\n\t\t\"syncgroupcrud-3187\",\n\t\t\"syncmembercrud-4879\",\n\t\tarmsql.SyncMember{\n\t\t\tProperties: &armsql.SyncMemberProperties{\n\t\t\t\tDatabaseName: to.Ptr(\"syncgroupcrud-7421\"),\n\t\t\t\tDatabaseType: to.Ptr(armsql.SyncMemberDbTypeAzureSQLDatabase),\n\t\t\t\tServerName: to.Ptr(\"syncgroupcrud-3379.database.windows.net\"),\n\t\t\t\tSyncDirection: to.Ptr(armsql.SyncDirectionBidirectional),\n\t\t\t\tSyncMemberAzureDatabaseResourceID: to.Ptr(\"/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-65440/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328\"),\n\t\t\t\tUsePrivateLinkConnection: to.Ptr(true),\n\t\t\t\tUserName: to.Ptr(\"myUser\"),\n\t\t\t},\n\t\t},\n\t\tnil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// TODO: use response item\n\t_ = res\n}", "func (_AuthContract *AuthContractCallerSession) Members(arg0 *big.Int) (common.Address, error) {\n\treturn _AuthContract.Contract.Members(&_AuthContract.CallOpts, arg0)\n}", "func InitMemberList(name string, port int, seedServers []string, seedPort int) (*memberlist.Memberlist, int, error) {\n\tconf := memberlist.DefaultLANConfig()\n\tconf.Name = name\n\tconf.BindPort = port\n\n\tlist, err := 
memberlist.Create(conf)\n\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\tmyName := name + \":\" + strconv.Itoa(seedPort)\n\t// TODO Possibly examine # of nodes joined, if under a threshold... take action?\n\tprioritizedServers := prioritizeSeedServers(myName, seedServers)\n\tnodesJoined, err := list.Join(prioritizedServers)\n\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t}\n\n\tfor _, member := range list.Members() {\n\t\tlogrus.Printf(\"Member %s %s\\n\", member.Name, member.Addr)\n\t}\n\n\treturn list, nodesJoined, err\n}", "func (c Routes) StartGin() {\n\tr := gin.Default()\n\tr.Use(cors.Default())\n\tapi := r.Group(\"/api\")\n\t{\n\t\tapi.GET(\"/\", welcome)\n\t\tapi.GET(tasksResource, task.GetTasks)\n\t\tapi.GET(taskResource, task.GetTask)\n\t\tapi.POST(taskResource, task.CreateTask)\n\t\tapi.PATCH(taskResource, task.UpdateTaskStatus)\n\t\tapi.DELETE(taskResource, task.DeleteTask)\n\t}\n\n\tr.Run(\":8000\")\n}", "func StartWith(localPeers []gubernator.PeerInfo) error {\n\tfor _, peer := range localPeers {\n\t\tctx, cancel := ctxutil.WithTimeout(context.Background(), clock.Second*10)\n\t\td, err := gubernator.SpawnDaemon(ctx, gubernator.DaemonConfig{\n\t\t\tLogger: logrus.WithField(\"instance\", peer.GRPCAddress),\n\t\t\tGRPCListenAddress: peer.GRPCAddress,\n\t\t\tHTTPListenAddress: peer.HTTPAddress,\n\t\t\tDataCenter: peer.DataCenter,\n\t\t\tBehaviors: gubernator.BehaviorConfig{\n\t\t\t\t// Suitable for testing but not production\n\t\t\t\tGlobalSyncWait: clock.Millisecond * 50,\n\t\t\t\tGlobalTimeout: clock.Second * 5,\n\t\t\t\tBatchTimeout: clock.Second * 5,\n\t\t\t},\n\t\t})\n\t\tcancel()\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"while starting server for addr '%s'\", peer.GRPCAddress)\n\t\t}\n\n\t\t// Add the peers and daemons to the package level variables\n\t\tpeers = append(peers, gubernator.PeerInfo{\n\t\t\tGRPCAddress: d.GRPCListeners[0].Addr().String(),\n\t\t\tHTTPAddress: d.HTTPListener.Addr().String(),\n\t\t\tDataCenter: 
peer.DataCenter,\n\t\t})\n\t\tdaemons = append(daemons, d)\n\t}\n\n\t// Tell each instance about the other peers\n\tfor _, d := range daemons {\n\t\td.SetPeers(peers)\n\t}\n\treturn nil\n}", "func (client ConversationsClient) GetConversationPagedMembersMethodSender(req *http.Request) (*http.Response, error) {\n sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n return autorest.SendWithSender(client, req, sd...)\n }", "func (_m *IProvider) Start(_a0 map[string]*model.WorkerConfig, _a1 []*message.Message) {\n\t_m.Called(_a0, _a1)\n}", "func (s *Service) Start(id int64, apistartrequestbodymessage *ApiStartRequestBodyMessage) *StartCall {\n\tc := &StartCall{s: s, urlParams_: make(gensupport.URLParams)}\n\tc.id = id\n\tc.apistartrequestbodymessage = apistartrequestbodymessage\n\treturn c\n}", "func (s Member) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.AccountId != nil {\n\t\tv := *s.AccountId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"accountId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Email != nil {\n\t\tv := *s.Email\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"email\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.InvitedAt != nil {\n\t\tv := *s.InvitedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"invitedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MasterAccountId != nil {\n\t\tv := *s.MasterAccountId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"masterAccountId\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RelationshipStatus) > 0 {\n\t\tv := s.RelationshipStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"relationshipStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.UpdatedAt != nil {\n\t\tv := *s.UpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"updatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\treturn nil\n}", "func (r *SpacesMembersService) Get(name string) *SpacesMembersGetCall {\n\tc := &SpacesMembersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (s StaticMemberProperties) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"provisioningState\", s.ProvisioningState)\n\tpopulate(objectMap, \"region\", s.Region)\n\tpopulate(objectMap, \"resourceId\", s.ResourceID)\n\treturn json.Marshal(objectMap)\n}", "func (s CreateMemberInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Account != nil {\n\t\tv := s.Account\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"account\", v, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (nr *namedReceiver) Start(ctx context.Context, d Dest) error {\n\tmetricRecvTotal.WithLabelValues(d.Type.String(), \"START\")\n\treturn nr.Receiver.Start(ctx, d)\n}", "func (r StartOnDemandReplicationRunRequest) Send(ctx context.Context) (*StartOnDemandReplicationRunOutput, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.Request.Data.(*StartOnDemandReplicationRunOutput), nil\n}", "func (m *DeploymentAudience) SetMembers(value []UpdatableAssetable)() {\n err := m.GetBackingStore().Set(\"members\", value)\n if err != nil {\n panic(err)\n }\n}", "func (a *MembersAPI) Get(ctx context.Context, req *http.Request) api.Response {\n\tdefer req.Body.Close()\n\n\td, err := api.GetDaemon(ctx)\n\tif err != nil {\n\t\treturn api.InternalError(err)\n\t}\n\n\tlistTask := membership.NewList(\n\t\tmakeMembershipStateShim(d.State()),\n\t)\n\tnodes, err := listTask.Run()\n\tif err != nil {\n\t\treturn api.SmartError(err)\n\t}\n\n\tresult := make([]ClusterRaftNode, len(nodes))\n\tfor k, v := range nodes {\n\t\tresult[k] = ClusterRaftNode{\n\t\t\tServerName: v.ServerName,\n\t\t\tURL: v.URL,\n\t\t\tDatabase: v.Database,\n\t\t\tStatus: v.Status,\n\t\t\tMessage: v.Message,\n\t\t}\n\t}\n\n\treturn api.SyncResponse(true, result)\n}", "func (g *GroupsService) AddMembers(group Group, users ...string) (bool, *Response, error) {\n\treturn g.memberAction(group, \"$add-members\", groupRequestBody(users...), nil)\n}", "func (s *service) fillMembersData(ctx context.Context, members []models.Member) ([]models.Member, error) {\n\tfor index, member := range members {\n\t\tuser, _, err := s.store.UserGetByID(ctx, member.ID, false)\n\t\tif err != nil || user == nil {\n\t\t\treturn nil, NewErrUserNotFound(member.ID, err)\n\t\t}\n\n\t\tmembers[index] = models.Member{ID: user.ID, Username: user.Username, Role: member.Role}\n\t}\n\n\treturn members, 
nil\n}", "func (mem *Member) SendAll(msgType MessageType, msg []byte) {\n\tfor _, v := range mem.membershipList {\n\t\tSend(v.IPaddr.String()+\":\"+fmt.Sprint(Configuration.Service.port), msgType, msg)\n\t}\n}" ]
[ "0.5814435", "0.5814435", "0.57384104", "0.57161516", "0.5597986", "0.55359256", "0.54366916", "0.54287744", "0.53845984", "0.53503877", "0.5317368", "0.5217504", "0.5216037", "0.52065617", "0.50651157", "0.5003769", "0.49975806", "0.49753883", "0.4969502", "0.49631017", "0.49454927", "0.49449065", "0.49340752", "0.4856808", "0.485458", "0.48413134", "0.4807254", "0.4806039", "0.4804052", "0.47848982", "0.47094056", "0.4706309", "0.47001782", "0.46952394", "0.467297", "0.46661112", "0.46570295", "0.46551967", "0.46447968", "0.46331882", "0.4625224", "0.4617635", "0.46104917", "0.4601201", "0.45861486", "0.45805943", "0.45659158", "0.45489252", "0.45472848", "0.45461074", "0.45409724", "0.45401105", "0.4534537", "0.4531486", "0.45309636", "0.45245868", "0.45213944", "0.4516163", "0.45151576", "0.45127723", "0.45008808", "0.44914854", "0.447486", "0.4453301", "0.4445403", "0.44384268", "0.4434254", "0.44232106", "0.44228438", "0.4399985", "0.4395211", "0.4390597", "0.43749267", "0.43716678", "0.4370549", "0.4367868", "0.4364496", "0.43531293", "0.43526787", "0.4341828", "0.43384102", "0.43353024", "0.43310308", "0.43295994", "0.43291137", "0.432628", "0.43206733", "0.4312097", "0.4304495", "0.4301932", "0.43014273", "0.4296604", "0.42815942", "0.42760575", "0.4272409", "0.4271473", "0.42672315", "0.42601192", "0.42545816", "0.42543066" ]
0.7580239
0
SDKResponseMetdata returns the response metadata for the StartMonitoringMembers request.
func (r *StartMonitoringMembersResponse) SDKResponseMetdata() *aws.Response { return r.response }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s StartMonitoringMembersInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AccountIds != nil {\n\t\tv := s.AccountIds\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"accountIds\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.DetectorId != nil {\n\t\tv := *s.DetectorId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (r *CreateMembersResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateMembersResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r StartMonitoringMembersRequest) Send(ctx context.Context) (*StartMonitoringMembersResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &StartMonitoringMembersResponse{\n\t\tStartMonitoringMembersOutput: r.Request.Data.(*StartMonitoringMembersOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (r *CreateMemberResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (s CreateMembersInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Accounts != nil {\n\t\tv := s.Accounts\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Accounts\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.GraphArn != nil {\n\t\tv := *s.GraphArn\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"GraphArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Message != nil {\n\t\tv := *s.Message\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Message\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Member) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.AccountId != nil {\n\t\tv := *s.AccountId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"AccountId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Email != nil {\n\t\tv := *s.Email\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Email\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.InvitedAt != nil {\n\t\tv := *s.InvitedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"InvitedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MasterId != nil {\n\t\tv := *s.MasterId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MasterId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MemberStatus != nil {\n\t\tv := *s.MemberStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MemberStatus\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.UpdatedAt != nil {\n\t\tv := *s.UpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"UpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\treturn nil\n}", "func (s StartMonitoringMembersOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.UnprocessedAccounts != nil {\n\t\tv := s.UnprocessedAccounts\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"unprocessedAccounts\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s CreateMembersOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Members != nil {\n\t\tv := s.Members\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Members\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.UnprocessedAccounts != nil {\n\t\tv := s.UnprocessedAccounts\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"UnprocessedAccounts\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (r *CreateReplicationGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func CreateListClusterMembersResponse() (response *ListClusterMembersResponse) {\n\tresponse = &ListClusterMembersResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (s Member) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.AccountId != nil {\n\t\tv := *s.AccountId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"accountId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Email != nil {\n\t\tv := *s.Email\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"email\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.InvitedAt != nil {\n\t\tv := *s.InvitedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"invitedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MasterAccountId != nil {\n\t\tv := *s.MasterAccountId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"masterAccountId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RelationshipStatus) > 0 {\n\t\tv := s.RelationshipStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"relationshipStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.UpdatedAt != nil {\n\t\tv := *s.UpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"updatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\treturn nil\n}", "func (o ServerAdministratorsResponseOutput) Members() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v ServerAdministratorsResponse) []string { return v.Members }).(pulumi.StringArrayOutput)\n}", "func (s CreateMembersInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AccountDetails != nil {\n\t\tv := s.AccountDetails\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"AccountDetails\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (r *etcdClusterHealthImpl) Members(p graphql.ResolveParams) (interface{}, error) {\n\tresp := p.Source.(*corev2.HealthResponse)\n\treturn resp.ClusterHealth, nil\n}", "func (s 
*Status) Members(args *structs.GenericRequest, reply *structs.ServerMembersResponse) error {\n\t// Check node read permissions\n\tif aclObj, err := s.srv.ResolveToken(args.AuthToken); err != nil {\n\t\treturn err\n\t} else if aclObj != nil && !aclObj.AllowNodeRead() {\n\t\treturn structs.ErrPermissionDenied\n\t}\n\n\tserfMembers := s.srv.Members()\n\tmembers := make([]*structs.ServerMember, len(serfMembers))\n\tfor i, mem := range serfMembers {\n\t\tmembers[i] = &structs.ServerMember{\n\t\t\tName: mem.Name,\n\t\t\tAddr: mem.Addr,\n\t\t\tPort: mem.Port,\n\t\t\tTags: mem.Tags,\n\t\t\tStatus: mem.Status.String(),\n\t\t\tProtocolMin: mem.ProtocolMin,\n\t\t\tProtocolMax: mem.ProtocolMax,\n\t\t\tProtocolCur: mem.ProtocolCur,\n\t\t\tDelegateMin: mem.DelegateMin,\n\t\t\tDelegateMax: mem.DelegateMax,\n\t\t\tDelegateCur: mem.DelegateCur,\n\t\t}\n\t}\n\t*reply = structs.ServerMembersResponse{\n\t\tServerName: s.srv.config.NodeName,\n\t\tServerRegion: s.srv.config.Region,\n\t\tServerDC: s.srv.config.Datacenter,\n\t\tMembers: members,\n\t}\n\treturn nil\n}", "func (r *StartReplicationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListInstanceGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (m *TeamItemRequestBuilder) Members()(*ifc923048eff969ef232f17cdaf6c11e18676c5c9e2918bc19001d30cbdc4b5c1.MembersRequestBuilder) {\n return ifc923048eff969ef232f17cdaf6c11e18676c5c9e2918bc19001d30cbdc4b5c1.NewMembersRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (r *StartDBClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (m *Value) Members() []xmlrpc.Member { return m.MembersMock() }", "func (r *StartReportCreationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (o ServerAdministratorsResponsePtrOutput) Members() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *ServerAdministratorsResponse) []string {\n\t\tif v == nil 
{\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Members\n\t}).(pulumi.StringArrayOutput)\n}", "func (r *RunScheduledInstancesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeDBClusterParameterGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListEndpointGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (s CreateMemberInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Account != nil {\n\t\tv := s.Account\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"account\", v, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s GetUsageStatisticsInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.FilterBy != nil {\n\t\tv := s.FilterBy\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"filterBy\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.MaxResults != nil {\n\t\tv := *s.MaxResults\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"maxResults\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.NextToken != nil {\n\t\tv := *s.NextToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"nextToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SortBy != nil {\n\t\tv := s.SortBy\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"sortBy\", v, metadata)\n\t}\n\treturn nil\n}", "func (m *DeviceRequestBuilder) GetMemberObjects()(*i07bc89ff8c55d06b076d4f0a8ad636a90ed70691edcc9301ad5f2b5508af5d88.GetMemberObjectsRequestBuilder) {\n return i07bc89ff8c55d06b076d4f0a8ad636a90ed70691edcc9301ad5f2b5508af5d88.NewGetMemberObjectsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (r *StartSnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateInstancesFromSnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (m *Value) WithMembers(v map[string]xmlrpc.Value) *Value {\n\tm.KindMock = func() xmlrpc.Kind { return xmlrpc.Struct }\n\tm.MembersMock = func() []xmlrpc.Member { return m.membersFromMap(v) }\n\treturn m\n}", "func (r *GetPublicKeyResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (s StartSnapshotOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.BlockSize != nil {\n\t\tv := *s.BlockSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"BlockSize\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.KmsKeyArn != nil {\n\t\tv := *s.KmsKeyArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"KmsKeyArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OwnerId != nil {\n\t\tv := *s.OwnerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"OwnerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ParentSnapshotId != nil {\n\t\tv := 
*s.ParentSnapshotId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ParentSnapshotId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SnapshotId != nil {\n\t\tv := *s.SnapshotId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"SnapshotId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.StartTime != nil {\n\t\tv := *s.StartTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StartTime\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif len(s.Status) > 0 {\n\t\tv := s.Status\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Status\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Tags\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.VolumeSize != nil {\n\t\tv := *s.VolumeSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"VolumeSize\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (r *AssociateIpGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateGameServerGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateDBParameterGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListIngestionsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (cm *ClusterMember) Start() error {\n\tif cm.started {\n\t\treturn ErrClusterJoined\n\t}\n\n\tcm.started = true\n\n\tmi, err := cm.cmKVS.RegisterAgent(cm.name)\n\tif err != nil {\n\t\treturn &RegisterError{err: err, name: cm.name}\n\t}\n\n\tcm.modifiedIndex = mi\n\n\tgo cm.schedule(cm, 
\"poll\", poll, time.Second)\n\tgo cm.schedule(cm, \"refresh\", refresh, cm.cmKVS.CheckTTL/2)\n\n\treturn nil\n}", "func (r *ListFargateProfilesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (dm *DMMasterClient) GetRegisteredMembers() ([]string, []string, error) {\n\tquery := \"?master=true&worker=true\"\n\tendpoints := dm.getEndpoints(dmMembersURI + query)\n\tmemberResp, err := dm.getMember(endpoints)\n\n\tvar (\n\t\tregisteredMasters []string\n\t\tregisteredWorkers []string\n\t)\n\n\tif err != nil {\n\t\tzap.L().Error(\"get dm master status failed\", zap.Error(err))\n\t\treturn registeredMasters, registeredWorkers, err\n\t}\n\n\tfor _, member := range memberResp.Members {\n\t\tif masters := member.GetMaster(); masters != nil {\n\t\t\tfor _, master := range masters.GetMasters() {\n\t\t\t\tregisteredMasters = append(registeredMasters, master.Name)\n\t\t\t}\n\t\t} else if workers := member.GetWorker(); workers != nil {\n\t\t\tfor _, worker := range workers.GetWorkers() {\n\t\t\t\tregisteredWorkers = append(registeredWorkers, worker.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn registeredMasters, registeredWorkers, nil\n}", "func (m *ChatItemRequestBuilder) Members()(*i29250e65d3047fac7086c3da168f1dc97b716e57040772b108c92eb7e41e5082.MembersRequestBuilder) {\n return i29250e65d3047fac7086c3da168f1dc97b716e57040772b108c92eb7e41e5082.NewMembersRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (r *StartLabelDetectionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifySelfservicePermissionsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListNotebookInstanceLifecycleConfigsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (client ConversationsClient) GetActivityMembersMethodSender(req *http.Request) (*http.Response, error) {\n sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, 
client.RetryDuration, autorest.StatusCodesForRetry...))\n return autorest.SendWithSender(client, req, sd...)\n }", "func (s GetUsageStatisticsOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.NextToken != nil {\n\t\tv := *s.NextToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"nextToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Records != nil {\n\t\tv := s.Records\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"records\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (g *GetSupergroupMembersRequest) TypeInfo() tdp.Type {\n\ttyp := tdp.Type{\n\t\tName: \"getSupergroupMembers\",\n\t\tID: GetSupergroupMembersRequestTypeID,\n\t}\n\tif g == nil {\n\t\ttyp.Null = true\n\t\treturn typ\n\t}\n\ttyp.Fields = []tdp.Field{\n\t\t{\n\t\t\tName: \"SupergroupID\",\n\t\t\tSchemaName: \"supergroup_id\",\n\t\t},\n\t\t{\n\t\t\tName: \"Filter\",\n\t\t\tSchemaName: \"filter\",\n\t\t},\n\t\t{\n\t\t\tName: \"Offset\",\n\t\t\tSchemaName: \"offset\",\n\t\t},\n\t\t{\n\t\t\tName: \"Limit\",\n\t\t\tSchemaName: \"limit\",\n\t\t},\n\t}\n\treturn typ\n}", "func (s StartSnapshotInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ClientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Encrypted != nil {\n\t\tv := *s.Encrypted\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Encrypted\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.KmsKeyArn != nil {\n\t\tv := *s.KmsKeyArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"KmsKeyArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ParentSnapshotId != nil {\n\t\tv := *s.ParentSnapshotId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ParentSnapshotId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Tags\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Timeout != nil {\n\t\tv := *s.Timeout\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Timeout\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VolumeSize != nil {\n\t\tv := *s.VolumeSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"VolumeSize\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (r *StartImagePipelineExecutionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyReplicationGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (cgpr ContainersGetPropertiesResponse) NewMetadata() Metadata {\n\tmd := Metadata{}\n\tfor k, v := range cgpr.rawResponse.Header {\n\t\tif len(k) > mdPrefixLen {\n\t\t\tif prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) {\n\t\t\t\tmd[strings.ToLower(k[mdPrefixLen:])] = v[0]\n\t\t\t}\n\t\t}\n\t}\n\treturn md\n}", "func (g *Group) RequestMemberlist() {\n\tjoinMessage := &JoinMessage{\n\t\tMember: g.Member().ProtoMember(),\n\t}\n\tprotoJoinMessage, _ := 
ptypes.MarshalAny(joinMessage)\n\n\tm := &Message{\n\t\tMessageType: Message_INTRO_SYN,\n\t\tAddr: g.Addr,\n\t\tDetails: protoJoinMessage,\n\t}\n\n\t// Only join using primary coordinator\n\tfor _, coordinator := range g.coordinators {\n\t\tif coordinator == g.Addr {\n\t\t\tcontinue\n\t\t}\n\t\tSendTCP(coordinator, m)\n\t}\n}", "func (r *RegisterUserResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeSnapshotSchedulesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *AdminConfirmSignUpResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeVpcEndpointConnectionNotificationsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *StartAssociationsOnceResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (o *DashboardAllOfLinks) GetMembersOk() (*string, bool) {\n\tif o == nil || o.Members == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Members, true\n}", "func (s StartOnDemandReplicationRunOutput) SDKResponseMetadata() aws.Response {\n\treturn s.responseMetadata\n}", "func (o ServerGroupOutput) Members() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *ServerGroup) pulumi.StringArrayOutput { return v.Members }).(pulumi.StringArrayOutput)\n}", "func (r *CreateUserResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListOpenIDConnectProvidersResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (client ConversationsClient) GetActivityMembersMethodResponder(resp *http.Response) (result ListChannelAccountType, err error) {\n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result.Value),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (r *EnableKeyResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", 
"func (b *ChatRequestBuilder) Members() *ChatMembersCollectionRequestBuilder {\n\tbb := &ChatMembersCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/members\"\n\treturn bb\n}", "func (r *CreateProfileResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (o *DashboardAllOfLinks) SetMembers(v string) {\n\to.Members = &v\n}", "func (m *MockedCache) SMembers(key string) ([]string, error) {\n\targs := m.Called(key)\n\treturn args[0].([]string), args.Error(1)\n}", "func (r *ResetDBParameterGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (client ConversationsClient) GetConversationPagedMembersMethodResponder(resp *http.Response) (result PagedMembersResultType, err error) {\n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (r *DescribeJobsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyMountTargetSecurityGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetInstanceMetricDataResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetReusableDelegationSetLimitResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListProcessingJobsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateProjectVersionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *AssociateProactiveEngagementDetailsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListGitHubAccountTokenNamesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r 
*AddPermissionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *AcceptVpcPeeringConnectionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeContainerInstancesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetDevicesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListJournalKinesisStreamsForLedgerResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeInstancesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *StartResourceScanResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *RegisterUsageResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *RegisterUsageResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *EnableAllFeaturesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateDomainNameserversResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *AttachLoadBalancerTargetGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeTargetGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListCandidatesForAutoMLJobResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetDiscoverySummaryResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListAssessmentRunAgentsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ApplySecurityGroupsToClientVpnTargetNetworkResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (s StartJobOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.JobSummary != nil {\n\t\tv := s.JobSummary\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"jobSummary\", v, 
metadata)\n\t}\n\treturn nil\n}", "func (r *ListWorkersWithQualificationTypeResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}" ]
[ "0.56053686", "0.541501", "0.541501", "0.5233159", "0.5035915", "0.47210503", "0.46928957", "0.46867424", "0.46832535", "0.4680339", "0.46705097", "0.46681777", "0.46608528", "0.46559983", "0.46532053", "0.46119094", "0.46104077", "0.45924494", "0.4582352", "0.45484757", "0.4534586", "0.45334685", "0.45090187", "0.45086703", "0.4493312", "0.4481559", "0.4471237", "0.44690397", "0.4468174", "0.44646955", "0.44169468", "0.44126642", "0.44030303", "0.43937835", "0.4393202", "0.4392023", "0.43894637", "0.4376417", "0.43701792", "0.43624362", "0.43585533", "0.4355968", "0.43551147", "0.43545878", "0.4351242", "0.43491825", "0.43464914", "0.43415853", "0.43404934", "0.43393606", "0.4328526", "0.4318813", "0.4305905", "0.43002042", "0.4299411", "0.429852", "0.4297069", "0.4294224", "0.4292487", "0.42916107", "0.4287979", "0.42803872", "0.42777702", "0.42448026", "0.4244128", "0.4243894", "0.42359084", "0.42309225", "0.42309177", "0.42292503", "0.4228262", "0.42277738", "0.42192698", "0.42186874", "0.4215038", "0.421093", "0.4210126", "0.42085686", "0.42041132", "0.41921636", "0.41898525", "0.41834444", "0.418241", "0.41785848", "0.41758922", "0.41745642", "0.41729543", "0.41725713", "0.4171553", "0.4171553", "0.41678873", "0.416728", "0.4163914", "0.41633943", "0.41597492", "0.4158613", "0.41550735", "0.4154997", "0.41492003", "0.41475907" ]
0.6100442
0
IsAvailable detects whether this code is executed inside a CI environment.
func IsAvailable() bool { // Most CI providers have this. return os.Getenv("CI") != "" || os.Getenv("BUILD_NUMBER") != "" }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsAvailable() (bool, error) {\n\terr := modwevtapi.Load()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}", "func (pr *Provider) IsAvailable() bool {\n\treturn pr.available\n}", "func Available() bool {\n\treturn isAfterBuild17063()\n}", "func IsAvailable() bool {\n\tif isAvailable_ < 0 {\n\t\ttoolName_ = \"\"\n\t\tisAvailable_ = 0\n\n\t\tcandidates := []string{\n\t\t\t\"gvfs-trash\",\n\t\t\t\"trash\",\n\t\t}\n\n\t\tfor _, candidate := range candidates {\n\t\t\terr := exec.Command(\"type\", candidate).Run()\n\t\t\tok := false\n\t\t\tif err == nil {\n\t\t\t\tok = true\n\t\t\t} else {\n\t\t\t\terr = exec.Command(\"sh\", \"-c\", \"type \"+candidate).Run()\n\t\t\t\tif err == nil {\n\t\t\t\t\tok = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ok {\n\t\t\t\ttoolName_ = candidate\n\t\t\t\tisAvailable_ = 1\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t} else if isAvailable_ == 1 {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsAvailable() bool {\n\tvar kernel32Err, ntdllErr, rtlCopyMemErr, vAllocErr error\n\tkernel32, kernel32Err = syscall.LoadDLL(\"kernel32.dll\")\n\tntdll, ntdllErr = syscall.LoadDLL(\"ntdll.dll\")\n\tVirtualAlloc, vAllocErr = kernel32.FindProc(\"VirtualAlloc\")\n\tRtlCopyMemory, rtlCopyMemErr = ntdll.FindProc(\"RtlCopyMemory\")\n\tif kernel32Err != nil && ntdllErr != nil && rtlCopyMemErr != nil && vAllocErr != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func Available() bool {\n\treturn available()\n}", "func (p LeclercParser) IsAvailable() (bool, error) {\n\tproductResponse, err := p.getJSON(p.URL)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif len(productResponse.Items) > 0 {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}", "func (p FnacParser) IsAvailable() (bool, error) {\n\tchromeURL, err := getRemoteDebuggerURL()\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 15 * time.Second)\n\n\tdefer cancel()\n\n\t// create allocator 
context for use with creating a browser context later\n\tallocatorContext, cancel := chromedp.NewRemoteAllocator(ctx, chromeURL)\n\tdefer cancel()\n\n\t// create context\n\tctxt, cancel := chromedp.NewContext(allocatorContext)\n\tdefer cancel()\n\n\t// run task list\n\tvar nodes []*cdp.Node\n\tif err := chromedp.Run(ctxt,\n\t\tchromedp.Navigate(p.URL),\n\t\tchromedp.WaitVisible(\".f-productHeader-Title\"),\n\t\tchromedp.Nodes(`.f-buyBox-infos>.f-buyBox-availabilityStatus-unavailable`, &nodes, chromedp.ByQuery),\n\t); err != nil {\n\t\tctxt.Done()\n\t\tallocatorContext.Done()\n\n\t\treturn false, err\n\t}\n\n\tctxt.Done()\n\tallocatorContext.Done()\n\n\tif len(nodes) != 2 {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}", "func (d *conntrackInstallerInUbuntu) Available() (ok bool) {\n\tif runtime.GOOS == \"linux\" {\n\t\t_, err := exec.LookPath(\"apt-get\")\n\t\tok = err == nil\n\t}\n\treturn\n}", "func IsCI() bool {\n\tif _, ok := os.LookupEnv(\"CI\"); ok {\n\t\treturn true\n\t}\n\treturn false\n}", "func (gi *GrpcInvoker) IsAvailable() bool {\n\tclient := gi.getClient()\n\tif client != nil {\n\t\treturn gi.BaseInvoker.IsAvailable() && client.GetState() != connectivity.Shutdown\n\t}\n\n\treturn false\n}", "func Is() bool {\n\tif get(\"TRAVIS\") && get(\"CI\") {\n\t\treturn true\n\t}\n\treturn false\n}", "func (l Licence) IsAvailable() bool {\n\treturn l.Status != LicStatusGranted && l.AssigneeID.IsZero()\n}", "func isAvailable(name string) bool {\n\tregion := os.Getenv(\"REGION\")\n\n\tsvc := neptune.New(session.New(&aws.Config{\n\t\tRegion: aws.String(region),\n\t}))\n\n\trparams := &neptune.DescribeDBInstancesInput{\n\t\tDBInstanceIdentifier: aws.String(name),\n\t\tMaxRecords: aws.Int64(20),\n\t}\n\trresp, rerr := svc.DescribeDBInstances(rparams)\n\tif rerr != nil {\n\t\tfmt.Println(rerr)\n\t}\n\n\tfmt.Println(\"Checking to see if \" + name + \" is available...\")\n\tfmt.Println(\"Current Status: \" + *rresp.DBInstances[0].DBInstanceStatus)\n\tstatus := 
*rresp.DBInstances[0].DBInstanceStatus\n\tif status == \"available\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func AircrackAvailable() bool {\n\n\t_, err := getAircrackExecutable()\n\tif err != nil {\n\t\tterm.Error(\"Unable to find aircrack-ng: %s\\n\", err)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func runningInCI() bool {\n\tenv, err := Environment()\n\tif err != nil {\n\t\treturn false\n\t}\n\t// TODO: We're gating this to the dev environment first.\n\t// Note that this will require another bump of the current version value,\n\t// as the prod machines will (rightfully) think they're up to date.\n\treturn env == CiDev\n}", "func Available() bool {\n\t_, err := os.Stat(LogPath())\n\treturn err == nil\n}", "func (fi *FilterInvoker) IsAvailable() bool {\n\treturn fi.invoker.IsAvailable()\n}", "func IsAvailable() bool {\n\tif kernel32, kernel32Err := syscall.LoadDLL(\"kernel32.dll\"); kernel32Err == nil {\n\t\tif _, vAllocErr := kernel32.FindProc(\"VirtualAlloc\"); vAllocErr != nil {\n\t\t\tfmt.Printf(\"[-] VirtualAlloc error: %s\", vAllocErr.Error())\n\t\t\treturn false\n\t\t}\n\t\tif ntdll, ntdllErr := syscall.LoadDLL(\"ntdll.dll\"); ntdllErr == nil {\n\t\t\tif _, rtlCopyMemErr := ntdll.FindProc(\"RtlCopyMemory\"); rtlCopyMemErr == nil {\n\t\t\t\treturn true\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"[-] RtlCopyMemory error: %s\", rtlCopyMemErr.Error())\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfmt.Printf(\"[-] LoadDLL error: %s\", kernel32Err.Error())\n\t}\n\treturn false\n}", "func (b *Box) Available() bool {\n\treturn b.Status == StatusDeploying ||\n\t\tb.Status == StatusCreating ||\n\t\tb.Status == StatusError\n}", "func (host *Server) Available() bool {\n\tif host.Spec.ConsumerRef != nil {\n\t\treturn false\n\t}\n\tif host.GetDeletionTimestamp() != nil {\n\t\treturn false\n\t}\n\tif host.HasError() {\n\t\treturn false\n\t}\n\treturn true\n}", "func (d downloader) IsAvailable() bool {\n\treturn len(d.urls) != 0\n}", "func (d *vimInstallerInUbuntu) Available() (ok 
bool) {\n\tif runtime.GOOS == \"linux\" {\n\t\t_, err := exec.LookPath(\"apt-get\")\n\t\tok = err == nil\n\t}\n\treturn\n}", "func IsInGitlab() bool {\n\t_, isCI := os.LookupEnv(\"CI_TEST\")\n\treturn isCI\n}", "func (s *Storage) IsAvailable() error {\n\tif err := s.db.Ping(); err != nil {\n\t\treturn err\n\t}\n\n\t// This is necessary because once a database connection is initiallly\n\t// established subsequent calls to the Ping method return success even if the\n\t// database goes down.\n\t//\n\t// https://github.com/lib/pq/issues/533\n\tif _, err := s.db.Exec(\"SELECT 1\"); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *ETHController) IsAvailable(domain string) (bool, error) {\n\tname, err := UnqualifiedName(domain, c.domain)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"invalid name %s\", domain)\n\t}\n\treturn c.Contract.Available(nil, name)\n}", "func (s Scraper) IsAvailable() (bool, error) {\n\ttext, err := s.getTextInSelector()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn strings.Contains(strings.TrimSpace(strings.ToLower(text)), strings.TrimSpace(strings.ToLower(s.findText))), nil\n}", "func IsSupported() bool {\n\treturn true\n}", "func IsSupported() bool {\n\treturn true\n}", "func (c *Client) IsAvailable() bool {\n\tclient, _, err := c.selectSession(c.addr)\n\treturn err == nil &&\n\t\t// defensive check\n\t\tclient != nil\n}", "func IsSupported() bool {\n\treturn false\n}", "func (c *Completer) Available() bool {\n\tif c.unavailable {\n\t\treturn false\n\t}\n\n\t_, err := exec.LookPath(c.GocodePath)\n\tif err != nil {\n\t\tc.unavailable = true\n\t\treturn false\n\t}\n\n\treturn true\n}", "func IsCI() bool {\n\tif env.GetDefault(\"CI\", \"\") != \"\" {\n\t\treturn true\n\t}\n\n\tfor _, e := range Services {\n\t\tif env.GetDefault(e.Base, \"\") != \"\" {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (c *Client) IsAvailable() bool {\n\tc.muForCall.Lock()\n\tdefer c.muForCall.Unlock()\n\n\treturn !c.closing 
&& !c.shutdown\n}", "func (i *PowerVSImageScope) IsReady() bool {\n\treturn i.IBMPowerVSImage.Status.Ready\n}", "func Available() bool {\n\treturn DefaultCompleter.Available()\n}", "func (BuildEnv) IsCI() bool {\n\t// Travis, AppVeyor, CircleCI, and many others all set CI=true in their\n\t// environment by default.\n\treturn os.Getenv(\"CI\") == \"true\"\n}", "func isCommandAvailable(name string) bool {\n\tcmd := exec.Command(\"command\", name, \"-V\")\n\tif err := cmd.Run(); err != nil {\n\t\tFatalf(\"%s executable is not installed on this box, please run 'yum install -y %[1]s to install it'\", name, name)\n\t}\n\treturn true\n}", "func runAtCI() bool {\n\t_, ok := os.LookupEnv(\"CI\")\n\treturn ok\n}", "func (r *Rkt) Available() error {\n\treturn r.Runner.Run(\"command -v rkt\")\n}", "func IsInVexor() bool {\n\treturn os.Getenv(\"CI_NAME\") == \"VEXOR\"\n}", "func (f *azurePostgresFlexServerFetcher) isAvailable(server *armpostgresqlflexibleservers.Server, log logrus.FieldLogger) bool {\n\tstate := armpostgresqlflexibleservers.ServerState(azure.StringVal(server.Properties.State))\n\tswitch state {\n\tcase armpostgresqlflexibleservers.ServerStateReady, armpostgresqlflexibleservers.ServerStateUpdating:\n\t\treturn true\n\tcase armpostgresqlflexibleservers.ServerStateDisabled,\n\t\tarmpostgresqlflexibleservers.ServerStateDropping,\n\t\tarmpostgresqlflexibleservers.ServerStateStarting,\n\t\tarmpostgresqlflexibleservers.ServerStateStopped,\n\t\tarmpostgresqlflexibleservers.ServerStateStopping:\n\t\t// server state is known and it's not available.\n\t\treturn false\n\t}\n\tlog.Warnf(\"Unknown status type: %q. 
Assuming Azure PostgreSQL Flexible server %q is available.\",\n\t\tstate,\n\t\tazure.StringVal(server.Name))\n\treturn true\n}", "func (client *Client) IsAvailable() bool {\n\tclient.mu.Lock()\n\tdefer client.mu.Unlock()\n\treturn !client.shutdown && !client.closing\n}", "func (s *Slot) IsAvailable() bool {\n\treturn s.Car == nil\n}", "func (disk Disk) IsUsable() bool {\n\t_, err := os.Stat(disk.path)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (m *MetricsCacheType) IsAvailable() bool {\n\treturn m.metrics != nil && !m.updating\n}", "func isMSICredsAvailable(config map[string]string) bool {\n\t_, clientIDok := config[blockstorage.AzureClientID]\n\treturn (clientIDok && config[blockstorage.AzureTenantID] == \"\" &&\n\t\tconfig[blockstorage.AzureClientSecret] == \"\")\n}", "func (data *ProjectData) IsDeployable() bool {\n\treturn data.deployable && data.Lang.deployableConfig() != nil\n}", "func (c *Client) IsAvailable() bool {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn !c.shutdown && !c.closing\n}", "func (p *Processor) IsAllowed() bool {\n\t// return false since no startup tasks available for unix platform.\n\treturn false\n}", "func isAppAvailable(t *testing.T, healthCheckEndPoint string) bool {\n\tclient := &http.Client{}\n\tresp, err := client.Get(healthCheckEndPoint)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get a response from health probe: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\treturn resp.StatusCode == http.StatusNoContent\n}", "func (o *InterfaceCapability) HasAvailable() bool {\n\tif o != nil && o.Available != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (*staticSiteDeployer) IsServiceAvailableInRegion(region string) (bool, error) {\n\treturn partitions.IsAvailableInRegion(s3.EndpointsID, region)\n}", "func (rp *ResolverPool) Available() (bool, error) {\n\treturn true, nil\n}", "func (rp *ResolverPool) Available() (bool, error) {\n\treturn true, nil\n}", "func (h *ProxyHealth) IsAvailable() bool 
{\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\treturn h.isAvailable\n}", "func IsActivated() bool {\n\t_, ok := os.LookupEnv(\"GOVENV_ENABLE\")\n\treturn ok\n}", "func IsLoaded(name string) (bool, error) {\n\treturn false, ErrApparmorUnsupported\n}", "func (o *InterfaceCapability) GetAvailable() bool {\n\tif o == nil || o.Available == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Available\n}", "func (p *ProbeManager) KernelConfigAvailable() bool {\n\t// Check Kernel Config is available or not.\n\t// We are replicating BPFTools logic here to check if kernel config is available\n\t// https://elixir.bootlin.com/linux/v5.7/source/tools/bpf/bpftool/feature.c#L390\n\tinfo := unix.Utsname{}\n\terr := unix.Uname(&info)\n\tif err != nil {\n\t\treturn false\n\t}\n\trelease := strings.TrimSpace(string(bytes.Trim(info.Release[:], \"\\x00\")))\n\n\t// Any error checking these files will return Kernel config not found error\n\tif _, err := os.Stat(fmt.Sprintf(\"/boot/config-%s\", release)); err != nil {\n\t\tif _, err = os.Stat(\"/proc/config.gz\"); err != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (_OracleMgr *OracleMgrCallerSession) HasAvailability() (bool, error) {\n\treturn _OracleMgr.Contract.HasAvailability(&_OracleMgr.CallOpts)\n}", "func (s *GitLabSourceStatus) IsReady() bool {\n\treturn gitLabSourceCondSet.Manage(s).IsHappy()\n}", "func (k LocalClient) IsPresent() bool {\n\t_, err := k.LookPath()\n\treturn err == nil\n}", "func IsInProduction() bool {\n\treturn runtime.GOOS == \"linux\"\n}", "func (s *Service) IsSupported() bool {\n\tfileExists := s.d.isFileExists(\"/data/local/tmp/minicap\")\n\tif !fileExists {\n\t\terr := s.Install()\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\tout, err := s.d.shell(\"LD_LIBRARY_PATH=/data/local/tmp /data/local/tmp/minicap -i\")\n\tif err != nil {\n\t\treturn false\n\t}\n\tsupported := strings.Contains(out, \"height\") && strings.Contains(out, \"width\")\n\treturn supported\n}", "func (client 
*ClientImpl) CheckAvailability(ctx context.Context, args CheckAvailabilityArgs) error {\n\tlocationId, _ := uuid.Parse(\"97c893cc-e861-4ef4-8c43-9bad4a963dee\")\n\t_, err := client.Client.Send(ctx, http.MethodGet, locationId, \"6.0-preview.1\", nil, nil, nil, \"\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func IsKubectlAvailable() (bool, error) {\n\n\t_, err := executeCommandFunc(\"kubectl\", []string{})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func ServiceAvailable(ctx *Context, url string, timeout time.Duration) bool {\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url)\n\n\tclient := &http.Client{Timeout: timeout}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"error\", err, \"available\", false)\n\t\tLog(ERROR, ctx, \"ServiceAvailable\", \"url\", url, \"error\", err, \"available\", false)\n\t\treturn false\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"code\", resp.StatusCode, \"available\", false)\n\t\treturn false\n\t}\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"available\", true)\n\treturn true\n}", "func (f *Facility) Available(t int) bool {\n\treturn t >= f.BuildAfter && f.BuildAfter >= 0\n}", "func (m *Manager) IsAvailableResource(item interface{}) bool {\n\tif v, ok := item.(BinConfig); ok {\n\t\treturn utils.FileExists(v.Path) || utils.DirectoryExists(v.Path)\n\t}\n\tif v, ok := item.(WeightConfig); ok {\n\t\treturn utils.FileExists(v.Path)\n\t}\n\tif v, ok := item.(ConfigConfig); ok {\n\t\treturn utils.FileExists(v.Path)\n\t}\n\treturn false\n}", "func (p *AbstractRunProvider) IsRunning() bool {\n\treturn p.running\n}", "func IsAvaliable() (ava bool) {\n\n\tcpuRate, memRate, err := common.GetSystemUsageRate()\n\tif err != nil {\n\t\tlog.DefaultLogger.Errorf(model.AlertDumpKey + \" failed to get system usage rate info.\")\n\t\treturn 
false\n\t}\n\n\tif cpuRate < DumpCpuMaxRate && memRate < DumpMemMaxRate {\n\t\tif log.DefaultLogger.GetLogLevel() >= log.DEBUG {\n\t\t\tlog.DefaultLogger.Debugf(\"%s cpuRate:%s is less than max rate %s, memRate:%s is less than max rate %s\", model.LogDumpKey, cpuRate, memRate, DumpCpuMaxRate, DumpMemMaxRate)\n\t\t}\n\t\treturn true\n\t}\n\n\tif log.DefaultLogger.GetLogLevel() >= log.DEBUG {\n\t\tlog.DefaultLogger.Debugf(\"%s cpuRate:%s, memRate:%s, one or both of them are larger than max rate. Max cpu rate %s. Max mem rate %s\", model.LogDumpKey, cpuRate, memRate, DumpCpuMaxRate, DumpMemMaxRate)\n\t}\n\treturn false\n}", "func (c *Carton) Available() bool {\n\tfor _, box := range *c.Boxes {\n\t\tif box.Available() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (c *Module) IsInstallableToApex() bool {\n\tif shared, ok := c.linker.(interface {\n\t\tshared() bool\n\t}); ok {\n\t\t// Stub libs and prebuilt libs in a versioned SDK are not\n\t\t// installable to APEX even though they are shared libs.\n\t\treturn shared.shared() && !c.IsStubs() && c.ContainingSdk().Unversioned()\n\t} else if _, ok := c.linker.(testPerSrc); ok {\n\t\treturn true\n\t}\n\treturn false\n}", "func Supports() bool {\n\tif htesting.SupportsAll() {\n\t\treturn true\n\t}\n\treturn getRstExecPath() != \"\"\n}", "func IsUnavailable(err error) bool {\r\n\tvar t Unavailable\r\n\treturn errors.As(err, &t)\r\n}", "func IsSupported(algorithmID AlgorithmID) bool {\n\tvar supportedAlgorithms = map[AlgorithmID]bool{\n\t\tCiota: true,\n\t\tSnow: true,\n\t}\n\treturn supportedAlgorithms[algorithmID]\n}", "func IsUnavailable(err error) bool {\n\treturn errors.Is(err, ErrUnavailable)\n}", "func (jbobject *JavaIoInputStream) Available() (int, error) {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"available\", javabind.Int)\n\tif err != nil {\n\t\tvar zero int\n\t\treturn zero, err\n\t}\n\treturn jret.(int), nil\n}", "func IsReady(name string, timing ...time.Duration) feature.StepFn 
{\n\treturn k8s.IsReady(GVR(), name, timing...)\n}", "func IsProd() bool {\n\tprod := os.Getenv(\"KUBERNETES\")\n\tif prod == \"true\" {\n\t\tlog.Println(\"truuue!\")\n\t\treturn true\n\t}\n\tlog.Println(\"false!!!!!!!!!\")\n\treturn false\n}", "func (r *Reddit) IsAvailable(name string) (bool, error) {\n\treturn fetch.IsNotFound(r.client, fmt.Sprintf(\"https://www.reddit.com/user/%s\", url.QueryEscape(name)))\n}", "func (c *Config) Available(file ...string) bool {\n\tvar name string\n\tif len(file) > 0 && file[0] != \"\" {\n\t\tname = file[0]\n\t} else {\n\t\tname = c.defaultName\n\t}\n\tif path, _ := c.GetFilePath(name); path != \"\" {\n\t\treturn true\n\t}\n\tif GetContent(name) != \"\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func (g *Game) isLocoAvailableForDevelopment(loco *Loco) bool {\n\tif loco.Obsolete {\n\t\treturn false\n\t}\n\t// if there's no initial order or existing order, loco can't\n\t// be developed.\n\thasPips := loco.InitialOrders.Pips != 0\n\tfor _, d := range loco.ExistingOrders {\n\t\thasPips = hasPips || d.Pips != 0\n\t}\n\tif !hasPips {\n\t\treturn false\n\t}\n\t// from here on, we can develop the loco unless the player can't.\n\tp := g.getCurrentPlayer()\n\tif loco.DevelopmentCost > p.Money {\n\t\treturn false\n\t}\n\tfor _, f := range p.Factories {\n\t\tif f.Key == loco.Key {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (r *SkaffoldRunner) HasDeployed() bool {\n\treturn r.hasDeployed\n}", "func (obj *ECDSCluster) IsReady() bool {\n\treturn obj.Status.ActualState == StateDeployed\n}", "func (_OracleMgr *OracleMgrCaller) HasAvailability(opts *bind.CallOpts) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _OracleMgr.contract.Call(opts, out, \"_hasAvailability\")\n\treturn *ret0, err\n}", "func (r *runnerMachinesCoordinator) getAvailableMachine() bool {\n\tr.availableLock.Lock()\n\tdefer r.availableLock.Unlock()\n\n\tif r.available == 0 {\n\t\treturn false\n\t}\n\n\tr.available--\n\treturn 
true\n}", "func (_m *IManagerHelper) IsCommandAvailable(cmd string) bool {\n\tret := _m.Called(cmd)\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func(string) bool); ok {\n\t\tr0 = rf(cmd)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\treturn r0\n}", "func IsAvailable(port int) bool {\n\tif port < minTCPPort || port > maxTCPPort {\n\t\treturn false\n\t}\n\tconn, err := net.Listen(\"tcp\", fmt.Sprintf(\"127.0.0.1:%d\", port))\n\tif err != nil {\n\t\treturn false\n\t}\n\tconn.Close()\n\treturn true\n}", "func (mr *MockProjectManagerServiceClientMockRecorder) IsProjectAvailable(ctx, in interface{}, opts ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{ctx, in}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IsProjectAvailable\", reflect.TypeOf((*MockProjectManagerServiceClient)(nil).IsProjectAvailable), varargs...)\n}", "func (r Virtual_ReservedCapacityGroup_Instance) GetAvailableFlag() (resp bool, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_ReservedCapacityGroup_Instance\", \"getAvailableFlag\", nil, &r.Options, &resp)\n\treturn\n}", "func (_Cakevault *CakevaultCallerSession) Available() (*big.Int, error) {\n\treturn _Cakevault.Contract.Available(&_Cakevault.CallOpts)\n}", "func sdkIsSupported(goversion string) bool {\n\tfor _, e := range sdk.supportedList {\n\t\tif e == goversion {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (m *Machine) SupportsCheckingIsBootstrapped() bool {\n\t// Right now, we can only check if bootstrapping has\n\t// completed if we are using a bootstrapper that allows\n\t// for us to inject ssh keys into the guest.\n\n\tif m.sshKeys != nil {\n\t\treturn m.machineContext.HasInjectedCapkSSHKeys(m.sshKeys.PublicKey)\n\t}\n\treturn false\n}", "func Supported() bool {\n\treturn true\n}", "func IsLive() bool {\n\treturn config.GetString(\"name\") == ENV_PRODUCTION\n}", "func (p *preImpl) checkUsable(ctx context.Context, pkgs map[string]struct{}) error 
{\n\tctx, st := timing.Start(ctx, \"check_arc\")\n\tdefer st.End()\n\n\tctx, cancel := context.WithTimeout(ctx, 5*time.Second)\n\tdefer cancel()\n\n\t// Check that the init process is the same as before. Otherwise, ARC was probably restarted.\n\tif pid, err := InitPID(); err != nil {\n\t\treturn err\n\t} else if pid != p.origInitPID {\n\t\treturn errors.Errorf(\"init process changed from %v to %v; probably crashed\", p.origInitPID, pid)\n\t}\n\n\t// Check that the package manager service is running.\n\tconst pkg = \"android\"\n\tif _, ok := pkgs[pkg]; !ok {\n\t\treturn errors.Errorf(\"pm didn't list %q among %d package(s)\", pkg, len(pkgs))\n\t}\n\n\t// TODO(nya): Should we also check that p.cr is still usable?\n\treturn nil\n}", "func IsTokenAvailable(token string) bool {\n\tdefer mutex.Unlock()\n\tmutex.Lock()\n\n\t_, ok := tokenAvailable[token]\n\treturn !ok\n}", "func (s *azureMachineService) isAvailabilityZoneSupported() bool {\n\tazSupported := false\n\n\tfor _, supportedLocation := range azure.SupportedAvailabilityZoneLocations {\n\t\tif s.machineScope.Location() == supportedLocation {\n\t\t\tazSupported = true\n\n\t\t\treturn azSupported\n\t\t}\n\t}\n\n\ts.machineScope.V(2).Info(\"Availability Zones are not supported in the selected location\", \"location\", s.machineScope.Location())\n\treturn azSupported\n}" ]
[ "0.71369237", "0.7105502", "0.70088315", "0.6875512", "0.67455816", "0.66983813", "0.6655391", "0.6622176", "0.6427865", "0.64022094", "0.6388223", "0.63691276", "0.6366663", "0.63461685", "0.6302954", "0.62916225", "0.6270563", "0.6238505", "0.62135977", "0.6200425", "0.61904687", "0.6123658", "0.607128", "0.6071006", "0.6054613", "0.60500187", "0.60482997", "0.6041337", "0.6041337", "0.6020341", "0.60203123", "0.6014847", "0.5978197", "0.59728396", "0.59547687", "0.5935846", "0.5935187", "0.5930028", "0.59125954", "0.5908195", "0.5903099", "0.58845466", "0.5853084", "0.5824974", "0.58112085", "0.5791849", "0.5787094", "0.5782903", "0.57658726", "0.57632565", "0.57617056", "0.57360244", "0.5718939", "0.5716567", "0.5716567", "0.57162213", "0.56917214", "0.5684639", "0.5678304", "0.5661581", "0.5648841", "0.5636029", "0.5603416", "0.5596916", "0.55950665", "0.5582552", "0.5554805", "0.5544828", "0.5544238", "0.55366063", "0.5527071", "0.5525835", "0.5520767", "0.5516907", "0.5506508", "0.54984766", "0.54917634", "0.5474449", "0.54727757", "0.5466611", "0.5460624", "0.54589075", "0.5455494", "0.54356134", "0.5432979", "0.54260415", "0.54139835", "0.540913", "0.54044265", "0.5397982", "0.53653777", "0.5362969", "0.53565896", "0.5352426", "0.5350094", "0.53493935", "0.534791", "0.53313476", "0.5324618", "0.5323898" ]
0.8841041
0
ReadResponse reads a server response into the received o.
func (o *DeleteInstanceStackV4Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { case 200: result := NewDeleteInstanceStackV4OK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil default: return nil, runtime.NewAPIError("unknown error", response, response.Code()) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *ResourceHandler) ReadResponse(dataOut unsafe.Pointer, bytesToRead int32, bytesRead *int32, callback *Callback) int32 {\n\treturn lookupResourceHandlerProxy(d.Base()).ReadResponse(d, dataOut, bytesToRead, bytesRead, callback)\n}", "func (o *GetServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *InteractionBindReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInteractionBindOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewInteractionBindNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewInteractionBindInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *InteractionUnbindReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInteractionUnbindOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := 
NewInteractionUnbindNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewInteractionUnbindInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (r *ResponseReader) ReadResponse(req *Request) (res *Response, err error) {\n\tres = CreateEmptyResponse(req)\n\t_, err = readFirstLine(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = readHeaders(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t_, err = readBodyContent(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn res, nil\n}", "func (c *Conn) ReadResponse(rmsg *Response) error {\n\tdata, err := c.ReadDataUnit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcolor.Printf(\"@{c}<!-- RESPONSE -->\\n%s\\n\\n\", string(data))\n\terr = xml.Unmarshal(data, rmsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// color.Fprintf(os.Stderr, \"@{y}%s\\n\", spew.Sprintf(\"%+v\", msg))\n\tif len(rmsg.Results) != 0 {\n\t\tr := rmsg.Results[0]\n\t\tif r.IsError() {\n\t\t\treturn r\n\t\t}\n\t}\n\treturn nil\n}", "func (o *VerifyConnectionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewVerifyConnectionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetAvailableReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() 
{\n\n\tcase 200:\n\t\tresult := NewGetAvailableOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *ClosePositionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewClosePositionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewClosePositionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 401:\n\t\tresult := NewClosePositionUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewClosePositionNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 405:\n\t\tresult := NewClosePositionMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *DescribeServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDescribeServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewDescribeServerBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewDescribeServerNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 409:\n\t\tresult := NewDescribeServerConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewDescribeServerInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetServerSessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetServerSessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetServerSessionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewGetServerSessionUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetServerSessionNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewGetServerSessionInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := 
ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested GET /dsmcontroller/namespaces/{namespace}/servers/{podName}/session returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *StartReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewStartOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (resp *PharosResponse) readResponse() {\n\tif !resp.hasBeenRead && resp.Response != nil && resp.Response.Body != nil {\n\t\tresp.data, resp.Error = ioutil.ReadAll(resp.Response.Body)\n\t\tresp.Response.Body.Close()\n\t\tresp.hasBeenRead = true\n\t}\n}", "func (o *HelloWorldReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHelloWorldOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewHelloWorldBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewHelloWorldInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (reader *BasicRpcReader) ReadResponse(r io.Reader, method string, requestID int32, resp proto.Message) error {\n\trrh := &hadoop.RpcResponseHeaderProto{}\n\terr := readRPCPacket(r, rrh, resp)\n\tif err != nil 
{\n\t\treturn err\n\t} else if int32(rrh.GetCallId()) != requestID {\n\t\treturn errors.New(\"unexpected sequence number\")\n\t} else if rrh.GetStatus() != hadoop.RpcResponseHeaderProto_SUCCESS {\n\t\treturn &NamenodeError{\n\t\t\tmethod: method,\n\t\t\tmessage: rrh.GetErrorMsg(),\n\t\t\tcode: int(rrh.GetErrorDetail()),\n\t\t\texception: rrh.GetExceptionClassName(),\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *UpdateAntivirusServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewUpdateAntivirusServerNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tresult := NewUpdateAntivirusServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *HasEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHasEventsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewHasEventsUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewHasEventsForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetV2Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch 
response.Code() {\n\tcase 200:\n\t\tresult := NewGetV2OK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewGetV2InternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SaveReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewSaveNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewSaveInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *TestWriteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewTestWriteOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewTestWriteUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *AllConnectionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() 
{\n\tcase 200:\n\t\tresult := NewAllConnectionsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewAllConnectionsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewAllConnectionsNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SendDataToDeviceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSendDataToDeviceOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewSendDataToDeviceBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewSendDataToDeviceInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *HealthNoopReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHealthNoopOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PutOutOfRotationReader) 
ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewPutOutOfRotationNoContent()\n\t\tresult.HttpResponse = response\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\terrorResult := kbcommon.NewKillbillError(response.Code())\n\t\tif err := consumer.Consume(response.Body(), &errorResult); err != nil && err != io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errorResult\n\t}\n}", "func (o *GetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *StatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewStatusOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewStatusUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewStatusForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *ReplaceServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult 
:= NewReplaceServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewReplaceServerAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewReplaceServerBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewReplaceServerNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewReplaceServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func ReadResponse(r *bfe_bufio.Reader, req *Request) (*Response, error) {\n\ttp := textproto.NewReader(r)\n\tresp := &Response{\n\t\tRequest: req,\n\t}\n\n\t// Parse the first line of the response.\n\tline, err := tp.ReadLine()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t}\n\t\treturn nil, err\n\t}\n\tf := strings.SplitN(line, \" \", 3)\n\tif len(f) < 2 {\n\t\treturn nil, &badStringError{\"malformed HTTP response\", line}\n\t}\n\treasonPhrase := \"\"\n\tif len(f) > 2 {\n\t\treasonPhrase = f[2]\n\t}\n\tresp.Status = f[1] + \" \" + reasonPhrase\n\tresp.StatusCode, err = strconv.Atoi(f[1])\n\tif err != nil {\n\t\treturn nil, &badStringError{\"malformed HTTP status code\", f[1]}\n\t}\n\n\tresp.Proto = f[0]\n\tvar ok bool\n\tif resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {\n\t\treturn nil, &badStringError{\"malformed HTTP version\", resp.Proto}\n\t}\n\n\t// Parse the response headers.\n\tmimeHeader, err := 
tp.ReadMIMEHeader()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp.Header = Header(mimeHeader)\n\n\tfixPragmaCacheControl(resp.Header)\n\n\terr = readTransfer(resp, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (o *PostChatroomsChannelHashReadReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewPostChatroomsChannelHashReadOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 403:\n\t\tresult := NewPostChatroomsChannelHashReadForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *TogglePacketGeneratorsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewTogglePacketGeneratorsCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *FrontPutBinaryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewFrontPutBinaryOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SystemPingReader) ReadResponse(response 
runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewSystemPingOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewSystemPingInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SendDummyAlertReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewSendDummyAlertOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewSendDummyAlertBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewSendDummyAlertNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetViewsConnectionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetViewsConnectionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetViewsConnectionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, 
runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *SyncCopyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSyncCopyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewSyncCopyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *PostPatientsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewPostPatientsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewPostPatientsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 405:\n\t\tresult := NewPostPatientsMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (c *Conn) readResponse(res *response_) error {\n\terr := c.readDataUnit()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = IgnoreEOF(scanResponse.Scan(c.decoder, res))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.Result.IsError() {\n\t\treturn res.Result\n\t}\n\treturn nil\n}", "func (o *AllConnectionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) 
(interface{}, error) {\n switch response.Code() {\n \n case 200:\n result := NewAllConnectionsOK()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return result, nil\n \n case 400:\n result := NewAllConnectionsBadRequest()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return nil, result\n \n case 404:\n result := NewAllConnectionsNotFound()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return nil, result\n \n default:\n return nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n }\n}", "func (o *GetMsgVpnReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetMsgVpnOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tresult := NewGetMsgVpnDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (r *Response) Read(p []byte) (n int, err error) {\n\n\tif r.Error != nil {\n\t\treturn -1, r.Error\n\t}\n\n\treturn r.RawResponse.Body.Read(p)\n}", "func (o *PostPciLinksMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostPciLinksMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostPciLinksMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 
2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *THSRAPIODFare2121Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewTHSRAPIODFare2121OK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 299:\n\t\tresult := NewTHSRAPIODFare2121Status299()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 304:\n\t\tresult := NewTHSRAPIODFare2121NotModified()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *PostGatewayConnectNetaddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewPostGatewayConnectNetaddressNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostGatewayConnectNetaddressDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *DNSGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDNSGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, 
nil\n\tdefault:\n\t\tresult := NewDNSGetDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetGreetStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetGreetStatusOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostAPIV2EventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostAPIV2EventsNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPostAPIV2EventsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPostAPIV2EventsForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *CreateAntivirusServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewCreateAntivirusServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn 
result, nil\n\n\tdefault:\n\t\tresult := NewCreateAntivirusServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *PostCarsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewPostCarsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 405:\n\t\tresult := NewPostCarsMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *LogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewLogOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewLogNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *ChatGetConnectedReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewChatGetConnectedOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult 
:= NewChatGetConnectedBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 401:\n\t\tresult := NewChatGetConnectedUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewChatGetConnectedNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *WebModifyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewWebModifyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewWebModifyAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewWebModifyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetHyperflexServerModelsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetHyperflexServerModelsMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetHyperflexServerModelsMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewGetHyperflexServerModelsMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *KillQueryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewKillQueryNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewKillQueryBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewKillQueryNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 422:\n\t\tresult := NewKillQueryUnprocessableEntity()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetProgressionViewReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetProgressionViewOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetProgressionViewBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses 
defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *UpdateRackTopoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUpdateRackTopoOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUpdateRackTopoBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *UtilTestReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUtilTestOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetByUIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetByUIDOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetByUIDNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetMeReader) ReadResponse(response 
runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetMeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetMeDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *Delete1Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewDelete1NoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDelete1NotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *RevokeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewRevokeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewRevokeUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewRevokeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, 
runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostGatewayDisconnectNetaddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewPostGatewayDisconnectNetaddressNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostGatewayDisconnectNetaddressDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetProtocolsUsingGETReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetProtocolsUsingGETOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *DestroySessionUsingPOSTReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDestroySessionUsingPOSTOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *CompleteTransactionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) 
{\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewCompleteTransactionNoContent()\n\t\tresult.HttpResponse = response\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\terrorResult := kbcommon.NewKillbillError(response.Code())\n\t\tif err := consumer.Consume(response.Body(), &errorResult); err != nil && err != io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errorResult\n\t}\n}", "func (o *GetMapNameEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetMapNameEventsOK(o.writer)\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetMapNameEventsNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *RecoveryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewRecoveryOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewRecoveryInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetPeersReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) 
{\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetPeersOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 403:\n\t\tresult := NewGetPeersForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *InstallEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInstallEventsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SetMemoRequiredReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSetMemoRequiredOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewSetMemoRequiredBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewSetMemoRequiredInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *UpdateRackTopoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := 
NewUpdateRackTopoOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUpdateRackTopoBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewUpdateRackTopoNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewUpdateRackTopoInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetVoicesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetVoicesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PatchHyperflexServerModelsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPatchHyperflexServerModelsMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPatchHyperflexServerModelsMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, 
nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *BounceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tresult := NewBounceDefault(response.Code())\n\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\treturn nil, err\n\t}\n\tif response.Code()/100 == 2 {\n\t\treturn result, nil\n\t}\n\treturn nil, result\n}", "func (o *PostHyperflexHxdpVersionsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostHyperflexHxdpVersionsMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostHyperflexHxdpVersionsMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetObmsLibraryIdentifierReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetObmsLibraryIdentifierOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewGetObmsLibraryIdentifierNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\tresult := NewGetObmsLibraryIdentifierDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *DeleteApplianceRestoresMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, 
error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteApplianceRestoresMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDeleteApplianceRestoresMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewDeleteApplianceRestoresMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *UserQuerySessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUserQuerySessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUserQuerySessionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewUserQuerySessionInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested GET /sessionbrowser/namespaces/{namespace}/gamesession returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *GetDiscoverReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetDiscoverOK()\n\t\tif err := result.readResponse(response, 
consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (r *overwriteConsumerReader) ReadResponse(resp runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tif r.forStatusCode == ForAllStatusCodes || resp.Code() == r.forStatusCode {\n\t\treturn r.requestReader.ReadResponse(resp, r.consumer)\n\t}\n\n\treturn r.requestReader.ReadResponse(resp, consumer)\n}", "func (o *UnclaimTrafficFilterLinkIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUnclaimTrafficFilterLinkIDOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUnclaimTrafficFilterLinkIDBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewUnclaimTrafficFilterLinkIDInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetDebugRequestReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetDebugRequestOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewGetDebugRequestNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *ChangeaspecificSpeedDialReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewChangeaspecificSpeedDialNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostMemoryArraysMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostMemoryArraysMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostMemoryArraysMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (c *Client) readResponse(conn net.Conn) ([]byte, error) {\n\tif c.Timeout > 0 {\n\t\t_ = conn.SetReadDeadline(time.Now().Add(c.Timeout))\n\t}\n\n\tproto := \"udp\"\n\tif _, ok := conn.(*net.TCPConn); ok {\n\t\tproto = \"tcp\"\n\t}\n\n\tif proto == \"udp\" {\n\t\tbufSize := c.UDPSize\n\t\tif bufSize == 0 {\n\t\t\tbufSize = dns.MinMsgSize\n\t\t}\n\t\tresponse := make([]byte, bufSize)\n\t\tn, err := conn.Read(response)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn response[:n], nil\n\t}\n\n\t// If we got here, this is a TCP connection\n\t// so we should read a 2-byte prefix first\n\treturn readPrefixed(conn)\n}", "func (o *PayReader) ReadResponse(response runtime.ClientResponse, consumer 
runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPayOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPayBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewPayNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 409:\n\t\tresult := NewPayConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested POST /platform/public/namespaces/{namespace}/payment/orders/{paymentOrderNo}/pay returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *CountReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewCountOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewCountBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostNodesIdentifierObmIdentifyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 201:\n\t\tresult := NewPostNodesIdentifierObmIdentifyCreated()\n\t\tif err := result.readResponse(response, 
consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewPostNodesIdentifierObmIdentifyNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\tresult := NewPostNodesIdentifierObmIdentifyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetInterpreterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetInterpreterOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetInterpreterNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *DeleteEventsEventIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewDeleteEventsEventIDNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 401:\n\t\tresult := NewDeleteEventsEventIDUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewDeleteEventsEventIDNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", 
response, response.Code())\n\t}\n}", "func (o *UtilityServiceReadyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUtilityServiceReadyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewUtilityServiceReadyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *HTTPGetPersistenceItemDataReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHTTPGetPersistenceItemDataOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewHTTPGetPersistenceItemDataNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SubscriptionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSubscriptionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *PostEquipmentIoExpandersMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) 
(interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostEquipmentIoExpandersMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostEquipmentIoExpandersMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *FrontSessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewFrontSessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (pr *PushedRequest) ReadResponse(ctx context.Context) (*http.Response, error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\tpr.Cancel()\n\t\tpr.pushedStream.bufPipe.CloseWithError(ctx.Err())\n\t\treturn nil, ctx.Err()\n\tcase <-pr.pushedStream.peerReset:\n\t\treturn nil, pr.pushedStream.resetErr\n\tcase resErr := <-pr.pushedStream.resc:\n\t\tif resErr.err != nil {\n\t\t\tfmt.Println(resErr.err.Error())\n\t\t\tpr.Cancel()\n\t\t\tpr.pushedStream.bufPipe.CloseWithError(resErr.err)\n\t\t\treturn nil, resErr.err\n\t\t}\n\t\tresErr.res.Request = pr.Promise\n\t\tresErr.res.TLS = pr.pushedStream.cc.tlsState\n\t\treturn resErr.res, resErr.err\n\t}\n}", "func (o *GetZippedReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tresult := NewGetZippedDefault(response.Code())\n\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\treturn nil, err\n\t}\n\tif response.Code()/100 == 2 {\n\t\treturn result, nil\n\t}\n\treturn 
nil, result\n}", "func (o *DeleteFirmwareUpgradesMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteFirmwareUpgradesMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDeleteFirmwareUpgradesMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewDeleteFirmwareUpgradesMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetEtherPhysicalPortsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetEtherPhysicalPortsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetEtherPhysicalPortsDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *ZoneStreamReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewZoneStreamOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the 
swagger spec\", response, response.Code())\n\t}\n}", "func (o *ByNamespaceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewByNamespaceOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewByNamespaceNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetRequestTrackerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetRequestTrackerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 403:\n\t\tresult := NewGetRequestTrackerForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewGetRequestTrackerNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}" ]
[ "0.7640225", "0.7607834", "0.75210214", "0.7509121", "0.74803215", "0.74724656", "0.7433606", "0.74244606", "0.7375357", "0.7367311", "0.73589337", "0.73551553", "0.7350114", "0.7347274", "0.7346054", "0.733966", "0.7336042", "0.73239547", "0.7315819", "0.73155594", "0.7310195", "0.730769", "0.72904205", "0.7287086", "0.72826135", "0.72742283", "0.7274111", "0.72655845", "0.726384", "0.7262403", "0.7255057", "0.72496617", "0.72492456", "0.72479755", "0.72409135", "0.7224629", "0.722366", "0.7219326", "0.7216009", "0.72122216", "0.72110355", "0.72099286", "0.7209348", "0.72004783", "0.71978456", "0.719778", "0.71926844", "0.7177653", "0.71745974", "0.71737057", "0.716626", "0.7155474", "0.71500206", "0.7149536", "0.7148374", "0.7143972", "0.7143686", "0.7141745", "0.71397567", "0.713703", "0.7136677", "0.7136661", "0.7135863", "0.7135147", "0.71337897", "0.71312535", "0.7124361", "0.7123878", "0.71200526", "0.7120036", "0.7119569", "0.71148854", "0.7104891", "0.7100936", "0.70989054", "0.70989", "0.70984536", "0.70977753", "0.709657", "0.70961034", "0.70941985", "0.70932794", "0.70886916", "0.70850074", "0.7083912", "0.7080819", "0.7078785", "0.70775825", "0.70765215", "0.7076268", "0.7070042", "0.70699906", "0.7068155", "0.7068122", "0.7066828", "0.70625323", "0.70621973", "0.70599294", "0.70577264", "0.7054454", "0.70509636" ]
0.0
-1
NewDeleteInstanceStackV4OK creates a DeleteInstanceStackV4OK with default headers values
func NewDeleteInstanceStackV4OK() *DeleteInstanceStackV4OK { return &DeleteInstanceStackV4OK{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func deleteStack(name string) error {\n\tfmt.Printf(\"DEBUG:: deleting stack %v\\n\", name)\n\tcfg, err := external.LoadDefaultAWSConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsvc := cloudformation.New(cfg)\n\tdsreq := svc.DeleteStackRequest(&cloudformation.DeleteStackInput{StackName: aws.String(name)})\n\t_, err = dsreq.Send(context.TODO())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Delete(ctx context.Context, serviceName string, logger *zerolog.Logger) error {\n\tawsConfig, awsConfigErr := spartaAWS.NewConfig(ctx, logger)\n\tif awsConfigErr != nil {\n\t\treturn awsConfigErr\n\t}\n\tawsCloudFormation := awsv2CF.NewFromConfig(awsConfig)\n\n\texists, err := spartaCF.StackExists(ctx, serviceName, awsConfig, logger)\n\tif nil != err {\n\t\treturn err\n\t}\n\tlogger.Info().\n\t\tBool(\"Exists\", exists).\n\t\tStr(\"Name\", serviceName).\n\t\tMsg(\"Stack existence check\")\n\n\tif exists {\n\n\t\tparams := &awsv2CF.DeleteStackInput{\n\t\t\tStackName: awsv2.String(serviceName),\n\t\t}\n\t\tresp, err := awsCloudFormation.DeleteStack(ctx, params)\n\t\tif nil != resp {\n\t\t\tlogger.Info().\n\t\t\t\tInterface(\"Response\", resp).\n\t\t\t\tMsg(\"Delete request submitted\")\n\t\t}\n\t\treturn err\n\t}\n\tlogger.Info().Msg(\"Stack does not exist\")\n\treturn nil\n}", "func (y *YogClient) DeleteStack(request DeleteStackRequest) (DeleteStackResponse, YogError) {\n\tret := DeleteStackResponse{}\n\n\treturn ret, YogError{}\n}", "func (c *Client) DeleteStack(stackSlug string) error {\n\treturn c.request(\"DELETE\", fmt.Sprintf(\"/api/instances/%s\", stackSlug), nil, nil, nil)\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeGlobalHellos(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName 
string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:hellos/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, 
err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeInterfaceAttributesInterfaceHellos(ctx context.Context, name string, interfaceId string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:interface-attributes/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:hellos/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type 
header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (s *Cloudformation) DeleteStack() (err error) {\n\tsess := s.config.AWSSession\n\tsvc := cloudformation.New(sess)\n\n\tstackInputs := cloudformation.DeleteStackInput{}\n\tstackInputs.SetStackName(s.StackName())\n\n\t_, err = svc.DeleteStack(&stackInputs)\n\treturn\n}", "func NewDelete(appName string) *commander.CommandWrapper {\n\treturn &commander.CommandWrapper{\n\t\tHandler: &Delete{},\n\t\tHelp: &commander.CommandDescriptor{\n\t\t\tName: \"delete\",\n\t\t\tShortDescription: \"Delete a 
server.\",\n\t\t\tLongDescription: `Delete a server will destroy the world and container and the version file.`,\n\t\t\tArguments: \"name\",\n\t\t\tExamples: []string{\"\", \"my_server\"},\n\t\t},\n\t}\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisInterfacesInterfaceLevelsLevelHelloAuthenticationKeychain(ctx context.Context, name string, identifier string, protocolName string, interfaceId string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:interfaces/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:hello-authentication/frinx-openconfig-network-instance:keychain/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTe(ctx context.Context, name string, nodeId 
string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || 
localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupTransport(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:transport/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) 
DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisInterfacesInterfaceLevelsLevelHelloAuthentication(ctx context.Context, name string, identifier string, protocolName string, interfaceId string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:interfaces/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:hello-authentication/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to 
determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceVlansVlan(ctx context.Context, name string, vlanId int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:vlans/frinx-openconfig-network-instance:vlan/{vlan-id}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"vlan-id\"+\"}\", fmt.Sprintf(\"%v\", vlanId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn 
localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeInterfaceAttributesInterfaceHellosConfig(ctx context.Context, name string, interfaceId string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:interface-attributes/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:hellos/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type 
header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeGlobalSoftPreemption(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:soft-preemption/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != 
nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeGlobalHellosConfig(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:hellos/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to 
determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisInterfacesInterfaceLevelsLevelHelloAuthenticationKey(ctx context.Context, name string, identifier string, protocolName string, interfaceId string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:interfaces/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:hello-authentication/frinx-openconfig-network-instance:key/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept 
header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeInterfaceAttributes(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:interface-attributes/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", 
fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisInterfacesInterfaceLevelsLevelPacketCountersEsh(ctx context.Context, name string, 
identifier string, protocolName string, interfaceId string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:interfaces/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:packet-counters/frinx-openconfig-network-instance:esh/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set 
Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceVlans(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:vlans/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: 
localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeInterfaceAttributesInterfaceProtection(ctx context.Context, name string, interfaceId string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:interface-attributes/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:protection/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = 
localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func newDeleteCmd(clientset *client.ConfigSet) *cobra.Command {\n\tvar file string\n\tdeleteCmd := &cobra.Command{\n\t\tUse: \"delete\",\n\t\tShort: \"Delete knative resource\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\ts.Namespace = client.Namespace\n\t\t\tif err := s.DeleteYAML(file, args, concurrency, clientset); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t},\n\t}\n\n\tdeleteCmd.Flags().StringVarP(&file, \"file\", \"f\", \"serverless.yaml\", \"Delete functions defined in yaml\")\n\tdeleteCmd.Flags().IntVarP(&concurrency, \"concurrency\", \"c\", 3, \"Number of concurrent deletion 
threads\")\n\tdeleteCmd.AddCommand(cmdDeleteConfiguration(clientset))\n\tdeleteCmd.AddCommand(cmdDeleteRevision(clientset))\n\tdeleteCmd.AddCommand(cmdDeleteService(clientset))\n\tdeleteCmd.AddCommand(cmdDeleteRoute(clientset))\n\tdeleteCmd.AddCommand(cmdDeleteChannel(clientset))\n\tdeleteCmd.AddCommand(cmdDeleteTask(clientset))\n\tdeleteCmd.AddCommand(cmdDeleteTaskRun(clientset))\n\tdeleteCmd.AddCommand(cmdDeletePipelineResource(clientset))\n\n\treturn deleteCmd\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisLevelsLevelAuthenticationKeychain(ctx context.Context, name string, identifier string, protocolName string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:authentication/frinx-openconfig-network-instance:keychain/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func deleteInstance(params martini.Params, r render.Render) {\n\tinstanceName := params[\"name\"]\n\n\tif !instanceExists(instanceName) {\n\t\tfmt.Println(\"Instance with specified name does 
not exist in provision table\")\n\t\tr.Text(400, \"Bad Request\")\n\t\treturn\n\t}\n\n\tsvc := neptune.New(session.New(&aws.Config{\n\t\tRegion: aws.String(os.Getenv(\"REGION\")),\n\t}))\n\n\tinstanceParamsDelete := &neptune.DeleteDBInstanceInput{\n\t\tDBInstanceIdentifier: aws.String(instanceName),\n\t\tSkipFinalSnapshot: aws.Bool(true),\n\t}\n\n\tclusterParamsDelete := &neptune.DeleteDBClusterInput{\n\t\tDBClusterIdentifier: aws.String(instanceName),\n\t\tSkipFinalSnapshot: aws.Bool(true),\n\t}\n\n\tinstanceResp, instanceErr := svc.DeleteDBInstance(instanceParamsDelete)\n\tname := *instanceParamsDelete.DBInstanceIdentifier\n\tif instanceErr != nil {\n\t\tfmt.Println(instanceErr.Error())\n\t\toutput500Error(r, instanceErr)\n\t\treturn\n\t}\n\tfmt.Println(\"Deletion in progress for instance \" + *instanceResp.DBInstance.DBInstanceIdentifier)\n\n\tclusterResp, clusterErr := svc.DeleteDBCluster(clusterParamsDelete)\n\tif clusterErr != nil {\n\t\tfmt.Println(instanceErr.Error())\n\t\toutput500Error(r, clusterErr)\n\t\treturn\n\t}\n\tfmt.Println(\"Deletion in progress for cluster \" + *clusterResp.DBCluster.DBClusterIdentifier)\n\n\t_, err := pool.Exec(\"DELETE FROM provision WHERE name=$1\", name)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\toutput500Error(r, err)\n\t\treturn\n\t}\n\n\tr.JSON(200, map[string]string{\"Response\": \"Instance deletion in progress\"})\n\n\tusername := *instanceParamsDelete.DBInstanceIdentifier\n\tdeleteUserPolicy(username)\n\tdeleteAccessKey(username)\n\tdeleteUser(username)\n\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsLdpTargeted(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := 
a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:ldp/frinx-openconfig-network-instance:targeted/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn 
localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgp(ctx context.Context, name string, identifier string, protocolName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif 
localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func Delete(c *cli.Context) {\n\tprinter.Progress(\"Kombusting\")\n\n\tfileName := c.Args().Get(0)\n\tif fileName == \"\" {\n\t\tprinter.Fatal(\n\t\t\tfmt.Errorf(\"Can't upsert file, no source template provided\"),\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Add the path to the source template file you want to generate like: `kombustion upsert template.yaml`.\",\n\t\t\t),\n\t\t\t\"https://www.kombustion.io/api/manifest/\",\n\t\t)\n\t}\n\n\tmanifestFile := manifest.FindAndLoadManifest()\n\n\tenvironment := c.String(\"environment\")\n\n\tstackName := cloudformation.GetStackName(manifestFile, fileName, environment, c.String(\"stack-name\"))\n\n\tregion := c.String(\"region\")\n\tif region == \"\" {\n\t\t// If no region was provided by the cli 
flag, check for the default in the manifest\n\t\tif manifestFile.Region != \"\" {\n\t\t\tregion = manifestFile.Region\n\t\t}\n\t}\n\n\ttasks.DeleteStack(\n\t\tstackName,\n\t\tc.GlobalString(\"profile\"),\n\t\tregion,\n\t)\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstance(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, 
localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeInterfaceAttributesInterfaceAuthentication(ctx context.Context, name string, interfaceId string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:interface-attributes/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:authentication/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func NewDeleteStackActivity(awsSessionFactory AWSFactory) *DeleteStackActivity {\n\treturn &DeleteStackActivity{\n\t\tawsSessionFactory: awsSessionFactory,\n\t}\n}", "func (a 
*FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocols(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, 
localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (rm *resourceManager) newDeleteRequestPayload(\n\tr *resource,\n) (*svcsdk.DeleteStageInput, error) {\n\tres := &svcsdk.DeleteStageInput{}\n\n\tif r.ko.Spec.APIID != nil {\n\t\tres.SetApiId(*r.ko.Spec.APIID)\n\t}\n\tif r.ko.Spec.StageName != nil {\n\t\tres.SetStageName(*r.ko.Spec.StageName)\n\t}\n\n\treturn res, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisInterfacesInterfaceLevelsLevelPacketCountersUnknown(ctx context.Context, name string, identifier string, protocolName string, interfaceId string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:interfaces/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:packet-counters/frinx-openconfig-network-instance:unknown/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept 
header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsLspsConstrainedPathTunnelsTunnelP2pTunnelAttributesP2pPrimaryPathP2pPrimaryPathAdminGroups(ctx context.Context, name string, tunnelName string, p2pPrimaryPathName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:lsps/frinx-openconfig-network-instance:constrained-path/frinx-openconfig-network-instance:tunnels/frinx-openconfig-network-instance:tunnel/{tunnel-name}/frinx-openconfig-network-instance:p2p-tunnel-attributes/frinx-openconfig-network-instance:p2p-primary-path/frinx-openconfig-network-instance:p2p-primary-path/{p2p-primary-path-name}/frinx-openconfig-network-instance:admin-groups/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"tunnel-name\"+\"}\", fmt.Sprintf(\"%v\", tunnelName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"p2p-primary-path-name\"+\"}\", fmt.Sprintf(\"%v\", p2pPrimaryPathName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := 
a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeInterfaceAttributesInterfaceProtectionConfig(ctx context.Context, name string, interfaceId string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:interface-attributes/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:protection/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", 
name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) 
DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborTransport(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:transport/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif 
localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisInterfacesInterfaceLevelsLevelHelloAuthenticationConfig(ctx context.Context, name string, identifier string, protocolName string, interfaceId string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:interfaces/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:hello-authentication/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept 
header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeGlobalGracefulRestart(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:graceful-restart/\"\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupLoggingOptions(ctx 
context.Context, name string, identifier string, protocolName string, peerGroupName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:logging-options/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine 
the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (m *MockCloudformationAPI) DeleteStack(*cloudformation.DeleteStackInput) (*cloudformation.DeleteStackOutput, error) {\n\treturn &cloudformation.DeleteStackOutput{}, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeGlobalSoftPreemptionConfig(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:soft-preemption/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := 
ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupTransportConfig(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:transport/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), 
-1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroups(ctx context.Context, name string, identifier string, protocolName string, nodeId string) (*http.Response, 
error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, 
localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMpls(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type 
header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeGlobal(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:global/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, 
err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsLspsConstrainedPathTunnelsTunnelP2pTunnelAttributesP2pPrimaryPathP2pPrimaryPathAdminGroupsConfig(ctx context.Context, name string, tunnelName string, p2pPrimaryPathName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:lsps/frinx-openconfig-network-instance:constrained-path/frinx-openconfig-network-instance:tunnels/frinx-openconfig-network-instance:tunnel/{tunnel-name}/frinx-openconfig-network-instance:p2p-tunnel-attributes/frinx-openconfig-network-instance:p2p-primary-path/frinx-openconfig-network-instance:p2p-primary-path/{p2p-primary-path-name}/frinx-openconfig-network-instance:admin-groups/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"tunnel-name\"+\"}\", fmt.Sprintf(\"%v\", tunnelName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"p2p-primary-path-name\"+\"}\", fmt.Sprintf(\"%v\", p2pPrimaryPathName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsLdpNeighborsNeighborAuthentication(ctx context.Context, name string, lsrId string, labelSpaceId int32, nodeId string) (*http.Response, error) {\n\tvar 
(\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:ldp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{lsr-id}/{label-space-id}/frinx-openconfig-network-instance:authentication/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"lsr-id\"+\"}\", fmt.Sprintf(\"%v\", lsrId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"label-space-id\"+\"}\", fmt.Sprintf(\"%v\", labelSpaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = 
localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (client *CloudServicesClient) deleteInstancesCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginDeleteInstancesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/delete\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif options != nil && options.Parameters != nil {\n\t\treturn req, runtime.MarshalAsJSON(req, *options.Parameters)\n\t}\n\treturn req, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeNeighbors(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:neighbors/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept 
header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeInterfaceAttributesInterfaceAuthenticationConfig(ctx context.Context, name string, interfaceId string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:interface-attributes/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:authentication/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := 
a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroup(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", 
fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsTeInterfaceAttributes(ctx context.Context, name string, nodeId string) (*http.Response, error) 
{\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:te-interface-attributes/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, 
err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (m *Mockapi) DeleteStack(arg0 *cloudformation.DeleteStackInput) (*cloudformation.DeleteStackOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteStack\", arg0)\n\tret0, _ := ret[0].(*cloudformation.DeleteStackOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolStaticRoutesStaticNextHops(ctx context.Context, name string, identifier string, protocolName string, prefix string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:static-routes/frinx-openconfig-network-instance:static/{prefix}/frinx-openconfig-network-instance:next-hops/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"prefix\"+\"}\", fmt.Sprintf(\"%v\", prefix), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) 
DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisInterfacesInterfaceLevelsLevelPacketCountersIsh(ctx context.Context, name string, identifier string, protocolName string, interfaceId string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:interfaces/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:packet-counters/frinx-openconfig-network-instance:ish/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := 
url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsLspsConstrainedPathTunnelsTunnelP2pTunnelAttributesP2pPrimaryPathP2pPrimaryPathConfig(ctx context.Context, name string, tunnelName string, p2pPrimaryPathName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody 
interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:lsps/frinx-openconfig-network-instance:constrained-path/frinx-openconfig-network-instance:tunnels/frinx-openconfig-network-instance:tunnel/{tunnel-name}/frinx-openconfig-network-instance:p2p-tunnel-attributes/frinx-openconfig-network-instance:p2p-primary-path/frinx-openconfig-network-instance:p2p-primary-path/{p2p-primary-path-name}/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"tunnel-name\"+\"}\", fmt.Sprintf(\"%v\", tunnelName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"p2p-primary-path-name\"+\"}\", fmt.Sprintf(\"%v\", p2pPrimaryPathName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := 
selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisLevelsLevelAuthentication(ctx context.Context, name string, identifier string, protocolName string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:authentication/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, 
localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (*DeleteInstanceRequest) Descriptor() ([]byte, []int) {\n\treturn file_myshoes_proto_rawDescGZIP(), []int{2}\n}", "func NewDeleteHost(rt *runtime.Runtime) operations.DeleteHostHandler {\n\treturn &deleteHost{rt: rt}\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsLspsConstrainedPathTunnelsTunnelP2pTunnelAttributesP2pPrimaryPathP2pPrimaryPath(ctx context.Context, name string, tunnelName string, p2pPrimaryPathName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:lsps/frinx-openconfig-network-instance:constrained-path/frinx-openconfig-network-instance:tunnels/frinx-openconfig-network-instance:tunnel/{tunnel-name}/frinx-openconfig-network-instance:p2p-tunnel-attributes/frinx-openconfig-network-instance:p2p-primary-path/frinx-openconfig-network-instance:p2p-primary-path/{p2p-primary-path-name}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"tunnel-name\"+\"}\", fmt.Sprintf(\"%v\", tunnelName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"p2p-primary-path-name\"+\"}\", fmt.Sprintf(\"%v\", p2pPrimaryPathName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, 
localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupEbgpMultihop(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:ebgp-multihop/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, 
newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (p *ProxMox) DeleteInstance(ctx *lepton.Context, instanceID string) error {\n\n\treq, err := http.NewRequest(\"DELETE\", p.apiURL+\"/api2/json/nodes/\"+p.nodeNAME+\"/qemu/\"+instanceID, nil)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\t}\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := &http.Client{Transport: tr}\n\n\treq.Header.Add(\"Authorization\", \"PVEAPIToken=\"+p.tokenID+\"=\"+p.secret)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\t_, err = io.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\n\t}\n\n\treturn err\n\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceFdb(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:fdb/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type 
header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocols(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: 
localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsLdp(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:ldp/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = 
localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisLevelsLevelAuthenticationKey(ctx context.Context, name string, identifier string, protocolName string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:authentication/frinx-openconfig-network-instance:key/\"\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := 
GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolStaticRoutesStaticNextHopsNextHop(ctx context.Context, name string, identifier string, protocolName string, prefix string, index string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:static-routes/frinx-openconfig-network-instance:static/{prefix}/frinx-openconfig-network-instance:next-hops/frinx-openconfig-network-instance:next-hop/{index}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"prefix\"+\"}\", fmt.Sprintf(\"%v\", prefix), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"index\"+\"}\", fmt.Sprintf(\"%v\", index), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := 
make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceTables(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and 
map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:tables/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := 
GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeGlobalGracefulRestartConfig(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:graceful-restart/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", 
\"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceVlansVlanConfig(ctx context.Context, name string, vlanId int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:vlans/frinx-openconfig-network-instance:vlan/{vlan-id}/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), 
-1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"vlan-id\"+\"}\", fmt.Sprintf(\"%v\", vlanId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) 
DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisLevelsLevel(ctx context.Context, name string, identifier string, protocolName string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = 
localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeInterfaceAttributesInterface(ctx context.Context, name string, interfaceId string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:interface-attributes/frinx-openconfig-network-instance:interface/{interface-id}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn 
localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *Client) DeleteNetworkSwitchStack(params *DeleteNetworkSwitchStackParams, authInfo runtime.ClientAuthInfoWriter) (*DeleteNetworkSwitchStackNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewDeleteNetworkSwitchStackParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"deleteNetworkSwitchStack\",\n\t\tMethod: \"DELETE\",\n\t\tPathPattern: \"/networks/{networkId}/switchStacks/{switchStackId}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &DeleteNetworkSwitchStackReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*DeleteNetworkSwitchStackNoContent)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for deleteNetworkSwitchStack: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsLspsConstrainedPathTunnelsTunnelP2pTunnelAttributesP2pSecondaryPathsP2pSecondaryPathAdminGroups(ctx context.Context, name string, tunnelName string, p2pSecondaryPathName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:lsps/frinx-openconfig-network-instance:constrained-path/frinx-openconfig-network-instance:tunnels/frinx-openconfig-network-instance:tunnel/{tunnel-name}/frinx-openconfig-network-instance:p2p-tunnel-attributes/frinx-openconfig-network-instance:p2p-secondary-paths/frinx-openconfig-network-instance:p2p-secondary-path/{p2p-secondary-path-name}/frinx-openconfig-network-instance:admin-groups/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"tunnel-name\"+\"}\", fmt.Sprintf(\"%v\", tunnelName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"p2p-secondary-path-name\"+\"}\", fmt.Sprintf(\"%v\", p2pSecondaryPathName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type 
header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisInterfacesInterfaceLevelsLevelHelloAuthenticationKeyConfig(ctx context.Context, name string, identifier string, protocolName string, interfaceId string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// 
create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:interfaces/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:hello-authentication/frinx-openconfig-network-instance:key/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept 
header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) deleteByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, locationName string, managedInstanceName string, databaseName string, backupName string, options *LongTermRetentionManagedInstanceBackupsBeginDeleteByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/locations/{locationName}/longTermRetentionManagedInstances/{managedInstanceName}/longTermRetentionDatabases/{databaseName}/longTermRetentionManagedInstanceBackups/{backupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif locationName == \"\" 
{\n\t\treturn nil, errors.New(\"parameter locationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{locationName}\", url.PathEscape(locationName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif backupName == \"\" {\n\t\treturn nil, errors.New(\"parameter backupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{backupName}\", url.PathEscape(backupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (m *GroupPolicyDefinitionsItemNextVersionDefinitionPreviousVersionDefinitionRequestBuilder) Delete(ctx context.Context, requestConfiguration *GroupPolicyDefinitionsItemNextVersionDefinitionPreviousVersionDefinitionRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": 
i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsLdpGlobalAuthentication(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:ldp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:authentication/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", 
\"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocol(ctx context.Context, name string, identifier string, protocolName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), 
-1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a 
*FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiL3vpnIpv4UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", 
\"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisInterfacesInterfaceLevelsLevelPacketCountersPsnp(ctx context.Context, name string, identifier string, protocolName string, interfaceId string, levelNumber int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:interfaces/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:levels/frinx-openconfig-network-instance:level/{level-number}/frinx-openconfig-network-instance:packet-counters/frinx-openconfig-network-instance:psnp/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"level-number\"+\"}\", fmt.Sprintf(\"%v\", levelNumber), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept 
header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupGracefulRestart(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:graceful-restart/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, 
localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (client *Client) DeleteInstanceWithOptions(request *DeleteInstanceRequest, runtime *util.RuntimeOptions) (_result *DeleteInstanceResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.GlobalInstanceId)) {\n\t\tquery[\"GlobalInstanceId\"] = request.GlobalInstanceId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.InstanceId)) {\n\t\tquery[\"InstanceId\"] = request.InstanceId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.OwnerAccount)) {\n\t\tquery[\"OwnerAccount\"] = request.OwnerAccount\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.OwnerId)) {\n\t\tquery[\"OwnerId\"] = request.OwnerId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.ResourceOwnerAccount)) {\n\t\tquery[\"ResourceOwnerAccount\"] = request.ResourceOwnerAccount\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.ResourceOwnerId)) {\n\t\tquery[\"ResourceOwnerId\"] = request.ResourceOwnerId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.SecurityToken)) {\n\t\tquery[\"SecurityToken\"] = request.SecurityToken\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := 
&openapi.Params{\n\t\tAction: tea.String(\"DeleteInstance\"),\n\t\tVersion: tea.String(\"2015-01-01\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &DeleteInstanceResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolOspfv2AreasAreaInterfacesInterfaceMplsIgpLdpSync(ctx context.Context, name string, identifier string, protocolName string, areaIdentifier string, id string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:ospfv2/frinx-openconfig-network-instance:areas/frinx-openconfig-network-instance:area/{area-identifier}/frinx-openconfig-network-instance:interfaces/frinx-openconfig-network-instance:interface/{id}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:igp-ldp-sync/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"area-identifier\"+\"}\", fmt.Sprintf(\"%v\", areaIdentifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", fmt.Sprintf(\"%v\", id), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: 
localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceFdbMacTable(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:fdb/frinx-openconfig-network-instance:mac-table/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := 
a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsLspsConstrainedPathTunnelsTunnelP2pTunnelAttributesP2pSecondaryPathsP2pSecondaryPathAdminGroupsConfig(ctx context.Context, name string, tunnelName string, p2pSecondaryPathName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:lsps/frinx-openconfig-network-instance:constrained-path/frinx-openconfig-network-instance:tunnels/frinx-openconfig-network-instance:tunnel/{tunnel-name}/frinx-openconfig-network-instance:p2p-tunnel-attributes/frinx-openconfig-network-instance:p2p-secondary-paths/frinx-openconfig-network-instance:p2p-secondary-path/{p2p-secondary-path-name}/frinx-openconfig-network-instance:admin-groups/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"tunnel-name\"+\"}\", fmt.Sprintf(\"%v\", tunnelName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"p2p-secondary-path-name\"+\"}\", fmt.Sprintf(\"%v\", p2pSecondaryPathName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = 
localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiIpv4UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, afiSafiName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set 
Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsRsvpTeSessions(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:rsvp-te/frinx-openconfig-network-instance:sessions/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", 
name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsLdpNeighborsNeighborAuthenticationConfig(ctx context.Context, name string, lsrId string, labelSpaceId 
int32, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:ldp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{lsr-id}/{label-space-id}/frinx-openconfig-network-instance:authentication/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"lsr-id\"+\"}\", fmt.Sprintf(\"%v\", lsrId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"label-space-id\"+\"}\", fmt.Sprintf(\"%v\", labelSpaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := 
selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpGlobalAfiSafisAfiSafiIpv4UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, afiSafiName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:global/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = 
localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (r DeleteStackRequest) Send(ctx context.Context) (*DeleteStackResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &DeleteStackResponse{\n\t\tDeleteStackOutput: r.Request.Data.(*DeleteStackOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupConfig(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, 
localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolIsisInterfacesInterfaceAuthenticationKeychain(ctx context.Context, name string, identifier string, protocolName string, interfaceId string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:isis/frinx-openconfig-network-instance:interfaces/frinx-openconfig-network-instance:interface/{interface-id}/frinx-openconfig-network-instance:authentication/frinx-openconfig-network-instance:keychain/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), 
-1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"interface-id\"+\"}\", fmt.Sprintf(\"%v\", interfaceId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: 
localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiIpv4Unicast(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, afiSafiName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:ipv4-unicast/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborLoggingOptions(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, nodeId string) (*http.Response, error) 
{\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:logging-options/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept 
header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (m *MockProviderClient) DeleteCloudformationStack(arg0 context.Context, arg1 map[string]string, arg2 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteCloudformationStack\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func DeleteStack(stackName, profile, region string) {\n\tcf := GetCloudformationClient(profile, region)\n\tprinter.Step(fmt.Sprintf(\"Delete Stack %s:\", stackName))\n\n\t//See if the stack exists to begin with\n\t_, err := cf.DescribeStacks(&cloudformation.DescribeStacksInput{StackName: aws.String(stackName)})\n\tcheckError(err)\n\n\t_, err = cf.DeleteStack(&cloudformation.DeleteStackInput{StackName: aws.String(stackName)})\n\tcheckError(err)\n\n\t// status polling\n\tPrintStackEventHeader()\n\n\tfor {\n\t\tprinter.Progress(\"Deleting\")\n\t\ttime.Sleep(2 * time.Second)\n\t\tstatus, err := cf.DescribeStacks(&cloudformation.DescribeStacksInput{StackName: 
aws.String(stackName)})\n\t\tcheckErrorDeletePoll(err)\n\n\t\tevents, _ := cf.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{StackName: aws.String(stackName)})\n\n\t\tif len(status.Stacks) > 0 {\n\t\t\tstackStatus := *status.Stacks[0].StackStatus\n\n\t\t\tif len(events.StackEvents) > 0 {\n\t\t\t\tPrintStackEvent(events.StackEvents[0], false)\n\t\t\t}\n\t\t\tif stackStatus == cloudformation.StackStatusDeleteInProgress {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tbreak\n\t}\n\n\t// Make sure delete worked\n\t_, err = cf.DescribeStacks(&cloudformation.DescribeStacksInput{StackName: aws.String(stackName)})\n\tif err != nil {\n\t\tcheckErrorDeletePoll(err)\n\t} else {\n\t\tprinter.SubStep(\n\t\t\tfmt.Sprintf(\"Success Delete Stack %s\", stackName),\n\t\t\t1,\n\t\t\ttrue,\n\t\t\ttrue,\n\t\t)\n\t\tos.Exit(0)\n\t}\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsSignalingProtocolsLdpInterfaceAttributes(ctx context.Context, name string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:signaling-protocols/frinx-openconfig-network-instance:ldp/frinx-openconfig-network-instance:interface-attributes/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := 
make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiL3vpnIpv4UnicastPrefixLimit(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, afiSafiName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = 
strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:l3vpn-ipv4-unicast/frinx-openconfig-network-instance:prefix-limit/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != 
\"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) DeleteFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceMplsTeGlobalAttributesSrlgsSrlgStaticSrlgMembers(ctx context.Context, name string, srlgName string, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:mpls/frinx-openconfig-network-instance:te-global-attributes/frinx-openconfig-network-instance:srlgs/frinx-openconfig-network-instance:srlg/{srlg-name}/frinx-openconfig-network-instance:static-srlg-members/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"srlg-name\"+\"}\", fmt.Sprintf(\"%v\", srlgName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, 
err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}" ]
[ "0.5638459", "0.55183303", "0.5496509", "0.5320517", "0.53041613", "0.5235433", "0.5193159", "0.5165025", "0.5164133", "0.5135289", "0.5097817", "0.50801057", "0.5079635", "0.5071968", "0.5042701", "0.50424296", "0.5040854", "0.5025364", "0.5012202", "0.50082207", "0.50059986", "0.49933973", "0.49797082", "0.49614823", "0.49614462", "0.49550322", "0.49409842", "0.49402016", "0.49391818", "0.4937171", "0.49301085", "0.49280554", "0.49268523", "0.4920675", "0.49155188", "0.4897962", "0.4896611", "0.48951778", "0.48926488", "0.48831064", "0.48822668", "0.48754045", "0.4874147", "0.48737526", "0.48730317", "0.48718864", "0.48681268", "0.48636702", "0.4861766", "0.486028", "0.4856871", "0.48529083", "0.4843173", "0.4842311", "0.48379606", "0.48350608", "0.48325273", "0.48305562", "0.48270312", "0.48248377", "0.48237264", "0.48220572", "0.4821764", "0.48210272", "0.48207614", "0.4819633", "0.4818536", "0.48125795", "0.48062903", "0.48055404", "0.48031953", "0.48019755", "0.47991544", "0.47986233", "0.47955844", "0.4791266", "0.47890863", "0.4788448", "0.47875667", "0.4778318", "0.47756308", "0.47754106", "0.47748402", "0.47700524", "0.47696748", "0.47691172", "0.47685412", "0.47663432", "0.4761592", "0.47608933", "0.47585705", "0.47583857", "0.4756274", "0.4752871", "0.4750988", "0.47508168", "0.47504503", "0.47485286", "0.47460878", "0.47413653" ]
0.63991356
0
Capabilities provides a mock function with given fields:
func (_m *Plugin) Capabilities() *events.Capabilities { ret := _m.Called() var r0 *events.Capabilities if rf, ok := ret.Get(0).(func() *events.Capabilities); ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*events.Capabilities) } } return r0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (n *mockAgent) capabilities() types.Capabilities {\n\treturn types.Capabilities{}\n}", "func (m *MockPublicKeyFinderClient) Supports() map[string]bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Supports\")\n\tret0, _ := ret[0].(map[string]bool)\n\treturn ret0\n}", "func NewCapabilities(features ...string) Capabilities {\n\tc := Capabilities{}\n\tfor _, feature := range features {\n\t\tc.With(feature)\n\t}\n\treturn c\n}", "func (m *MockReceiverClient) Supports() map[string]bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Supports\")\n\tret0, _ := ret[0].(map[string]bool)\n\treturn ret0\n}", "func (srv *Server) Capabilities(context.Context, *gnmipb.CapabilityRequest) (*gnmipb.CapabilityResponse, error) {\n\treturn nil, grpc.Errorf(codes.Unimplemented, \"Capabilities() is not implemented\")\n}", "func (m *MockSupporter) Supports() map[string]bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Supports\")\n\tret0, _ := ret[0].(map[string]bool)\n\treturn ret0\n}", "func NewCapabilities() *Capabilities {\n\treturn &Capabilities{\n\t\tm: make(map[string]*Capability, 0),\n\t}\n}", "func (m *MockSenderClient) Supports() map[string]bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Supports\")\n\tret0, _ := ret[0].(map[string]bool)\n\treturn ret0\n}", "func mapCapabilities(config configuration) cap.Capabilities {\n\tcaps := cap.Capabilities{\n\t\t\"features\": cap.Cap{\n\t\t\t\"vault\": cap.Cap{\n\t\t\t\t\"enabled\": config.Cluster.Vault.Enabled,\n\t\t\t\t\"managed\": config.Cluster.Vault.Managed.Enabled,\n\t\t\t},\n\t\t\t\"monitoring\": cap.Cap{\n\t\t\t\t\"enabled\": config.Cluster.Monitoring.Enabled,\n\t\t\t},\n\t\t\t\"logging\": cap.Cap{\n\t\t\t\t\"enabled\": config.Cluster.Logging.Enabled,\n\t\t\t},\n\t\t\t\"dns\": cap.Cap{\n\t\t\t\t\"enabled\": config.Cluster.DNS.Enabled,\n\t\t\t\t\"baseDomain\": config.Cluster.DNS.BaseDomain,\n\t\t\t},\n\t\t\t\"securityScan\": cap.Cap{\n\t\t\t\t\"enabled\": 
config.Cluster.SecurityScan.Enabled,\n\t\t\t\t\"managed\": config.Cluster.SecurityScan.Anchore.Enabled,\n\t\t\t},\n\t\t\t\"expiry\": cap.Cap{\n\t\t\t\t\"enabled\": config.Cluster.Expiry.Enabled,\n\t\t\t},\n\t\t\t\"ingress\": cap.Cap{\n\t\t\t\t\"enabled\": config.Cluster.Ingress.Enabled,\n\t\t\t\t\"controllers\": config.Cluster.Ingress.Controllers,\n\t\t\t},\n\t\t},\n\t\t\"helm\": cap.Cap{\n\t\t\t\"version\": helmVersion,\n\t\t},\n\t}\n\n\treturn caps\n}", "func (d *MinioDriver) Capabilities(r volume.Request) volume.Response {\n\tlocalCapability := volume.Capability{\n\t\tScope: \"local\",\n\t}\n\tglog.V(1).Infof(\"Capabilities request: %#v\", r)\n\treturn volumeResp(\"\", \"\", nil, localCapability, \"\")\n}", "func (s *Server) Capabilities(ctx context.Context, req *gnmipb.CapabilityRequest) (*gnmipb.CapabilityResponse, error) {\n\tseq := s.rpcSequence()\n\tif glog.V(11) {\n\t\tglog.Infof(\"capabilities.request[%d]=%s\", seq, req)\n\t}\n\tresp, err := s.capabilities(ctx, req)\n\tif err != nil {\n\t\tif glog.V(11) {\n\t\t\tglog.Errorf(\"capabilities.response[%d]=%v\", seq, status.FromError(err))\n\t\t}\n\t} else {\n\t\tif glog.V(11) {\n\t\t\tglog.Infof(\"capabilities.response[%d]=%s\", seq, resp)\n\t\t}\n\t}\n\treturn resp, err\n}", "func (m *MockNuvoVM) GetCapabilities() (*nuvoapi.Capabilities, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetCapabilities\")\n\tret0, _ := ret[0].(*nuvoapi.Capabilities)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockManager) GetCapabilitiesForAppnetInterfaceVersion(arg0 string) ([]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetCapabilitiesForAppnetInterfaceVersion\", arg0)\n\tret0, _ := ret[0].([]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (bq *InMemoryBuildQueue) GetCapabilities(ctx context.Context, in *remoteexecution.GetCapabilitiesRequest) (*remoteexecution.ServerCapabilities, error) {\n\tinstanceName, err := 
digest.NewInstanceName(in.InstanceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texecEnabled := true\n\tauthErr := auth.AuthorizeSingleInstanceName(ctx, bq.executeAuthorizer, instanceName)\n\tswitch status.Code(authErr) {\n\tcase codes.OK:\n\t\t// Nothing to do.\n\tcase codes.PermissionDenied:\n\t\texecEnabled = false\n\tdefault:\n\t\treturn nil, util.StatusWrap(authErr, \"Authorization\")\n\t}\n\n\treturn &remoteexecution.ServerCapabilities{\n\t\tCacheCapabilities: &remoteexecution.CacheCapabilities{\n\t\t\tDigestFunctions: digest.SupportedDigestFunctions,\n\t\t\tActionCacheUpdateCapabilities: &remoteexecution.ActionCacheUpdateCapabilities{\n\t\t\t\tUpdateEnabled: false,\n\t\t\t},\n\t\t\t// CachePriorityCapabilities: Priorities not supported.\n\t\t\t// MaxBatchTotalSize: Not used by Bazel yet.\n\t\t\tSymlinkAbsolutePathStrategy: remoteexecution.SymlinkAbsolutePathStrategy_ALLOWED,\n\t\t},\n\t\tExecutionCapabilities: &remoteexecution.ExecutionCapabilities{\n\t\t\tDigestFunction: remoteexecution.DigestFunction_SHA256,\n\t\t\tExecEnabled: execEnabled,\n\t\t\tExecutionPriorityCapabilities: &remoteexecution.PriorityCapabilities{\n\t\t\t\tPriorities: []*remoteexecution.PriorityCapabilities_PriorityRange{\n\t\t\t\t\t{MinPriority: math.MinInt32, MaxPriority: math.MaxInt32},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// TODO: DeprecatedApiVersion.\n\t\tLowApiVersion: &semver.SemVer{Major: 2},\n\t\tHighApiVersion: &semver.SemVer{Major: 2},\n\t}, nil\n}", "func (s *Server) capabilities(ctx context.Context, req *gnmipb.CapabilityRequest) (*gnmipb.CapabilityResponse, error) {\n\tver, err := GetGNMIVersion()\n\tif err != nil {\n\t\treturn nil, status.TaggedErrorf(codes.Internal, status.TagOperationFail,\n\t\t\t\"gnmi service version error: %v\", err)\n\t}\n\treturn &gnmipb.CapabilityResponse{\n\t\tSupportedModels: s.Modeldata,\n\t\tSupportedEncodings: supportedEncodings,\n\t\tGNMIVersion: *ver,\n\t}, nil\n}", "func (d ImagefsDriver) Capabilities() *volume.CapabilitiesResponse 
{\n\tfmt.Printf(\"-> Capabilities\\n\")\n\tresponse := volume.CapabilitiesResponse{Capabilities: volume.Capability{Scope: \"local\"}}\n\tfmt.Printf(\"<- %+v\\n\", response)\n\treturn &response\n}", "func getCapabilities(client kubernetes.Interface) (Capabilities, error) {\n\n\tgroupList, err := client.Discovery().ServerGroups()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcaps := Capabilities{}\n\tfor _, g := range groupList.Groups {\n\t\tfor _, gv := range g.Versions {\n\t\t\tcaps[gv.GroupVersion] = true\n\t\t}\n\t}\n\n\treturn caps, nil\n}", "func (proxy *remoteDriverProxy) Capabilities() (*remoteVolumeCapability, error) {\n\tvar req remoteVolumeCapabilitiesReq\n\tvar resp remoteVolumeCapabilitiesResp\n\n\tif err := proxy.client.CallService(remoteVolumeCapabilitiesService, &req, &resp, true); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn nil, errors.New(resp.Err)\n\t}\n\n\treturn resp.Capabilities, nil\n}", "func GetCapabilities() Capabilities {\n\treturn Client().GetCapabilities()\n}", "func GetCapabilities(\n\treq *http.Request, rsAPI roomserverAPI.ClientRoomserverAPI,\n) util.JSONResponse {\n\troomVersionsQueryReq := roomserverAPI.QueryRoomVersionCapabilitiesRequest{}\n\troomVersionsQueryRes := roomserverAPI.QueryRoomVersionCapabilitiesResponse{}\n\tif err := rsAPI.QueryRoomVersionCapabilities(\n\t\treq.Context(),\n\t\t&roomVersionsQueryReq,\n\t\t&roomVersionsQueryRes,\n\t); err != nil {\n\t\tutil.GetLogger(req.Context()).WithError(err).Error(\"queryAPI.QueryRoomVersionCapabilities failed\")\n\t\treturn jsonerror.InternalServerError()\n\t}\n\n\tresponse := map[string]interface{}{\n\t\t\"capabilities\": map[string]interface{}{\n\t\t\t\"m.change_password\": map[string]bool{\n\t\t\t\t\"enabled\": true,\n\t\t\t},\n\t\t\t\"m.room_versions\": roomVersionsQueryRes,\n\t\t},\n\t}\n\n\treturn util.JSONResponse{\n\t\tCode: http.StatusOK,\n\t\tJSON: response,\n\t}\n}", "func detectCapabilities(d diag.Sink, client *client.Client) 
func(ctx context.Context) capabilities {\n\tvar once sync.Once\n\tvar caps capabilities\n\tdone := make(chan struct{})\n\tget := func(ctx context.Context) capabilities {\n\t\tonce.Do(func() {\n\t\t\tcaps = doDetectCapabilities(ctx, d, client)\n\t\t\tclose(done)\n\t\t})\n\t\t<-done\n\t\treturn caps\n\t}\n\treturn get\n}", "func (d *EmulatedBTPeerDevice) evaluateCapabilities(ctx context.Context) error {\n\t// Refresh stored capabilities.\n\tcapabilities, err := d.rpc.GetCapabilities(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get device capabilities\")\n\t}\n\td.cache.capabilities = capabilities\n\n\t// Evaluate hasPinCode.\n\thasPinCode, ok := d.cache.capabilities[cbt.DeviceCapabilityHasPin.String()]\n\tif !ok {\n\t\treturn errors.Errorf(\"device capabilities missing %q\", cbt.DeviceCapabilityHasPin)\n\t}\n\thasPinCodeBool, ok := hasPinCode.(bool)\n\tif !ok {\n\t\treturn errors.Errorf(\"expected device capability %q to be a bool, got %v\", cbt.DeviceCapabilityHasPin, hasPinCode)\n\t}\n\td.cache.hasPinCode = hasPinCodeBool\n\n\t// Evaluate supportsInitConnect.\n\tsupportsInitConnect, ok := d.cache.capabilities[cbt.DeviceCapabilityInitConnect.String()]\n\tif !ok {\n\t\treturn errors.Errorf(\"device capabilities missing %q\", cbt.DeviceCapabilityInitConnect)\n\t}\n\tsupportsInitConnectBool, ok := supportsInitConnect.(bool)\n\tif !ok {\n\t\treturn errors.Errorf(\"expected device capability %q to be a bool, got %v\", cbt.DeviceCapabilityInitConnect, supportsInitConnect)\n\t}\n\td.cache.supportsInitConnect = supportsInitConnectBool\n\n\t// Evaluate supportedTransportMethods.\n\tsupportedTransportMethods, ok := d.cache.capabilities[cbt.DeviceCapabilityTransports.String()]\n\tif !ok {\n\t\treturn errors.Errorf(\"device capabilities missing %q\", cbt.DeviceCapabilityTransports)\n\t}\n\tsupportedTransportMethodsSlice, ok := supportedTransportMethods.([]interface{})\n\tif !ok {\n\t\treturn errors.Errorf(\"expected device capability %q to be an 
[]interface{}, got %v\", cbt.DeviceCapabilityTransports, supportedTransportMethods)\n\t}\n\tfor _, method := range supportedTransportMethodsSlice {\n\t\tmethodStr, ok := method.(string)\n\t\tif !ok {\n\t\t\treturn errors.Errorf(\"expected device capability %q to be an []interface{} of strings, got %v\", cbt.DeviceCapabilityTransports, supportedTransportMethods)\n\t\t}\n\t\td.cache.supportedTransportMethods = append(d.cache.supportedTransportMethods, cbt.TransportMethod(methodStr))\n\t}\n\n\treturn nil\n}", "func (l *RemoteProvider) loadCapabilities(token string) {\n\tvar resp *http.Response\n\tvar err error\n\n\tversion := viper.GetString(\"BUILD\")\n\tos := viper.GetString(\"OS\")\n\tfinalURL := fmt.Sprintf(\"%s/%s/capabilities?os=%s\", l.RemoteProviderURL, version, os)\n\tfinalURL = strings.TrimSuffix(finalURL, \"\\n\")\n\tremoteProviderURL, err := url.Parse(finalURL)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Error while constructing url: %s\", err)\n\t\treturn\n\t}\n\n\treq, _ := http.NewRequest(http.MethodGet, remoteProviderURL.String(), nil)\n\n\t// If not token is provided then make a simple GET request\n\tif token == \"\" {\n\t\tc := &http.Client{}\n\t\tresp, err = c.Do(req)\n\t} else {\n\t\t// Proceed to make a request with the token\n\t\tresp, err = l.DoRequest(req, token)\n\t}\n\n\tif err != nil || resp.StatusCode != http.StatusOK {\n\t\tlogrus.Errorf(\"[Initialize Provider]: Failed to get capabilities %s\", err)\n\t\treturn\n\t}\n\tdefer func() {\n\t\terr := resp.Body.Close()\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"[Initialize]: Failed to close response body %s\", err)\n\t\t}\n\t}()\n\n\t// Clear the previous capabilities before writing new one\n\tl.ProviderProperties = ProviderProperties{\n\t\tProviderURL: l.RemoteProviderURL,\n\t}\n\tdecoder := json.NewDecoder(resp.Body)\n\tif err := decoder.Decode(&l.ProviderProperties); err != nil {\n\t\tlogrus.Errorf(\"[Initialize]: Failed to decode provider properties %s\", err)\n\t}\n}", "func (_class PIFClass) 
GetCapabilities(sessionID SessionRef, self PIFRef) (_retval []string, _err error) {\n\t_method := \"PIF.get_capabilities\"\n\t_sessionIDArg, _err := convertSessionRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"session_id\"), sessionID)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_selfArg, _err := convertPIFRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"self\"), self)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_result, _err := _class.client.APICall(_method, _sessionIDArg, _selfArg)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_retval, _err = convertStringSetToGo(_method + \" -> \", _result.Value)\n\treturn\n}", "func (m *MockNotary) Notarize(arg0 string) (map[string]interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Notarize\", arg0)\n\tret0, _ := ret[0].(map[string]interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func MockPrepareRelease(release *Release) {\n\trelease.SetDefaultRegionAccount(to.Strp(\"region\"), to.Strp(\"account\"))\n\trelease.SetDefaults()\n\trelease.SetUUID()\n}", "func (o *ARVRInterface) GetCapabilities() gdnative.Int {\n\t//log.Println(\"Calling ARVRInterface.GetCapabilities()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"ARVRInterface\", \"get_capabilities\")\n\n\t// Call the parent method.\n\t// int\n\tretPtr := gdnative.NewEmptyInt()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewIntFromPointer(retPtr)\n\treturn ret\n}", "func (self *MyDockerNetworkPlugin) GetCapabilities() (*n.CapabilitiesResponse,\n\terror) {\n\tlog.Printf(\"Received GetCapabilities req\")\n\tcapabilities := &n.CapabilitiesResponse{\n\t\tScope: self.scope,\n\t}\n\treturn capabilities, nil\n}", "func Capabilities() *volume.CapabilitiesResponse {\n\tlog.Debugf(\"Entering Capabilities\")\n\treturn 
&volume.CapabilitiesResponse{\n\t\tCapabilities: volume.Capability{\n\t\t\tScope: \"global\",\n\t\t},\n\t}\n}", "func New(capabilities ...string) *Capability {\n\treturn &Capability{\n\t\tCapabilities: capabilities,\n\t}\n}", "func SetCapabilities(config *configv1.ClusterVersion,\n\texistingEnabled map[configv1.ClusterVersionCapability]struct{}) ClusterCapabilities {\n\n\tvar capabilities ClusterCapabilities\n\tcapabilities.KnownCapabilities = setKnownCapabilities()\n\n\tcapabilities.EnabledCapabilities, capabilities.ImplicitlyEnabledCapabilities = setEnabledCapabilities(config.Spec.Capabilities,\n\t\texistingEnabled)\n\n\treturn capabilities\n}", "func mockOlt() *fields {\n\tdh := newMockDeviceHandler()\n\tnewOlt := &fields{}\n\tnewOlt.deviceHandlers = map[string]*DeviceHandler{}\n\tnewOlt.deviceHandlers[dh.device.Id] = dh\n\treturn newOlt\n}", "func (d *Driver) Capabilities() (*drivers.Capabilities, error) {\n\treturn capabilities, nil\n}", "func (m *MockisKey_KeyInfo) isKey_KeyInfo() {\n\tm.ctrl.Call(m, \"isKey_KeyInfo\")\n}", "func (d *VolumeDriver) Capabilities(r volume.Request) volume.Response {\n\treturn volume.Response{Capabilities: volume.Capability{Scope: \"global\"}}\n}", "func (mt *Mytoken) VerifyCapabilities(required ...api.Capability) bool {\n\tif mt.Capabilities == nil || len(mt.Capabilities) == 0 {\n\t\treturn false\n\t}\n\tfor _, c := range required {\n\t\tif !mt.Capabilities.Has(c) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (mt *Mytoken) VerifyCapabilities(required ...api.Capability) bool {\n\tif mt.Capabilities == nil || len(mt.Capabilities) == 0 {\n\t\treturn false\n\t}\n\tfor _, c := range required {\n\t\tif !mt.Capabilities.Has(c) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (m *MockenvDescriber) Params() (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Params\")\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (d *DirDriver) 
Capabilities() *volume.CapabilitiesResponse {\n\tlogrus.Infof(\"Hit Capabilities() endpoint\")\n\n\treturn &volume.CapabilitiesResponse{\n\t\tCapabilities: volume.Capability{\n\t\t\tScope: \"local\",\n\t\t},\n\t}\n}", "func (d *EmulatedBTPeerDevice) Capabilities() map[string]interface{} {\n\treturn d.cache.capabilities\n}", "func getCapabilities(attributes bascule.Attributes) ([]string, string, error) {\n\tif attributes == nil {\n\t\treturn []string{}, UndeterminedCapabilities, ErrNilAttributes\n\t}\n\n\tval, ok := attributes.Get(CapabilityKey)\n\tif !ok {\n\t\treturn []string{}, UndeterminedCapabilities, fmt.Errorf(\"couldn't get capabilities using key %v\", CapabilityKey)\n\t}\n\n\tvals, err := cast.ToStringSliceE(val)\n\tif err != nil {\n\t\treturn []string{}, UndeterminedCapabilities, fmt.Errorf(\"capabilities \\\"%v\\\" not the expected string slice: %v\", val, err)\n\t}\n\n\tif len(vals) == 0 {\n\t\treturn []string{}, EmptyCapabilitiesList, ErrNoVals\n\t}\n\n\treturn vals, \"\", nil\n\n}", "func (o *PluginConfigLinux) SetCapabilities(v []string) {\n\to.Capabilities = v\n}", "func (c CapabilitiesValidator) checkCapabilities(capabilities []string, reqURL string, method string) error {\n\tfor _, val := range capabilities {\n\t\tif c.Checker.Authorized(val, reqURL, method) {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn emperror.With(ErrNoValidCapabilityFound, \"capabilitiesFound\", capabilities, \"urlToMatch\", reqURL, \"methodToMatch\", method)\n\n}", "func (fs *Mysqlfs) Capabilities() billy.Capability {\n\treturn billy.WriteCapability |\n\t\tbilly.ReadCapability |\n\t\tbilly.ReadAndWriteCapability |\n\t\tbilly.SeekCapability |\n\t\tbilly.TruncateCapability\n}", "func mockedGranter(kubeutil *kube.Kube, app *v1.RadixRegistration, namespace string, serviceAccount *corev1.ServiceAccount) error {\n\treturn nil\n}", "func NewCapabilitiesCaveat(serviceName string, capabilities string) Caveat {\n\treturn Caveat{\n\t\tCondition: serviceName + 
CondCapabilitiesSuffix,\n\t\tValue: capabilities,\n\t}\n}", "func GetCapabilities(dc discovery.DiscoveryInterface) (*Capabilities, error) {\n\tkubeVersion, err := dc.ServerVersion()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not get server version from Kubernetes\")\n\t}\n\t// Issue #6361:\n\t// Client-Go emits an error when an API service is registered but unimplemented.\n\t// We trap that error here and print a warning. But since the discovery client continues\n\t// building the API object, it is correctly populated with all valid APIs.\n\t// See https://github.com/kubernetes/kubernetes/issues/72051#issuecomment-521157642\n\tapiVersions, err := GetVersionSet(dc)\n\tif err != nil {\n\t\tif discovery.IsGroupDiscoveryFailedError(err) {\n\t\t\tblog.Infof(\"WARNING: The Kubernetes server has an orphaned API service. Server reports: %s\", err)\n\t\t\tblog.Infof(\"WARNING: To fix this, kubectl delete apiservice <service-name>\")\n\t\t} else {\n\t\t\treturn nil, errors.Wrap(err, \"could not get apiVersions from Kubernetes\")\n\t\t}\n\t}\n\treturn &Capabilities{\n\t\tKubeVersion: KubeVersion{\n\t\t\tMajor: kubeVersion.Major,\n\t\t\tMinor: kubeVersion.Minor,\n\t\t\tVersion: kubeVersion.GitVersion,\n\t\t},\n\t\tAPIVersions: apiVersions,\n\t}, nil\n}", "func MockExecutor(height uint64) *PermissiveExecutor {\n\tp := user.NewProvisioners()\n\t_ = p.Add(key.NewRandKeys().BLSPubKey, 1, 1, 1, 0)\n\treturn &PermissiveExecutor{\n\t\theight: height,\n\t\tP: p,\n\t}\n}", "func allCapabilities() []string {\n\tvar capabilities []string\n\tfor _, cap := range capabilityList {\n\t\tcapabilities = append(capabilities, cap)\n\t}\n\treturn capabilities\n}", "func (m *MockResourceSpecGetterWithHeaders) CustomHeaders() map[string]string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CustomHeaders\")\n\tret0, _ := ret[0].(map[string]string)\n\treturn ret0\n}", "func (m *MockSession) ResponseMID(arg0 context.Context, arg1 uint, arg2 interface{}, arg3 ...bool) error 
{\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1, arg2}\n\tfor _, a := range arg3 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"ResponseMID\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockService) Match(arg0 context.Context, arg1 url.Values) (*provider.MatchResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Match\", arg0, arg1)\n\tret0, _ := ret[0].(*provider.MatchResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *SlackRTMInterface) OpenIMChannel(user string) (bool, bool, string, error) {\n\tret := _m.Called(user)\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func(string) bool); ok {\n\t\tr0 = rf(user)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\tvar r1 bool\n\tif rf, ok := ret.Get(1).(func(string) bool); ok {\n\t\tr1 = rf(user)\n\t} else {\n\t\tr1 = ret.Get(1).(bool)\n\t}\n\n\tvar r2 string\n\tif rf, ok := ret.Get(2).(func(string) string); ok {\n\t\tr2 = rf(user)\n\t} else {\n\t\tr2 = ret.Get(2).(string)\n\t}\n\n\tvar r3 error\n\tif rf, ok := ret.Get(3).(func(string) error); ok {\n\t\tr3 = rf(user)\n\t} else {\n\t\tr3 = ret.Error(3)\n\t}\n\n\treturn r0, r1, r2, r3\n}", "func (mariadbFlavor) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) {\n\tswitch capability {\n\tdefault:\n\t\treturn false, nil\n\t}\n}", "func NewCapability() *CapabilityBuilder {\n\treturn &CapabilityBuilder{}\n}", "func (_m *mockKvCapabilityVerifier) BucketCapabilityStatus(cap gocbcore.BucketCapability) gocbcore.BucketCapabilityStatus {\n\tret := _m.Called(cap)\n\n\tvar r0 gocbcore.BucketCapabilityStatus\n\tif rf, ok := ret.Get(0).(func(gocbcore.BucketCapability) gocbcore.BucketCapabilityStatus); ok {\n\t\tr0 = rf(cap)\n\t} else {\n\t\tr0 = ret.Get(0).(gocbcore.BucketCapabilityStatus)\n\t}\n\n\treturn r0\n}", "func (m *MockAccessResponder) ToMap() map[string]interface{} {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ToMap\")\n\tret0, _ := 
ret[0].(map[string]interface{})\n\treturn ret0\n}", "func (cc *ChannelConfig) Capabilities() ChannelCapabilities {\n\t_ = cc.protos\n\t_ = cc.protos.Capabilities\n\t_ = cc.protos.Capabilities.Capabilities\n\treturn capabilities.NewChannelProvider(cc.protos.Capabilities.Capabilities)\n}", "func (m *MockValues) Map() map[string]interface{} {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Map\")\n\tret0, _ := ret[0].(map[string]interface{})\n\treturn ret0\n}", "func mockASRockBMC() *httptest.Server {\n\thandler := http.NewServeMux()\n\thandler.HandleFunc(\"/\", index)\n\thandler.HandleFunc(\"/api/session\", session)\n\thandler.HandleFunc(\"/api/asrr/fw-info\", fwinfo)\n\thandler.HandleFunc(\"/api/fru\", fruinfo)\n\thandler.HandleFunc(\"/api/asrr/inventory_info\", inventoryinfo)\n\thandler.HandleFunc(\"/api/sensors\", sensorsinfo)\n\thandler.HandleFunc(\"/api/asrr/getbioscode\", biosPOSTCodeinfo)\n\thandler.HandleFunc(\"/api/chassis-status\", chassisStatusInfo)\n\n\t// fw update endpoints - in order of invocation\n\thandler.HandleFunc(\"/api/maintenance/flash\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/maintenance/firmware\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/maintenance/firmware/verification\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/maintenance/firmware/upgrade\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/maintenance/firmware/flash-progress\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/maintenance/reset\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/asrr/maintenance/BIOS/firmware\", biosFirmwareUpgrade)\n\n\t// user accounts endpoints\n\thandler.HandleFunc(\"/api/settings/users\", userAccountList)\n\thandler.HandleFunc(\"/api/settings/users/3\", userAccountList)\n\treturn httptest.NewTLSServer(handler)\n}", "func (m *MockProduct) GetProductInformationFromAcqright(arg0 context.Context, arg1 db.GetProductInformationFromAcqrightParams) (db.GetProductInformationFromAcqrightRow, error) {\n\tm.ctrl.T.Helper()\n\tret := 
m.ctrl.Call(m, \"GetProductInformationFromAcqright\", arg0, arg1)\n\tret0, _ := ret[0].(db.GetProductInformationFromAcqrightRow)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockManagedClusterScope) Info(msg string, keysAndValues ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{msg}\n\tfor _, a := range keysAndValues {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"Info\", varargs...)\n}", "func setEnabledCapabilities(capabilitiesSpec *configv1.ClusterVersionCapabilitiesSpec,\n\tpriorEnabled map[configv1.ClusterVersionCapability]struct{}) (map[configv1.ClusterVersionCapability]struct{},\n\t[]configv1.ClusterVersionCapability) {\n\n\tcapSet := DefaultCapabilitySet\n\n\tif capabilitiesSpec != nil && len(capabilitiesSpec.BaselineCapabilitySet) > 0 {\n\t\tcapSet = capabilitiesSpec.BaselineCapabilitySet\n\t}\n\tenabled := GetCapabilitiesAsMap(configv1.ClusterVersionCapabilitySets[capSet])\n\n\tif capabilitiesSpec != nil {\n\t\tfor _, v := range capabilitiesSpec.AdditionalEnabledCapabilities {\n\t\t\tif _, ok := enabled[v]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tenabled[v] = struct{}{}\n\t\t}\n\t}\n\tvar implicitlyEnabled []configv1.ClusterVersionCapability\n\tfor k := range priorEnabled {\n\t\tif _, ok := enabled[k]; !ok {\n\t\t\timplicitlyEnabled = append(implicitlyEnabled, k)\n\t\t\tenabled[k] = struct{}{}\n\t\t}\n\t}\n\tsort.Sort(capabilitiesSort(implicitlyEnabled))\n\treturn enabled, implicitlyEnabled\n}", "func (b *BaseConn) writeCapabilities() error {\n\n\tcapMsg := new(baseproto.Capabilities)\n\n\tmsg := new(baseproto.Message)\n\tmsg.MsgType = new(uint32)\n\t*msg.MsgType = 1\n\n\tvar err error\n\tif msg.Content, err = proto.Marshal(capMsg); err != nil {\n\t\treturn err\n\t}\n\n\treturn b.writeMsg(msg)\n}", "func (_m *ExecutionManager) Create(ctx context.Context, vendorType string, vendorID int64, trigger string, extraAttrs ...map[string]interface{}) (int64, error) {\n\t_va := make([]interface{}, 
len(extraAttrs))\n\tfor _i := range extraAttrs {\n\t\t_va[_i] = extraAttrs[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, vendorType, vendorID, trigger)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 int64\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, int64, string, ...map[string]interface{}) (int64, error)); ok {\n\t\treturn rf(ctx, vendorType, vendorID, trigger, extraAttrs...)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, string, int64, string, ...map[string]interface{}) int64); ok {\n\t\tr0 = rf(ctx, vendorType, vendorID, trigger, extraAttrs...)\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, string, int64, string, ...map[string]interface{}) error); ok {\n\t\tr1 = rf(ctx, vendorType, vendorID, trigger, extraAttrs...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func Get() Capabilities {\n\tcapInstance.lock.Lock()\n\tdefer capInstance.lock.Unlock()\n\t// This check prevents clobbering of capabilities that might've been set via SetForTests\n\tif capInstance.capabilities == nil {\n\t\tInitialize(Capabilities{\n\t\t\tAllowPrivileged: false,\n\t\t\tPrivilegedSources: PrivilegedSources{\n\t\t\t\tHostNetworkSources: []string{},\n\t\t\t\tHostPIDSources: []string{},\n\t\t\t\tHostIPCSources: []string{},\n\t\t\t},\n\t\t})\n\t}\n\treturn *capInstance.capabilities\n}", "func (mysqlFlavor80) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) {\n\tswitch capability {\n\tcase InstantDDLFlavorCapability,\n\t\tInstantExpandEnumCapability,\n\t\tInstantAddLastColumnFlavorCapability,\n\t\tInstantAddDropVirtualColumnFlavorCapability,\n\t\tInstantChangeColumnDefaultFlavorCapability:\n\t\treturn true, nil\n\tcase InstantAddDropColumnFlavorCapability:\n\t\treturn ServerVersionAtLeast(serverVersion, 8, 0, 29)\n\tcase TransactionalGtidExecutedFlavorCapability:\n\t\treturn ServerVersionAtLeast(serverVersion, 8, 0, 
17)\n\tcase FastDropTableFlavorCapability:\n\t\treturn ServerVersionAtLeast(serverVersion, 8, 0, 23)\n\tcase MySQLJSONFlavorCapability:\n\t\treturn true, nil\n\tcase MySQLUpgradeInServerFlavorCapability:\n\t\treturn ServerVersionAtLeast(serverVersion, 8, 0, 16)\n\tcase DynamicRedoLogCapacityFlavorCapability:\n\t\treturn ServerVersionAtLeast(serverVersion, 8, 0, 30)\n\tcase DisableRedoLogFlavorCapability:\n\t\treturn ServerVersionAtLeast(serverVersion, 8, 0, 21)\n\tdefault:\n\t\treturn false, nil\n\t}\n}", "func (mysqlFlavor56) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) {\n\tswitch capability {\n\tdefault:\n\t\treturn false, nil\n\t}\n}", "func (mysqlFlavor57) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) {\n\tswitch capability {\n\tcase MySQLJSONFlavorCapability:\n\t\treturn true, nil\n\tdefault:\n\t\treturn false, nil\n\t}\n}", "func (d *GitDir) Capabilities() (*common.Capabilities, error) {\n\tc := common.NewCapabilities()\n\n\terr := d.addSymRefCapability(c)\n\n\treturn c, err\n}", "func (d *Driver) GetCapabilities() (*sdk.CapabilitiesResponse, error) {\n\tscope := &sdk.CapabilitiesResponse{Scope: sdk.LocalScope}\n\treturn scope, nil\n}", "func (m *FakeApiServer) List(arg0 schema.GroupVersionResource, arg1 string, arg2 schema.GroupVersionKind, arg3 labels.Selector) (runtime.Object, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"List\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(runtime.Object)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockGCS) ObjectAttrs(arg0 string) (*storage.ObjectAttrs, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ObjectAttrs\", arg0)\n\tret0, _ := ret[0].(*storage.ObjectAttrs)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func setKnownCapabilities() map[configv1.ClusterVersionCapability]struct{} {\n\tknown := make(map[configv1.ClusterVersionCapability]struct{})\n\n\tfor _, v := range 
configv1.ClusterVersionCapabilitySets {\n\t\tfor _, capability := range v {\n\t\t\tif _, ok := known[capability]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tknown[capability] = struct{}{}\n\t\t}\n\t}\n\treturn known\n}", "func verifyCapability(err error, capabilities []string) {\n\tif capabilities == nil {\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tfmt.Printf(\"problem with retrieving the capability flag with err: %s \\n\", err)\n\t}\n\n\tfor _, capability := range capabilities {\n\t\tif capability != cloudformation.CapabilityCapabilityIam &&\n\t\t\tcapability != cloudformation.CapabilityCapabilityNamedIam {\n\t\t\tfmt.Printf(\"capability provided with invalid flag, valid are \\\"CAPABILITY_IAM\\\" or \\\"CAPABILITY_NAMED_IAM\\\" \\n\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}", "func (_m *MockPermissionRegistry) Register(permissions charon.Permissions) (int64, int64, int64, error) {\n\tret := _m.Called(permissions)\n\n\tvar r0 int64\n\tif rf, ok := ret.Get(0).(func(charon.Permissions) int64); ok {\n\t\tr0 = rf(permissions)\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\n\tvar r1 int64\n\tif rf, ok := ret.Get(1).(func(charon.Permissions) int64); ok {\n\t\tr1 = rf(permissions)\n\t} else {\n\t\tr1 = ret.Get(1).(int64)\n\t}\n\n\tvar r2 int64\n\tif rf, ok := ret.Get(2).(func(charon.Permissions) int64); ok {\n\t\tr2 = rf(permissions)\n\t} else {\n\t\tr2 = ret.Get(2).(int64)\n\t}\n\n\tvar r3 error\n\tif rf, ok := ret.Get(3).(func(charon.Permissions) error); ok {\n\t\tr3 = rf(permissions)\n\t} else {\n\t\tr3 = ret.Error(3)\n\t}\n\n\treturn r0, r1, r2, r3\n}", "func (_m *Asconn) RequestInfo(_a0 ...string) (map[string]string, aerospike.Error) {\n\t_va := make([]interface{}, len(_a0))\n\tfor _i := range _a0 {\n\t\t_va[_i] = _a0[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 map[string]string\n\tif rf, ok := ret.Get(0).(func(...string) map[string]string); ok {\n\t\tr0 = rf(_a0...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = 
ret.Get(0).(map[string]string)\n\t\t}\n\t}\n\n\tvar r1 aerospike.Error\n\tif rf, ok := ret.Get(1).(func(...string) aerospike.Error); ok {\n\t\tr1 = rf(_a0...)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(aerospike.Error)\n\t\t}\n\t}\n\n\treturn r0, r1\n}", "func (a *Client) ListCapabilities(params *ListCapabilitiesParams, authInfo runtime.ClientAuthInfoWriter) (*ListCapabilitiesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListCapabilitiesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"listCapabilities\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/capabilities\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListCapabilitiesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ListCapabilitiesOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*ListCapabilitiesDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func (_m *Manager) Get(ctx context.Context, projectID int64, meta ...string) (map[string]string, error) {\n\t_va := make([]interface{}, len(meta))\n\tfor _i := range meta {\n\t\t_va[_i] = meta[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, projectID)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 map[string]string\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int64, ...string) (map[string]string, error)); ok {\n\t\treturn rf(ctx, projectID, meta...)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, int64, ...string) map[string]string); 
ok {\n\t\tr0 = rf(ctx, projectID, meta...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string]string)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, int64, ...string) error); ok {\n\t\tr1 = rf(ctx, projectID, meta...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockManager) SerializeHelmValues(arg0, arg1 string) error {\n\tret := m.ctrl.Call(m, \"SerializeHelmValues\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *MockPermissionProvider) Register(permissions charon.Permissions) (int64, int64, int64, error) {\n\tret := _m.Called(permissions)\n\n\tvar r0 int64\n\tif rf, ok := ret.Get(0).(func(charon.Permissions) int64); ok {\n\t\tr0 = rf(permissions)\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\n\tvar r1 int64\n\tif rf, ok := ret.Get(1).(func(charon.Permissions) int64); ok {\n\t\tr1 = rf(permissions)\n\t} else {\n\t\tr1 = ret.Get(1).(int64)\n\t}\n\n\tvar r2 int64\n\tif rf, ok := ret.Get(2).(func(charon.Permissions) int64); ok {\n\t\tr2 = rf(permissions)\n\t} else {\n\t\tr2 = ret.Get(2).(int64)\n\t}\n\n\tvar r3 error\n\tif rf, ok := ret.Get(3).(func(charon.Permissions) error); ok {\n\t\tr3 = rf(permissions)\n\t} else {\n\t\tr3 = ret.Error(3)\n\t}\n\n\treturn r0, r1, r2, r3\n}", "func (m *MockASOResourceSpecGetter) Parameters(ctx context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Parameters\", ctx, object)\n\tret0, _ := ret[0].(genruntime.MetaObject)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func GetCapabilities(cap string) []*string {\n\tx := []*string{}\n\tif strings.Compare(cap, \"\") == 0 {\n\t\treturn nil\n\t}\n\tfor _, c := range strings.Split(cap, \",\") {\n\t\tx = append(x, &c)\n\t}\n\treturn x\n}", "func (c *Client) GetCapabilities(ctx context.Context) (res *repb.ServerCapabilities, err error) {\n\treturn c.GetCapabilitiesForInstance(ctx, c.InstanceName)\n}", "func (m 
*MockBtcd) GetInfo() (*map[string]interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetInfo\")\n\tret0, _ := ret[0].(*map[string]interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockHandler) GetCPUArchitectures(arg0 string) ([]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetCPUArchitectures\", arg0)\n\tret0, _ := ret[0].([]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (c *Client) CheckCapabilities(ctx context.Context) (err error) {\n\t// Only query the server once. There is no need for a lock, because we will\n\t// usually make the call on startup.\n\tif c.serverCaps == nil {\n\t\tif c.serverCaps, err = c.GetCapabilities(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := digest.CheckCapabilities(c.serverCaps); err != nil {\n\t\treturn errors.Wrapf(err, \"digest function mismatch\")\n\t}\n\n\tif c.serverCaps.CacheCapabilities != nil {\n\t\tc.MaxBatchSize = MaxBatchSize(c.serverCaps.CacheCapabilities.MaxBatchTotalSizeBytes)\n\t}\n\n\tif useCompression := c.CompressedBytestreamThreshold >= 0; useCompression {\n\t\tif c.serverCaps.CacheCapabilities.SupportedCompressors == nil {\n\t\t\treturn errors.New(\"the server does not support compression\")\n\t\t}\n\n\t\tfoundZstd := false\n\t\tfor _, sComp := range c.serverCaps.CacheCapabilities.SupportedCompressors {\n\t\t\tif sComp == repb.Compressor_ZSTD {\n\t\t\t\tfoundZstd = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !foundZstd {\n\t\t\treturn errors.New(\"zstd is not supported by server, while the SDK only supports ZSTD compression\")\n\t\t}\n\t\tfor _, compressor := range c.serverCaps.CacheCapabilities.SupportedBatchUpdateCompressors {\n\t\t\tif compressor == repb.Compressor_ZSTD {\n\t\t\t\tc.batchCompression = true\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (o *PluginConfigLinux) GetCapabilities() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Capabilities\n}", "func (_m 
*AdminAPI) HandleInfo(sessionID uint64, handleID uint64) (interface{}, error) {\n\tret := _m.Called(sessionID, handleID)\n\n\tvar r0 interface{}\n\tif rf, ok := ret.Get(0).(func(uint64, uint64) interface{}); ok {\n\t\tr0 = rf(sessionID, handleID)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(interface{})\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(uint64, uint64) error); ok {\n\t\tr1 = rf(sessionID, handleID)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (l *RemoteProvider) GetProviderCapabilities(w http.ResponseWriter, r *http.Request) {\n\tencoder := json.NewEncoder(w)\n\tif err := encoder.Encode(l.ProviderProperties); err != nil {\n\t\thttp.Error(w, ErrEncoding(err, \"Provider Capablity\").Error(), http.StatusInternalServerError)\n\t}\n}", "func TestCapabilitiesExternal(t *testing.T) {\n\tcfg := getCapabilitiesTestConfig()\n\tcapsNonExternal := getCapabilitiesWithConfig(cfg, t)\n\tcfg.External = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}\n\tcapsExternal := getCapabilitiesWithConfig(cfg, t)\n\n\tfor _, cap := range externalUnsupportedCapabilities {\n\t\tassert.NotContains(t, capsExternal, &ecs.Attribute{\n\t\t\tName: aws.String(cap),\n\t\t})\n\t}\n\tfor _, cap := range externalSpecificCapabilities {\n\t\tassert.Contains(t, capsExternal, &ecs.Attribute{\n\t\t\tName: aws.String(cap),\n\t\t})\n\t}\n\tcommonCaps := removeAttributesByNames(capsNonExternal, externalUnsupportedCapabilities)\n\tfor _, cap := range commonCaps {\n\t\tassert.Contains(t, capsExternal, cap)\n\t}\n}", "func (o *PluginConfigLinux) GetCapabilitiesOk() ([]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Capabilities, true\n}", "func (c *ClientWithResponses) GetCapabilitiesWithResponse(ctx context.Context) (*GetCapabilitiesResponse, error) {\n\trsp, err := c.GetCapabilities(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseGetCapabilitiesResponse(rsp)\n}", "func (_m 
*PermissionRegistry) Register(ctx context.Context, permissions charon.Permissions) (int64, int64, int64, error) {\n\tret := _m.Called(ctx, permissions)\n\n\tvar r0 int64\n\tif rf, ok := ret.Get(0).(func(context.Context, charon.Permissions) int64); ok {\n\t\tr0 = rf(ctx, permissions)\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\n\tvar r1 int64\n\tif rf, ok := ret.Get(1).(func(context.Context, charon.Permissions) int64); ok {\n\t\tr1 = rf(ctx, permissions)\n\t} else {\n\t\tr1 = ret.Get(1).(int64)\n\t}\n\n\tvar r2 int64\n\tif rf, ok := ret.Get(2).(func(context.Context, charon.Permissions) int64); ok {\n\t\tr2 = rf(ctx, permissions)\n\t} else {\n\t\tr2 = ret.Get(2).(int64)\n\t}\n\n\tvar r3 error\n\tif rf, ok := ret.Get(3).(func(context.Context, charon.Permissions) error); ok {\n\t\tr3 = rf(ctx, permissions)\n\t} else {\n\t\tr3 = ret.Error(3)\n\t}\n\n\treturn r0, r1, r2, r3\n}", "func TestHandler_OK(t *testing.T) {\n\tnow, _ := clock.ParseRFC3339(\"2000-01-01T00:00:00Z\")\n\tinputMachineID := \"AAAAAAAA-A00A-1234-1234-5864377B4831\"\n\ttimeProvider := clock.FrozenTimeProvider{\n\t\tCurrent: now,\n\t}\n\tvar request = events.APIGatewayProxyRequest{\n\t\tHTTPMethod: \"POST\",\n\t\tResource: \"/preflight/{machine_id}\",\n\t\tPathParameters: map[string]string{\"machine_id\": inputMachineID},\n\t\tHeaders: map[string]string{\"Content-Type\": \"application/json\"},\n\t\tBody: `{\n\t\"os_build\":\"20D5029f\",\n\t\"santa_version\":\"2021.1\",\n\t\"hostname\":\"my-awesome-macbook-pro.attlocal.net\",\n\t\"transitive_rule_count\":0,\n\t\"os_version\":\"11.2\",\n\t\"certificate_rule_count\":2,\n\t\"client_mode\":\"MONITOR\",\n\t\"serial_num\":\"C02123456789\",\n\t\"binary_rule_count\":3,\n\t\"primary_user\":\"nobody\",\n\t\"compiler_rule_count\":0\n}`,\n\t}\n\tmockedConfigurationFetcher := &MockDynamodb{}\n\n\tconfig := machineconfiguration.MachineConfiguration{\n\t\tClientMode: types.Lockdown,\n\t\tBatchSize: 37,\n\t\tUploadLogsURL: \"/aaa\",\n\t\tEnableBundles: 
true,\n\t\tAllowedPathRegex: \"\",\n\t\tCleanSync: false,\n\t}\n\n\treturnedConfig, err := attributevalue.MarshalMap(config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tmockedConfigurationFetcher.On(\"GetItem\", mock.Anything, mock.Anything).Return(&awsdynamodb.GetItemOutput{\n\t\tItem: returnedConfig,\n\t}, nil)\n\n\tmockedStateTracking := &MockDynamodb{}\n\tmockedStateTracking.On(\"GetItem\", mock.Anything, mock.Anything).Return(&awsdynamodb.GetItemOutput{\n\t\tItem: nil,\n\t}, nil)\n\n\t// mockedStateTracking.On(\"PutItem\", mock.MatchedBy(func(item interface{}) bool {\n\tmockedStateTracking.On(\"PutItem\", mock.MatchedBy(func(syncState syncstate.SyncStateRow) bool {\n\t\treturn syncState.MachineID == inputMachineID && syncState.BatchSize == 37 && syncState.LastCleanSync == \"2000-01-01T00:00:00Z\" && syncState.FeedSyncCursor == \"2000-01-01T00:00:00Z\"\n\t})).Return(&awsdynamodb.PutItemOutput{}, nil)\n\n\tmockedStateTracking.On(\"PutItem\", mock.MatchedBy(func(sensorData sensordata.SensorData) bool {\n\t\treturn sensorData.OSBuild == \"20D5029f\" && sensorData.SerialNum == \"C02123456789\" && sensorData.MachineID == inputMachineID && sensorData.PrimaryUser == \"nobody\" && sensorData.BinaryRuleCount == 3 && sensorData.CompilerRuleCount == 0\n\t})).Return(&awsdynamodb.PutItemOutput{}, nil)\n\n\th := &PostPreflightHandler{\n\t\ttimeProvider: timeProvider,\n\t\tmachineConfigurationService: machineconfiguration.GetMachineConfigurationService(mockedConfigurationFetcher, timeProvider),\n\t\tstateTrackingService: getStateTrackingService(mockedStateTracking, timeProvider),\n\t\tcleanSyncService: getCleanSyncService(timeProvider),\n\t}\n\n\tresp, err := h.Handle(request)\n\n\tassert.Empty(t, err)\n\tassert.Equal(t, 200, resp.StatusCode)\n\n\t// Ensure that the response matches the configuration returned\n\tassert.Equal(t, 
`{\"client_mode\":\"LOCKDOWN\",\"blocked_path_regex\":\"\",\"allowed_path_regex\":\"\",\"batch_size\":37,\"enable_bundles\":true,\"enable_transitive_rules\":false,\"clean_sync\":true,\"upload_logs_url\":\"/aaa\"}`, resp.Body)\n}", "func (d *Driver) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {\n\tnscaps := []*csi.NodeServiceCapability{\n\t\t&csi.NodeServiceCapability{\n\t\t\tType: &csi.NodeServiceCapability_Rpc{\n\t\t\t\tRpc: &csi.NodeServiceCapability_RPC{\n\t\t\t\t\tType: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t&csi.NodeServiceCapability{\n\t\t\tType: &csi.NodeServiceCapability_Rpc{\n\t\t\t\tRpc: &csi.NodeServiceCapability_RPC{\n\t\t\t\t\tType: csi.NodeServiceCapability_RPC_EXPAND_VOLUME,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t&csi.NodeServiceCapability{\n\t\t\tType: &csi.NodeServiceCapability_Rpc{\n\t\t\t\tRpc: &csi.NodeServiceCapability_RPC{\n\t\t\t\t\tType: csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\td.log.WithFields(logrus.Fields{\n\t\t\"node_capabilities\": nscaps,\n\t\t\"method\": \"node_get_capabilities\",\n\t}).Info(\"node get capabilities called\")\n\treturn &csi.NodeGetCapabilitiesResponse{\n\t\tCapabilities: nscaps,\n\t}, nil\n}", "func SetForTests(c Capabilities) {\n\tcapInstance.lock.Lock()\n\tdefer capInstance.lock.Unlock()\n\tcapInstance.capabilities = &c\n}", "func newMockKvCapabilityVerifier(t mockConstructorTestingTnewMockKvCapabilityVerifier) *mockKvCapabilityVerifier {\n\tmock := &mockKvCapabilityVerifier{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (s *GetSceneOutput) SetCapabilities(v []*string) *GetSceneOutput {\n\ts.Capabilities = v\n\treturn s\n}", "func (m *MockisProxyKeyHandle_KeyOrHandle) isProxyKeyHandle_KeyOrHandle() {\n\tm.ctrl.Call(m, \"isProxyKeyHandle_KeyOrHandle\")\n}" ]
[ "0.6384923", "0.592584", "0.59184813", "0.58920664", "0.5881676", "0.5876497", "0.5834733", "0.581414", "0.5751604", "0.5739174", "0.56472343", "0.56211865", "0.5613671", "0.5582627", "0.5560482", "0.5440514", "0.54207444", "0.5365423", "0.5351793", "0.5347695", "0.5277504", "0.5271598", "0.5252751", "0.52287966", "0.5221567", "0.5213698", "0.5207069", "0.51977754", "0.5173755", "0.51710397", "0.5138512", "0.51159555", "0.51061755", "0.5098178", "0.50729704", "0.50721", "0.50721", "0.50079656", "0.50034666", "0.5003397", "0.49982217", "0.49872574", "0.4978", "0.49533904", "0.49366802", "0.49318525", "0.49211398", "0.4919866", "0.49154395", "0.49060896", "0.4904223", "0.4902741", "0.49026078", "0.49015272", "0.48926756", "0.4888375", "0.48812535", "0.48766005", "0.48722446", "0.48708886", "0.48678923", "0.48672208", "0.48649076", "0.4863692", "0.485305", "0.48526913", "0.48412025", "0.48358423", "0.48131007", "0.48120362", "0.4806028", "0.4804795", "0.4803789", "0.47996232", "0.47954893", "0.47935817", "0.4783434", "0.47771996", "0.4776547", "0.4775706", "0.47754788", "0.47737858", "0.4773423", "0.47714618", "0.4769484", "0.47627822", "0.47612107", "0.47583273", "0.47486246", "0.47472614", "0.47440556", "0.47401386", "0.47373983", "0.4736847", "0.4732178", "0.4723544", "0.4721821", "0.4720138", "0.4718487", "0.4712671" ]
0.62991965
1
DeliveryRequest provides a mock function with given fields: connID, event
func (_m *Plugin) DeliveryRequest(connID string, event *fftypes.EventDelivery) error { ret := _m.Called(connID, event) var r0 error if rf, ok := ret.Get(0).(func(string, *fftypes.EventDelivery) error); ok { r0 = rf(connID, event) } else { r0 = ret.Error(0) } return r0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *OutboundEvent) Call(ctx context.Context, t require.TestingT, req *transport.Request) (*transport.Response, error) {\n\tif e.WantTimeout != 0 {\n\t\ttimeoutBounds := e.WantTimeoutBounds\n\t\tif timeoutBounds == 0 {\n\t\t\ttimeoutBounds = time.Millisecond * 10\n\t\t}\n\t\tdeadline, ok := ctx.Deadline()\n\t\trequire.True(t, ok, \"wanted context deadline, but there was no deadline\")\n\t\tdeadlineDuration := deadline.Sub(time.Now())\n\t\tassert.True(t, deadlineDuration > (e.WantTimeout-timeoutBounds), \"deadline was less than expected, want %q (within %s), got %q\", e.WantTimeout, timeoutBounds, deadlineDuration)\n\t\tassert.True(t, deadlineDuration < (e.WantTimeout+timeoutBounds), \"deadline was greater than expected, want %q (within %s), got %q\", e.WantTimeout, timeoutBounds, deadlineDuration)\n\t}\n\n\tassertEqualIfSet(t, e.WantCaller, req.Caller, \"invalid Caller\")\n\tassertEqualIfSet(t, e.WantService, req.Service, \"invalid Service\")\n\tassertEqualIfSet(t, string(e.WantEncoding), string(req.Encoding), \"invalid Encoding\")\n\tassertEqualIfSet(t, e.WantProcedure, req.Procedure, \"invalid Procedure\")\n\tassertEqualIfSet(t, e.WantShardKey, req.ShardKey, \"invalid ShardKey\")\n\tassertEqualIfSet(t, e.WantRoutingKey, req.RoutingKey, \"invalid RoutingKey\")\n\tassertEqualIfSet(t, e.WantRoutingDelegate, req.RoutingDelegate, \"invalid RoutingDelegate\")\n\n\tif e.WantHeaders.Len() != 0 {\n\t\tassert.Equal(t, e.WantHeaders.Len(), req.Headers.Len(), \"unexpected number of headers\")\n\t\tfor key, wantVal := range e.WantHeaders.Items() {\n\t\t\tgotVal, ok := req.Headers.Get(key)\n\t\t\tassert.True(t, ok, \"header key %q was not in request headers\", key)\n\t\t\tassert.Equal(t, wantVal, gotVal, \"invalid request header value for %q\", key)\n\t\t}\n\t}\n\n\tif e.WantBody != \"\" {\n\t\tbody, err := ioutil.ReadAll(req.Body)\n\t\tassert.NoError(t, err, \"got error reading request body\")\n\t\tassert.Equal(t, e.WantBody, string(body), \"request body did not 
match\")\n\t}\n\n\tif e.WaitForTimeout {\n\t\t_, ok := ctx.Deadline()\n\t\trequire.True(t, ok, \"attempted to wait on context that has no deadline\")\n\t\t<-ctx.Done()\n\t}\n\n\treturn &transport.Response{\n\t\tBody: ioutil.NopCloser(bytes.NewBuffer([]byte(e.GiveRespBody))),\n\t\tHeaders: e.GiveRespHeaders,\n\t\tApplicationError: e.GiveApplicationError,\n\t}, e.GiveError\n}", "func TestService_Handle_Inviter(t *testing.T) {\n\tprov := protocol.MockProvider{}\n\tctx := context{outboundDispatcher: prov.OutboundDispatcher(), didCreator: &mockdid.MockDIDCreator{Doc: getMockDID()}}\n\tnewDidDoc, err := ctx.didCreator.CreateDID()\n\trequire.NoError(t, err)\n\n\ts, err := New(&mockdid.MockDIDCreator{Doc: getMockDID()}, &protocol.MockProvider{})\n\trequire.NoError(t, err)\n\tactionCh := make(chan service.DIDCommAction, 10)\n\terr = s.RegisterActionEvent(actionCh)\n\trequire.NoError(t, err)\n\tstatusCh := make(chan service.StateMsg, 10)\n\terr = s.RegisterMsgEvent(statusCh)\n\trequire.NoError(t, err)\n\tcompletedFlag := make(chan struct{})\n\trespondedFlag := make(chan struct{})\n\tgo msgEventListener(t, statusCh, respondedFlag, completedFlag)\n\tgo func() { require.NoError(t, service.AutoExecuteActionEvent(actionCh)) }()\n\tthid := randomString()\n\n\t// Invitation was previously sent by Alice to Bob.\n\t// Bob now sends a did-exchange Request\n\tpayloadBytes, err := json.Marshal(\n\t\t&Request{\n\t\t\tType: ConnectionRequest,\n\t\t\tID: thid,\n\t\t\tLabel: \"Bob\",\n\t\t\tConnection: &Connection{\n\t\t\t\tDID: \"B.did@B:A\",\n\t\t\t\tDIDDoc: newDidDoc,\n\t\t\t},\n\t\t})\n\trequire.NoError(t, err)\n\tmsg := service.DIDCommMsg{Type: ConnectionRequest, Payload: payloadBytes}\n\terr = s.Handle(&msg)\n\trequire.NoError(t, err)\n\n\tselect {\n\tcase <-respondedFlag:\n\tcase <-time.After(2 * time.Second):\n\t\trequire.Fail(t, \"didn't receive post event responded\")\n\t}\n\t// Alice automatically sends exchange Response to Bob\n\t// Bob replies with an ACK\n\t// validateState(t, 
s, thid, (&responded{}).Name())\n\tpayloadBytes, err = json.Marshal(\n\t\t&model.Ack{\n\t\t\tType: ConnectionAck,\n\t\t\tID: randomString(),\n\t\t\tStatus: \"OK\",\n\t\t\tThread: &decorator.Thread{ID: thid},\n\t\t})\n\trequire.NoError(t, err)\n\tmsg = service.DIDCommMsg{Type: ConnectionAck, Payload: payloadBytes}\n\terr = s.Handle(&msg)\n\trequire.NoError(t, err)\n\n\tselect {\n\tcase <-completedFlag:\n\tcase <-time.After(2 * time.Second):\n\t\trequire.Fail(t, \"didn't receive post event complete\")\n\t}\n\tvalidateState(t, s, thid, (&completed{}).Name())\n}", "func TestService_Handle_Invitee(t *testing.T) {\n\tdata := make(map[string]string)\n\t// using this mockStore as a hack in order to obtain the auto-generated thid after\n\t// automatically sending the request back to Bob\n\tvar lock sync.RWMutex\n\tstore := &mockStore{\n\t\tput: func(s string, bytes []byte) error {\n\t\t\tlock.Lock()\n\t\t\tdefer lock.Unlock()\n\t\t\tdata[s] = string(bytes)\n\t\t\treturn nil\n\t\t},\n\t\tget: func(s string) (bytes []byte, e error) {\n\t\t\tlock.RLock()\n\t\t\tdefer lock.RUnlock()\n\t\t\tif state, found := data[s]; found {\n\t\t\t\treturn []byte(state), nil\n\t\t\t}\n\t\t\treturn nil, storage.ErrDataNotFound\n\t\t},\n\t}\n\tprov := protocol.MockProvider{}\n\tctx := context{outboundDispatcher: prov.OutboundDispatcher(), didCreator: &mockdid.MockDIDCreator{Doc: getMockDID()}}\n\tnewDidDoc, err := ctx.didCreator.CreateDID()\n\trequire.NoError(t, err)\n\n\ts, err := New(&mockdid.MockDIDCreator{Doc: getMockDID()}, &protocol.MockProvider{CustomStore: store})\n\trequire.NoError(t, err)\n\tactionCh := make(chan service.DIDCommAction, 10)\n\terr = s.RegisterActionEvent(actionCh)\n\trequire.NoError(t, err)\n\tstatusCh := make(chan service.StateMsg, 10)\n\terr = s.RegisterMsgEvent(statusCh)\n\trequire.NoError(t, err)\n\tdone := make(chan bool)\n\tgo func() {\n\t\tfor e := range statusCh {\n\t\t\tif e.Type == service.PostState {\n\t\t\t\t// receive the events\n\t\t\t\tif e.StateID == 
\"completed\" {\n\t\t\t\t\tdone <- true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\tgo func() { require.NoError(t, service.AutoExecuteActionEvent(actionCh)) }()\n\n\t// Alice receives an invitation from Bob\n\tpayloadBytes, err := json.Marshal(\n\t\t&Invitation{\n\t\t\tType: ConnectionInvite,\n\t\t\tID: randomString(),\n\t\t\tLabel: \"Bob\",\n\t\t\tDID: \"did:example:bob\",\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\tmsg := service.DIDCommMsg{Type: ConnectionInvite, Outbound: false, Payload: payloadBytes}\n\terr = s.Handle(&msg)\n\trequire.NoError(t, err)\n\n\t// Alice automatically sends a Request to Bob and is now in REQUESTED state.\n\tvar thid string\n\tvar currState string\n\tfor k, v := range data {\n\t\tthid = k\n\t\tcurrState = v\n\t\tbreak\n\t}\n\trequire.NotEmpty(t, thid)\n\trequire.Equal(t, (&requested{}).Name(), currState)\n\n\tconnection := &Connection{\n\t\tDID: newDidDoc.ID,\n\t\tDIDDoc: newDidDoc,\n\t}\n\n\tconnectionSignature, err := prepareConnectionSignature(connection)\n\trequire.NoError(t, err)\n\n\t// Bob replies with a Response\n\tpayloadBytes, err = json.Marshal(\n\t\t&Response{\n\t\t\tType: ConnectionResponse,\n\t\t\tID: randomString(),\n\t\t\tConnectionSignature: connectionSignature,\n\t\t\tThread: &decorator.Thread{ID: thid},\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\tmsg = service.DIDCommMsg{Type: ConnectionResponse, Outbound: false, Payload: payloadBytes}\n\terr = s.Handle(&msg)\n\trequire.NoError(t, err)\n\n\t// Alice automatically sends an ACK to Bob\n\t// Alice must now be in COMPLETED state\n\tselect {\n\tcase <-done:\n\tcase <-time.After(2 * time.Second):\n\t\trequire.Fail(t, \"didn't receive post event complete\")\n\t}\n\tvalidateState(t, s, thid, (&completed{}).Name())\n}", "func (_m *NatsConn) PublishRequest(subj string, reply string, data []byte) error {\n\tret := _m.Called(subj, reply, data)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string, []byte) error); ok {\n\t\tr0 = rf(subj, reply, data)\n\t} else {\n\t\tr0 = 
ret.Error(0)\n\t}\n\n\treturn r0\n}", "func TestService_Handle_Inviter(t *testing.T) {\n\tmockStore := &mockstorage.MockStore{Store: make(map[string]mockstorage.DBEntry)}\n\tstoreProv := mockstorage.NewCustomMockStoreProvider(mockStore)\n\tk := newKMS(t, storeProv)\n\tprov := &protocol.MockProvider{\n\t\tStoreProvider: storeProv,\n\t\tServiceMap: map[string]interface{}{\n\t\t\tmediator.Coordination: &mockroute.MockMediatorSvc{},\n\t\t},\n\t\tCustomKMS: k,\n\t\tKeyTypeValue: kms.ED25519Type,\n\t\tKeyAgreementTypeValue: kms.X25519ECDHKWType,\n\t}\n\n\tctx := &context{\n\t\toutboundDispatcher: prov.OutboundDispatcher(),\n\t\tcrypto: &tinkcrypto.Crypto{},\n\t\tkms: k,\n\t\tkeyType: kms.ED25519Type,\n\t\tkeyAgreementType: kms.X25519ECDHKWType,\n\t}\n\n\tverPubKey, encPubKey := newSigningAndEncryptionDIDKeys(t, ctx)\n\n\tctx.vdRegistry = &mockvdr.MockVDRegistry{CreateValue: createDIDDocWithKey(verPubKey, encPubKey)}\n\n\tconnRec, err := connection.NewRecorder(prov)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, connRec)\n\n\tctx.connectionRecorder = connRec\n\n\tdoc, err := ctx.vdRegistry.Create(testMethod, nil)\n\trequire.NoError(t, err)\n\n\ts, err := New(prov)\n\trequire.NoError(t, err)\n\n\tactionCh := make(chan service.DIDCommAction, 10)\n\terr = s.RegisterActionEvent(actionCh)\n\trequire.NoError(t, err)\n\n\tstatusCh := make(chan service.StateMsg, 10)\n\terr = s.RegisterMsgEvent(statusCh)\n\trequire.NoError(t, err)\n\n\tcompletedFlag := make(chan struct{})\n\trespondedFlag := make(chan struct{})\n\n\tgo msgEventListener(t, statusCh, respondedFlag, completedFlag)\n\n\tgo func() { service.AutoExecuteActionEvent(actionCh) }()\n\n\tinvitation := &Invitation{\n\t\tType: InvitationMsgType,\n\t\tID: randomString(),\n\t\tLabel: \"Bob\",\n\t\tRecipientKeys: []string{verPubKey},\n\t\tServiceEndpoint: \"http://alice.agent.example.com:8081\",\n\t}\n\n\terr = ctx.connectionRecorder.SaveInvitation(invitation.ID, invitation)\n\trequire.NoError(t, err)\n\n\tthid := 
randomString()\n\n\t// Invitation was previously sent by Alice to Bob.\n\t// Bob now sends a did-exchange Invitation\n\tpayloadBytes, err := json.Marshal(\n\t\t&Request{\n\t\t\tType: RequestMsgType,\n\t\t\tID: thid,\n\t\t\tLabel: \"Bob\",\n\t\t\tThread: &decorator.Thread{\n\t\t\t\tPID: invitation.ID,\n\t\t\t},\n\t\t\tDID: doc.DIDDocument.ID,\n\t\t\tDocAttach: unsignedDocAttach(t, doc.DIDDocument),\n\t\t})\n\trequire.NoError(t, err)\n\tmsg, err := service.ParseDIDCommMsgMap(payloadBytes)\n\trequire.NoError(t, err)\n\t_, err = s.HandleInbound(msg, service.NewDIDCommContext(doc.DIDDocument.ID, \"\", nil))\n\trequire.NoError(t, err)\n\n\tselect {\n\tcase <-respondedFlag:\n\tcase <-time.After(2 * time.Second):\n\t\trequire.Fail(t, \"didn't receive post event responded\")\n\t}\n\t// Alice automatically sends exchange Response to Bob\n\t// Bob replies with an ACK\n\tpayloadBytes, err = json.Marshal(\n\t\t&model.Ack{\n\t\t\tType: AckMsgType,\n\t\t\tID: randomString(),\n\t\t\tStatus: \"OK\",\n\t\t\tThread: &decorator.Thread{ID: thid},\n\t\t})\n\trequire.NoError(t, err)\n\n\tdidMsg, err := service.ParseDIDCommMsgMap(payloadBytes)\n\trequire.NoError(t, err)\n\n\t_, err = s.HandleInbound(didMsg, service.NewDIDCommContext(doc.DIDDocument.ID, \"\", nil))\n\trequire.NoError(t, err)\n\n\tselect {\n\tcase <-completedFlag:\n\tcase <-time.After(2 * time.Second):\n\t\trequire.Fail(t, \"didn't receive post event complete\")\n\t}\n\n\tvalidateState(t, s, thid, findNamespace(AckMsgType), (&completed{}).Name())\n}", "func TestMockOnEvent(t *testing.T) {\n\tmockServer := &MockRailsServer{T: t, Behaviour: MockEvent}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan struct{})\n\n\tclient.OnEvent(\"AgentChannel\", func(conn *websocket.Conn, payload *Payload, error error) {\n\t\tcalled <- struct{}{}\n\t\treturn\n\t})\n\n\terr := client.Serve()\n\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}", "func TestService_Handle_Invitee(t *testing.T) {\n\tprotocolStateStore := mockstorage.NewMockStoreProvider()\n\tstore := mockstorage.NewMockStoreProvider()\n\tk := newKMS(t, store)\n\tprov := &protocol.MockProvider{\n\t\tStoreProvider: store,\n\t\tProtocolStateStoreProvider: protocolStateStore,\n\t\tServiceMap: map[string]interface{}{\n\t\t\tmediator.Coordination: &mockroute.MockMediatorSvc{},\n\t\t},\n\t\tCustomKMS: k,\n\t\tKeyTypeValue: kms.ED25519Type,\n\t\tKeyAgreementTypeValue: kms.X25519ECDHKWType,\n\t}\n\n\tctx := &context{\n\t\toutboundDispatcher: prov.OutboundDispatcher(),\n\t\tcrypto: &tinkcrypto.Crypto{},\n\t\tkms: k,\n\t\tkeyType: kms.ED25519Type,\n\t\tkeyAgreementType: kms.X25519ECDHKWType,\n\t}\n\n\tverPubKey, encPubKey := newSigningAndEncryptionDIDKeys(t, ctx)\n\n\tctx.vdRegistry = &mockvdr.MockVDRegistry{CreateValue: createDIDDocWithKey(verPubKey, encPubKey)}\n\n\tconnRec, err := connection.NewRecorder(prov)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, connRec)\n\n\tctx.connectionRecorder = connRec\n\n\tdoc, err := ctx.vdRegistry.Create(testMethod, nil)\n\trequire.NoError(t, err)\n\n\ts, err := New(prov)\n\trequire.NoError(t, err)\n\n\ts.ctx.vdRegistry = &mockvdr.MockVDRegistry{ResolveValue: doc.DIDDocument}\n\tactionCh := make(chan service.DIDCommAction, 10)\n\terr = s.RegisterActionEvent(actionCh)\n\trequire.NoError(t, err)\n\n\tstatusCh := make(chan service.StateMsg, 10)\n\terr = s.RegisterMsgEvent(statusCh)\n\trequire.NoError(t, err)\n\n\trequestedCh := make(chan string)\n\tcompletedCh := make(chan struct{})\n\n\tgo handleMessagesInvitee(statusCh, requestedCh, completedCh)\n\n\tgo func() { service.AutoExecuteActionEvent(actionCh) }()\n\n\tinvitation := &Invitation{\n\t\tType: InvitationMsgType,\n\t\tID: randomString(),\n\t\tLabel: \"Bob\",\n\t\tRecipientKeys: []string{verPubKey},\n\t\tServiceEndpoint: \"http://alice.agent.example.com:8081\",\n\t}\n\n\terr = 
ctx.connectionRecorder.SaveInvitation(invitation.ID, invitation)\n\trequire.NoError(t, err)\n\t// Alice receives an invitation from Bob\n\tpayloadBytes, err := json.Marshal(invitation)\n\trequire.NoError(t, err)\n\n\tdidMsg, err := service.ParseDIDCommMsgMap(payloadBytes)\n\trequire.NoError(t, err)\n\n\t_, err = s.HandleInbound(didMsg, service.EmptyDIDCommContext())\n\trequire.NoError(t, err)\n\n\tvar connID string\n\tselect {\n\tcase connID = <-requestedCh:\n\tcase <-time.After(2 * time.Second):\n\t\trequire.Fail(t, \"didn't receive post event requested\")\n\t}\n\n\t// Alice automatically sends a Request to Bob and is now in REQUESTED state.\n\tconnRecord, err := s.connectionRecorder.GetConnectionRecord(connID)\n\trequire.NoError(t, err)\n\trequire.Equal(t, (&requested{}).Name(), connRecord.State)\n\trequire.Equal(t, invitation.ID, connRecord.InvitationID)\n\trequire.Equal(t, invitation.RecipientKeys, connRecord.RecipientKeys)\n\trequire.Equal(t, invitation.ServiceEndpoint, connRecord.ServiceEndPoint)\n\n\tdidKey, err := ctx.getVerKey(invitation.ID)\n\trequire.NoError(t, err)\n\n\tdocAttach, err := ctx.didDocAttachment(doc.DIDDocument, didKey)\n\trequire.NoError(t, err)\n\n\t// Bob replies with a Response\n\tpayloadBytes, err = json.Marshal(\n\t\t&Response{\n\t\t\tType: ResponseMsgType,\n\t\t\tID: randomString(),\n\t\t\tDID: doc.DIDDocument.ID,\n\t\t\tDocAttach: docAttach,\n\t\t\tThread: &decorator.Thread{\n\t\t\t\tID: connRecord.ThreadID,\n\t\t\t},\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\tdidMsg, err = service.ParseDIDCommMsgMap(payloadBytes)\n\trequire.NoError(t, err)\n\n\t_, err = s.HandleInbound(didMsg, service.EmptyDIDCommContext())\n\trequire.NoError(t, err)\n\n\t// Alice automatically sends an ACK to Bob\n\t// Alice must now be in COMPLETED state\n\tselect {\n\tcase <-completedCh:\n\tcase <-time.After(2 * time.Second):\n\t\trequire.Fail(t, \"didn't receive post event complete\")\n\t}\n\n\tvalidateState(t, s, connRecord.ThreadID, 
findNamespace(ResponseMsgType), (&completed{}).Name())\n}", "func (_m *NatsConn) Request(subj string, data []byte, timeout time.Duration) (*nats.Msg, error) {\n\tret := _m.Called(subj, data, timeout)\n\n\tvar r0 *nats.Msg\n\tif rf, ok := ret.Get(0).(func(string, []byte, time.Duration) *nats.Msg); ok {\n\t\tr0 = rf(subj, data, timeout)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*nats.Msg)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, []byte, time.Duration) error); ok {\n\t\tr1 = rf(subj, data, timeout)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *NSenterEventIface) SendRequest() error {\n\tret := _m.Called()\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func TestRequest(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\tgo doh.Query(simpleQueryBytes)\n\treq := <-rt.req\n\tif req.URL.String() != testURL {\n\t\tt.Errorf(\"URL mismatch: %s != %s\", req.URL.String(), testURL)\n\t}\n\treqBody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(reqBody)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"reqBody has unexpected length: %d\", len(reqBody))\n\t}\n\t// Parse reqBody into a Message.\n\tnewQuery := mustUnpack(reqBody)\n\t// Ensure the converted request has an ID of zero.\n\tif newQuery.Header.ID != 0 {\n\t\tt.Errorf(\"Unexpected request header id: %v\", newQuery.Header.ID)\n\t}\n\t// Check that all fields except for Header.ID and Additionals\n\t// are the same as the original. 
Additionals may differ if\n\t// padding was added.\n\tif !queriesMostlyEqual(simpleQuery, *newQuery) {\n\t\tt.Errorf(\"Unexpected query body:\\n\\t%v\\nExpected:\\n\\t%v\", newQuery, simpleQuery)\n\t}\n\tcontentType := req.Header.Get(\"Content-Type\")\n\tif contentType != \"application/dns-message\" {\n\t\tt.Errorf(\"Wrong content type: %s\", contentType)\n\t}\n\taccept := req.Header.Get(\"Accept\")\n\tif accept != \"application/dns-message\" {\n\t\tt.Errorf(\"Wrong Accept header: %s\", accept)\n\t}\n}", "func (m *mConsensusNetworkMockSendRequest) Set(f func(p network.Request, p1 core.RecordRef) (r error)) *ConsensusNetworkMock {\n\tm.mock.SendRequestFunc = f\n\tm.mockExpectations = nil\n\treturn m.mock\n}", "func mockRequest() *request.Request {\n\tr := request.Request{}\n\thl := request.HandlerList{\n\t\tAfterEachFn: func(item request.HandlerListRunItem) bool { return true },\n\t}\n\n\tr.Handlers = request.Handlers{\n\t\tValidate: hl,\n\t\tBuild: hl,\n\t\tSend: hl,\n\t\tSign: hl,\n\t\tValidateResponse: hl,\n\t\tUnmarshal: hl,\n\t\tUnmarshalMeta: hl,\n\t\tUnmarshalError: hl,\n\t\tRetry: hl,\n\t\tAfterRetry: hl,\n\t\tComplete: hl,\n\t}\n\n\treturn &r\n}", "func deliveryTestHandler(t *testing.T, expCnt int64, deliveryChan chan Event, mt *msgtracker, doneChan chan int64) {\n\n\tfor ev := range deliveryChan {\n\t\tm, ok := ev.(*Message)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tmt.msgs[mt.msgcnt] = m\n\t\tmt.msgcnt++\n\n\t\tif m.TopicPartition.Error != nil {\n\t\t\tmt.errcnt++\n\t\t\t// log it and check it later\n\t\t\tt.Logf(\"Message delivery error: %v\", m.TopicPartition)\n\t\t}\n\n\t\tt.Logf(\"Delivered %d/%d to %s, error count %d\", mt.msgcnt, expCnt, m.TopicPartition, mt.errcnt)\n\n\t\tif mt.msgcnt >= expCnt {\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\tdoneChan <- mt.msgcnt\n\tclose(doneChan)\n}", "func (mmExchange *mMDNSClientMockExchange) When(msg *mdns.Msg, address string) *MDNSClientMockExchangeExpectation {\n\tif mmExchange.mock.funcExchange != nil 
{\n\t\tmmExchange.mock.t.Fatalf(\"MDNSClientMock.Exchange mock is already set by Set\")\n\t}\n\n\texpectation := &MDNSClientMockExchangeExpectation{\n\t\tmock: mmExchange.mock,\n\t\tparams: &MDNSClientMockExchangeParams{msg, address},\n\t}\n\tmmExchange.expectations = append(mmExchange.expectations, expectation)\n\treturn expectation\n}", "func (m *MockConn) Send(arg0 event.Event) {\n\tm.ctrl.Call(m, \"Send\", arg0)\n}", "func (m *MockRemotePeer) ConsumeRequest(msgID p2pcommon.MsgID) p2pcommon.MsgOrder {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"ConsumeRequest\", msgID)\n\treturn nil\n}", "func (_m *LambdaAPI) PublishVersionRequest(_a0 *lambda.PublishVersionInput) (*request.Request, *lambda.FunctionConfiguration) {\n\tret := _m.Called(_a0)\n\n\tvar r0 *request.Request\n\tif rf, ok := ret.Get(0).(func(*lambda.PublishVersionInput) *request.Request); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*request.Request)\n\t\t}\n\t}\n\n\tvar r1 *lambda.FunctionConfiguration\n\tif rf, ok := ret.Get(1).(func(*lambda.PublishVersionInput) *lambda.FunctionConfiguration); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(*lambda.FunctionConfiguration)\n\t\t}\n\t}\n\n\treturn r0, r1\n}", "func newMockListener(endpoint net.Conn) *mockListener {\n \n c := make(chan net.Conn, 1)\n c <- endpoint\n listener := &mockListener{\n connChannel: c,\n serverEndpoint: endpoint,\n }\n return listener\n}", "func TestMessageHandler(t *testing.T) {\n log.SetOutput(ioutil.Discard)\n payload := Payload{\n Originator: \"Diogo\",\n Recipient: \"5531988174420\",\n Message: \"Test message\",\n }\n testRequest(payload, 200, 1, t)\n\n payload = Payload{\n Originator: \"\",\n Recipient: \"5531988174420\",\n Message: \"Test message\",\n }\n testRequest(payload, 400, 0, t)\n\n payload = Payload{\n Originator: \"Diogo\",\n Recipient: \"5531988174420\",\n Message: strings.Repeat(\"a\", 170),\n }\n testRequest(payload, 200, 2, t)\n}", 
"func (fgs *FakeGraphSync) AssertRequestReceived(ctx context.Context, t *testing.T) ReceivedGraphSyncRequest {\n\tvar requestReceived ReceivedGraphSyncRequest\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"did not receive message sent\")\n\tcase requestReceived = <-fgs.requests:\n\t}\n\treturn requestReceived\n}", "func (mmGetAbandonedRequest *mClientMockGetAbandonedRequest) When(ctx context.Context, objectRef insolar.Reference, reqRef insolar.Reference) *ClientMockGetAbandonedRequestExpectation {\n\tif mmGetAbandonedRequest.mock.funcGetAbandonedRequest != nil {\n\t\tmmGetAbandonedRequest.mock.t.Fatalf(\"ClientMock.GetAbandonedRequest mock is already set by Set\")\n\t}\n\n\texpectation := &ClientMockGetAbandonedRequestExpectation{\n\t\tmock: mmGetAbandonedRequest.mock,\n\t\tparams: &ClientMockGetAbandonedRequestParams{ctx, objectRef, reqRef},\n\t}\n\tmmGetAbandonedRequest.expectations = append(mmGetAbandonedRequest.expectations, expectation)\n\treturn expectation\n}", "func (m *Mockrequester) Request(arg0 context.Context, arg1 p2p.Peer, arg2 []byte, arg3 func([]byte), arg4 func(error)) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Request\", arg0, arg1, arg2, arg3, arg4)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (mmSend *mClientMockSend) When(ctx context.Context, n *Notification) *ClientMockSendExpectation {\n\tif mmSend.mock.funcSend != nil {\n\t\tmmSend.mock.t.Fatalf(\"ClientMock.Send mock is already set by Set\")\n\t}\n\n\texpectation := &ClientMockSendExpectation{\n\t\tmock: mmSend.mock,\n\t\tparams: &ClientMockSendParams{ctx, n},\n\t}\n\tmmSend.expectations = append(mmSend.expectations, expectation)\n\treturn expectation\n}", "func MockConsensusEvent(hash []byte, round uint64, step uint8, keys []key.ConsensusKeys, p *user.Provisioners, i ...int) consensus.Event {\n\taev := MockAgreementEvent(hash, round, step, keys, p, i...)\n\thdr := aev.Header\n\n\tbuf := new(bytes.Buffer)\n\t_ = Marshal(buf, *aev)\n\n\treturn 
consensus.Event{\n\t\tHeader: hdr,\n\t\tPayload: *buf,\n\t}\n}", "func (mmGetAbandonedRequest *mClientMockGetAbandonedRequest) Set(f func(ctx context.Context, objectRef insolar.Reference, reqRef insolar.Reference) (r1 record.Request, err error)) *ClientMock {\n\tif mmGetAbandonedRequest.defaultExpectation != nil {\n\t\tmmGetAbandonedRequest.mock.t.Fatalf(\"Default expectation is already set for the Client.GetAbandonedRequest method\")\n\t}\n\n\tif len(mmGetAbandonedRequest.expectations) > 0 {\n\t\tmmGetAbandonedRequest.mock.t.Fatalf(\"Some expectations are already set for the Client.GetAbandonedRequest method\")\n\t}\n\n\tmmGetAbandonedRequest.mock.funcGetAbandonedRequest = f\n\treturn mmGetAbandonedRequest.mock\n}", "func testSendFunc(serv *Server, payload []byte, serverTime float64) {\n\t// send payloads to clients\n\tserv.SendPayloads(payload, serverTime)\n}", "func newRequest(ctx context.Context, msg interface{}) *request {\n\treturn &request{\n\t\tctx: ctx,\n\t\tmsg: msg,\n\t\tfailure: make(chan error, 1),\n\t\tresponse: make(chan *Delivery, 1),\n\t}\n}", "func (m *mHostNetworkMockSendRequest) Set(f func(p network.Request, p1 core.RecordRef) (r network.Future, r1 error)) *HostNetworkMock {\n\tm.mock.SendRequestFunc = f\n\tm.mockExpectations = nil\n\treturn m.mock\n}", "func (_m *mockTransportChannel) Send(_a0 *contracts.Envelope) {\n\t_m.Called(_a0)\n}", "func (m *MockRDSAPI) ModifyEventSubscriptionRequest(arg0 *rds.ModifyEventSubscriptionInput) (*request.Request, *rds.ModifyEventSubscriptionOutput) {\n\tret := m.ctrl.Call(m, \"ModifyEventSubscriptionRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.ModifyEventSubscriptionOutput)\n\treturn ret0, ret1\n}", "func (rh *MockRequestHandler) PushRequest(ctx context.Context, req *protocol.Request) (*protocol.Response, error) {\n\tselect {\n\tcase rh.in <- req:\n\tcase <-ctx.Done():\n\t\treturn nil, errors.New(\"request cancelled\")\n\t}\n\n\tselect {\n\tcase resp := 
<-req.Responded():\n\t\tif resp == nil {\n\t\t\treturn protocol.NewResponseConfig(config.Default), errors.New(\"no response\")\n\t\t}\n\t\treturn resp, nil\n\tcase <-ctx.Done():\n\t\treturn nil, errors.New(\"request cancelled\")\n\t}\n\treturn nil, nil\n}", "func TestHandler_OK(t *testing.T) {\n\tnow, _ := clock.ParseRFC3339(\"2000-01-01T00:00:00Z\")\n\tinputMachineID := \"AAAAAAAA-A00A-1234-1234-5864377B4831\"\n\ttimeProvider := clock.FrozenTimeProvider{\n\t\tCurrent: now,\n\t}\n\tvar request = events.APIGatewayProxyRequest{\n\t\tHTTPMethod: \"POST\",\n\t\tResource: \"/preflight/{machine_id}\",\n\t\tPathParameters: map[string]string{\"machine_id\": inputMachineID},\n\t\tHeaders: map[string]string{\"Content-Type\": \"application/json\"},\n\t\tBody: `{\n\t\"os_build\":\"20D5029f\",\n\t\"santa_version\":\"2021.1\",\n\t\"hostname\":\"my-awesome-macbook-pro.attlocal.net\",\n\t\"transitive_rule_count\":0,\n\t\"os_version\":\"11.2\",\n\t\"certificate_rule_count\":2,\n\t\"client_mode\":\"MONITOR\",\n\t\"serial_num\":\"C02123456789\",\n\t\"binary_rule_count\":3,\n\t\"primary_user\":\"nobody\",\n\t\"compiler_rule_count\":0\n}`,\n\t}\n\tmockedConfigurationFetcher := &MockDynamodb{}\n\n\tconfig := machineconfiguration.MachineConfiguration{\n\t\tClientMode: types.Lockdown,\n\t\tBatchSize: 37,\n\t\tUploadLogsURL: \"/aaa\",\n\t\tEnableBundles: true,\n\t\tAllowedPathRegex: \"\",\n\t\tCleanSync: false,\n\t}\n\n\treturnedConfig, err := attributevalue.MarshalMap(config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tmockedConfigurationFetcher.On(\"GetItem\", mock.Anything, mock.Anything).Return(&awsdynamodb.GetItemOutput{\n\t\tItem: returnedConfig,\n\t}, nil)\n\n\tmockedStateTracking := &MockDynamodb{}\n\tmockedStateTracking.On(\"GetItem\", mock.Anything, mock.Anything).Return(&awsdynamodb.GetItemOutput{\n\t\tItem: nil,\n\t}, nil)\n\n\t// mockedStateTracking.On(\"PutItem\", mock.MatchedBy(func(item interface{}) bool {\n\tmockedStateTracking.On(\"PutItem\", 
mock.MatchedBy(func(syncState syncstate.SyncStateRow) bool {\n\t\treturn syncState.MachineID == inputMachineID && syncState.BatchSize == 37 && syncState.LastCleanSync == \"2000-01-01T00:00:00Z\" && syncState.FeedSyncCursor == \"2000-01-01T00:00:00Z\"\n\t})).Return(&awsdynamodb.PutItemOutput{}, nil)\n\n\tmockedStateTracking.On(\"PutItem\", mock.MatchedBy(func(sensorData sensordata.SensorData) bool {\n\t\treturn sensorData.OSBuild == \"20D5029f\" && sensorData.SerialNum == \"C02123456789\" && sensorData.MachineID == inputMachineID && sensorData.PrimaryUser == \"nobody\" && sensorData.BinaryRuleCount == 3 && sensorData.CompilerRuleCount == 0\n\t})).Return(&awsdynamodb.PutItemOutput{}, nil)\n\n\th := &PostPreflightHandler{\n\t\ttimeProvider: timeProvider,\n\t\tmachineConfigurationService: machineconfiguration.GetMachineConfigurationService(mockedConfigurationFetcher, timeProvider),\n\t\tstateTrackingService: getStateTrackingService(mockedStateTracking, timeProvider),\n\t\tcleanSyncService: getCleanSyncService(timeProvider),\n\t}\n\n\tresp, err := h.Handle(request)\n\n\tassert.Empty(t, err)\n\tassert.Equal(t, 200, resp.StatusCode)\n\n\t// Ensure that the response matches the configuration returned\n\tassert.Equal(t, `{\"client_mode\":\"LOCKDOWN\",\"blocked_path_regex\":\"\",\"allowed_path_regex\":\"\",\"batch_size\":37,\"enable_bundles\":true,\"enable_transitive_rules\":false,\"clean_sync\":true,\"upload_logs_url\":\"/aaa\"}`, resp.Body)\n}", "func (_m *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {\n\tret := _m.Called(name, wantReply, payload)\n\n\tvar r0 bool\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(string, bool, []byte) (bool, error)); ok {\n\t\treturn rf(name, wantReply, payload)\n\t}\n\tif rf, ok := ret.Get(0).(func(string, bool, []byte) bool); ok {\n\t\tr0 = rf(name, wantReply, payload)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\tif rf, ok := ret.Get(1).(func(string, bool, []byte) error); ok {\n\t\tr1 = rf(name, 
wantReply, payload)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockRDSAPI) DeleteEventSubscriptionRequest(arg0 *rds.DeleteEventSubscriptionInput) (*request.Request, *rds.DeleteEventSubscriptionOutput) {\n\tret := m.ctrl.Call(m, \"DeleteEventSubscriptionRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.DeleteEventSubscriptionOutput)\n\treturn ret0, ret1\n}", "func (mmRegisterIncomingRequest *mClientMockRegisterIncomingRequest) When(ctx context.Context, request *record.IncomingRequest) *ClientMockRegisterIncomingRequestExpectation {\n\tif mmRegisterIncomingRequest.mock.funcRegisterIncomingRequest != nil {\n\t\tmmRegisterIncomingRequest.mock.t.Fatalf(\"ClientMock.RegisterIncomingRequest mock is already set by Set\")\n\t}\n\n\texpectation := &ClientMockRegisterIncomingRequestExpectation{\n\t\tmock: mmRegisterIncomingRequest.mock,\n\t\tparams: &ClientMockRegisterIncomingRequestParams{ctx, request},\n\t}\n\tmmRegisterIncomingRequest.expectations = append(mmRegisterIncomingRequest.expectations, expectation)\n\treturn expectation\n}", "func getDelivery(ch *amqplib.Channel, eventSource *v1alpha1.AMQPEventSource) (<-chan amqplib.Delivery, error) {\n\terr := ch.ExchangeDeclare(\n\t\teventSource.ExchangeName,\n\t\teventSource.ExchangeType,\n\t\teventSource.ExchangeDeclare.Durable,\n\t\teventSource.ExchangeDeclare.AutoDelete,\n\t\teventSource.ExchangeDeclare.Internal,\n\t\teventSource.ExchangeDeclare.NoWait,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to declare exchange with name %s and type %s. 
err: %w\", eventSource.ExchangeName, eventSource.ExchangeType, err)\n\t}\n\toptionalArguments, err := parseYamlTable(eventSource.QueueDeclare.Arguments)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse optional queue declare table arguments from Yaml string: %w\", err)\n\t}\n\n\tq, err := ch.QueueDeclare(\n\t\teventSource.QueueDeclare.Name,\n\t\teventSource.QueueDeclare.Durable,\n\t\teventSource.QueueDeclare.AutoDelete,\n\t\teventSource.QueueDeclare.Exclusive,\n\t\teventSource.QueueDeclare.NoWait,\n\t\toptionalArguments,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to declare queue: %w\", err)\n\t}\n\n\terr = ch.QueueBind(\n\t\tq.Name,\n\t\teventSource.RoutingKey,\n\t\teventSource.ExchangeName,\n\t\teventSource.QueueBind.NoWait,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to bind %s exchange '%s' to queue with routingKey: %s: %w\", eventSource.ExchangeType, eventSource.ExchangeName, eventSource.RoutingKey, err)\n\t}\n\n\tdelivery, err := ch.Consume(\n\t\tq.Name,\n\t\teventSource.Consume.ConsumerTag,\n\t\teventSource.Consume.AutoAck,\n\t\teventSource.Consume.Exclusive,\n\t\teventSource.Consume.NoLocal,\n\t\teventSource.Consume.NoWait,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to begin consuming messages: %w\", err)\n\t}\n\treturn delivery, nil\n}", "func (suite *TransportTestSuite) TestSendReceive() {\n\ttrans := suite.Transport\n\tinboundChan := make(chan message.Request, 1)\n\ttrans.Listen(testService, inboundChan)\n\n\tgo func() {\n\t\tselect {\n\t\tcase req := <-inboundChan:\n\t\t\tsuite.Assert().NotNil(req)\n\t\t\tsuite.Assert().Equal(\"ping\", string(req.Payload()))\n\t\t\tsuite.Assert().Equal(\"Shut up and take my money!\", req.Headers()[\"X-Fry\"])\n\t\t\tsuite.Assert().Equal(testService, req.Service())\n\t\t\tsuite.Assert().Equal(\"/foo\", req.Endpoint())\n\n\t\t\trsp := 
message.NewResponse()\n\t\t\trsp.SetId(req.Id())\n\t\t\trsp.SetPayload([]byte(\"pong\"))\n\t\t\tsuite.Assert().NoError(trans.Respond(req, rsp))\n\n\t\tcase <-trans.Tomb().Dying():\n\t\t}\n\t}()\n\n\treq := message.NewRequest()\n\treq.SetService(testService)\n\treq.SetEndpoint(\"/foo\")\n\treq.SetPayload([]byte(\"ping\"))\n\treq.SetId(\"1\")\n\treq.SetHeader(\"X-Fry\", \"Shut up and take my money!\")\n\trsp, err := trans.Send(req, time.Second)\n\tsuite.Assert().NoError(err)\n\tsuite.Assert().NotNil(rsp)\n\tsuite.Assert().Equal(req.Id(), rsp.Id())\n\tsuite.Assert().Equal(\"pong\", string(rsp.Payload()))\n}", "func TestFBaseProcessorNoProcessorFunction(t *testing.T) {\n\ttmpLogger := logrus.New()\n\tvar logBuf bytes.Buffer\n\ttmpLogger.Out = &logBuf\n\toldLogger := logger()\n\tSetLogger(tmpLogger)\n\tdefer func() {\n\t\tSetLogger(oldLogger)\n\t}()\n\n\tmockTransport := new(mockTTransport)\n\treads := make(chan []byte, 4)\n\treads <- pingFrame[0:1] // version\n\treads <- pingFrame[1:5] // headers size\n\treads <- pingFrame[5:34] // FContext headers\n\treads <- pingFrame[34:] // request body\n\tmockTransport.reads = reads\n\t// _opid0, cid 123\n\t// The ordering of opid and cid in the header is non-deterministic,\n\t// so cant check for equality.\n\tresponseCtx := []byte{0, 0, 0, 0, 29, 0, 0, 0, 5, 95, 111, 112, 105, 100, 0, 0, 0, 1, 48, 0, 0, 0, 4, 95, 99, 105, 100, 0, 0, 0, 3, 49, 50, 51}\n\tmockTransport.On(\"Write\", mock.Anything).Return(len(responseCtx), nil).Once()\n\t// [1,\"ping\",3,0,{\"1\":{\"str\":\"Unknown function ping\"},\"2\":{\"i32\":1}}]\n\tresponseBody := []byte{\n\t\t91, 49, 44, 34, 112, 105, 110, 103, 34, 44, 51, 44, 48, 44, 123, 34,\n\t\t49, 34, 58, 123, 34, 115, 116, 114, 34, 58, 34, 85, 110, 107, 110, 111,\n\t\t119, 110, 32, 102, 117, 110, 99, 116, 105, 111, 110, 32, 112, 105, 110,\n\t\t103, 34, 125, 44, 34, 50, 34, 58, 123, 34, 105, 51, 50, 34, 58, 49,\n\t\t125, 125, 93,\n\t}\n\tmockTransport.On(\"Write\", responseBody).Return(len(responseBody), 
nil).Once()\n\tmockTransport.On(\"Flush\", mock.Anything).Return(nil)\n\tproto := &FProtocol{TProtocol: thrift.NewTJSONProtocol(mockTransport)}\n\tprocessor := NewFBaseProcessor()\n\n\tassert.NoError(t, processor.Process(proto, proto))\n\tassert.True(t,\n\t\tstrings.Contains(\n\t\t\tstring(logBuf.Bytes()),\n\t\t\t\"frugal: client invoked unknown function ping on request with correlation id 123\"))\n\tmockTransport.AssertExpectations(t)\n}", "func mockSenderToReceiver() *SenderToReceiver {\n\tsr := NewSenderToReceiver()\n\tsr.CoverPayment.SwiftFieldTag = \"Swift Field Tag\"\n\tsr.CoverPayment.SwiftLineOne = \"Swift Line One\"\n\tsr.CoverPayment.SwiftLineTwo = \"Swift Line Two\"\n\tsr.CoverPayment.SwiftLineThree = \"Swift Line Three\"\n\tsr.CoverPayment.SwiftLineFour = \"Swift Line Four\"\n\tsr.CoverPayment.SwiftLineFive = \"Swift Line Five\"\n\tsr.CoverPayment.SwiftLineSix = \"Swift Line Six\"\n\treturn sr\n}", "func (_m *SinkEventer) Send(m frizzle.Msg, dest string) error {\n\tret := _m.Called(m, dest)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(frizzle.Msg, string) error); ok {\n\t\tr0 = rf(m, dest)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *CommMock) SendTransaction(targetID uint64, request []byte) {\n\t_m.Called(targetID, request)\n}", "func (m *mConsensusNetworkMockSendRequest) Expect(p network.Request, p1 core.RecordRef) *mConsensusNetworkMockSendRequest {\n\tm.mockExpectations = &ConsensusNetworkMockSendRequestParams{p, p1}\n\treturn m\n}", "func (_m *MockHistoryEngine) ReplicateEvents(ctx context.Context, request *gohistory.ReplicateEventsRequest) error {\n\tret := _m.Called(request)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*gohistory.ReplicateEventsRequest) error); ok {\n\t\tr0 = rf(request)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockMessageBusClient) Publish(ctx context.Context, in *proto.PublishRequest, opts ...grpc.CallOption) (*empty.Empty, error) 
{\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Publish\", varargs...)\n\tret0, _ := ret[0].(*empty.Empty)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mmRegisterOutgoingRequest *mClientMockRegisterOutgoingRequest) Set(f func(ctx context.Context, request *record.OutgoingRequest) (rp1 *payload.RequestInfo, err error)) *ClientMock {\n\tif mmRegisterOutgoingRequest.defaultExpectation != nil {\n\t\tmmRegisterOutgoingRequest.mock.t.Fatalf(\"Default expectation is already set for the Client.RegisterOutgoingRequest method\")\n\t}\n\n\tif len(mmRegisterOutgoingRequest.expectations) > 0 {\n\t\tmmRegisterOutgoingRequest.mock.t.Fatalf(\"Some expectations are already set for the Client.RegisterOutgoingRequest method\")\n\t}\n\n\tmmRegisterOutgoingRequest.mock.funcRegisterOutgoingRequest = f\n\treturn mmRegisterOutgoingRequest.mock\n}", "func (s *MockStream) SendRequest(req *envoy_service_discovery.DiscoveryRequest) error {\n\tsubCtx, cancel := context.WithTimeout(s.ctx, s.recvTimeout)\n\n\tselect {\n\tcase <-subCtx.Done():\n\t\tcancel()\n\t\tif errors.Is(subCtx.Err(), context.Canceled) {\n\t\t\treturn io.EOF\n\t\t}\n\t\treturn subCtx.Err()\n\tcase s.recv <- req:\n\t\tcancel()\n\t\treturn nil\n\t}\n}", "func (m *MockServiceRepositoryInterface) Request(id string) (model.Service, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Request\", id)\n\tret0, _ := ret[0].(model.Service)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mmExchange *mMDNSClientMockExchange) Set(f func(msg *mdns.Msg, address string) (r *mdns.Msg, rtt time.Duration, err error)) *MDNSClientMock {\n\tif mmExchange.defaultExpectation != nil {\n\t\tmmExchange.mock.t.Fatalf(\"Default expectation is already set for the mDNSClient.Exchange method\")\n\t}\n\n\tif len(mmExchange.expectations) > 0 {\n\t\tmmExchange.mock.t.Fatalf(\"Some expectations are already set for the 
mDNSClient.Exchange method\")\n\t}\n\n\tmmExchange.mock.funcExchange = f\n\treturn mmExchange.mock\n}", "func doRequest(client amqpcommand.Client, request *amqp.Message) (*amqp.Message, error) {\n\t// If by chance we got disconnected while waiting for the request\n\tresponse, err := client.RequestWithTimeout(request, 10*time.Second)\n\treturn response, err\n}", "func (_m *LambdaAPI) DeleteEventSourceMappingRequest(_a0 *lambda.DeleteEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration) {\n\tret := _m.Called(_a0)\n\n\tvar r0 *request.Request\n\tif rf, ok := ret.Get(0).(func(*lambda.DeleteEventSourceMappingInput) *request.Request); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*request.Request)\n\t\t}\n\t}\n\n\tvar r1 *lambda.EventSourceMappingConfiguration\n\tif rf, ok := ret.Get(1).(func(*lambda.DeleteEventSourceMappingInput) *lambda.EventSourceMappingConfiguration); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(*lambda.EventSourceMappingConfiguration)\n\t\t}\n\t}\n\n\treturn r0, r1\n}", "func (m *HostNetworkMock) SendRequest(p network.Request, p1 core.RecordRef) (r network.Future, r1 error) {\n\tatomic.AddUint64(&m.SendRequestPreCounter, 1)\n\tdefer atomic.AddUint64(&m.SendRequestCounter, 1)\n\n\tif m.SendRequestMock.mockExpectations != nil {\n\t\ttestify_assert.Equal(m.t, *m.SendRequestMock.mockExpectations, HostNetworkMockSendRequestParams{p, p1},\n\t\t\t\"HostNetwork.SendRequest got unexpected parameters\")\n\n\t\tif m.SendRequestFunc == nil {\n\n\t\t\tm.t.Fatal(\"No results are set for the HostNetworkMock.SendRequest\")\n\n\t\t\treturn\n\t\t}\n\t}\n\n\tif m.SendRequestFunc == nil {\n\t\tm.t.Fatal(\"Unexpected call to HostNetworkMock.SendRequest\")\n\t\treturn\n\t}\n\n\treturn m.SendRequestFunc(p, p1)\n}", "func (ds *fakeDebugSession) dispatchRequest(request dap.Message) {\n\tswitch request := request.(type) {\n\tcase 
*dap.InitializeRequest:\n\t\tds.onInitializeRequest(request)\n\tcase *dap.LaunchRequest:\n\t\tds.onLaunchRequest(request)\n\tcase *dap.AttachRequest:\n\t\tds.onAttachRequest(request)\n\tcase *dap.DisconnectRequest:\n\t\tds.onDisconnectRequest(request)\n\tcase *dap.TerminateRequest:\n\t\tds.onTerminateRequest(request)\n\tcase *dap.RestartRequest:\n\t\tds.onRestartRequest(request)\n\tcase *dap.SetBreakpointsRequest:\n\t\tds.onSetBreakpointsRequest(request)\n\tcase *dap.SetFunctionBreakpointsRequest:\n\t\tds.onSetFunctionBreakpointsRequest(request)\n\tcase *dap.SetExceptionBreakpointsRequest:\n\t\tds.onSetExceptionBreakpointsRequest(request)\n\tcase *dap.ConfigurationDoneRequest:\n\t\tds.onConfigurationDoneRequest(request)\n\tcase *dap.ContinueRequest:\n\t\tds.onContinueRequest(request)\n\tcase *dap.NextRequest:\n\t\tds.onNextRequest(request)\n\tcase *dap.StepInRequest:\n\t\tds.onStepInRequest(request)\n\tcase *dap.StepOutRequest:\n\t\tds.onStepOutRequest(request)\n\tcase *dap.StepBackRequest:\n\t\tds.onStepBackRequest(request)\n\tcase *dap.ReverseContinueRequest:\n\t\tds.onReverseContinueRequest(request)\n\tcase *dap.RestartFrameRequest:\n\t\tds.onRestartFrameRequest(request)\n\tcase *dap.GotoRequest:\n\t\tds.onGotoRequest(request)\n\tcase *dap.PauseRequest:\n\t\tds.onPauseRequest(request)\n\tcase *dap.StackTraceRequest:\n\t\tds.onStackTraceRequest(request)\n\tcase *dap.ScopesRequest:\n\t\tds.onScopesRequest(request)\n\tcase *dap.VariablesRequest:\n\t\tds.onVariablesRequest(request)\n\tcase *dap.SetVariableRequest:\n\t\tds.onSetVariableRequest(request)\n\tcase *dap.SetExpressionRequest:\n\t\tds.onSetExpressionRequest(request)\n\tcase *dap.SourceRequest:\n\t\tds.onSourceRequest(request)\n\tcase *dap.ThreadsRequest:\n\t\tds.onThreadsRequest(request)\n\tcase *dap.TerminateThreadsRequest:\n\t\tds.onTerminateThreadsRequest(request)\n\tcase *dap.EvaluateRequest:\n\t\tds.onEvaluateRequest(request)\n\tcase 
*dap.StepInTargetsRequest:\n\t\tds.onStepInTargetsRequest(request)\n\tcase *dap.GotoTargetsRequest:\n\t\tds.onGotoTargetsRequest(request)\n\tcase *dap.CompletionsRequest:\n\t\tds.onCompletionsRequest(request)\n\tcase *dap.ExceptionInfoRequest:\n\t\tds.onExceptionInfoRequest(request)\n\tcase *dap.LoadedSourcesRequest:\n\t\tds.onLoadedSourcesRequest(request)\n\tcase *dap.DataBreakpointInfoRequest:\n\t\tds.onDataBreakpointInfoRequest(request)\n\tcase *dap.SetDataBreakpointsRequest:\n\t\tds.onSetDataBreakpointsRequest(request)\n\tcase *dap.ReadMemoryRequest:\n\t\tds.onReadMemoryRequest(request)\n\tcase *dap.DisassembleRequest:\n\t\tds.onDisassembleRequest(request)\n\tcase *dap.CancelRequest:\n\t\tds.onCancelRequest(request)\n\tcase *dap.BreakpointLocationsRequest:\n\t\tds.onBreakpointLocationsRequest(request)\n\tdefault:\n\t\tlog.Fatalf(\"Unable to process %#v\", request)\n\t}\n}", "func (msq *MockSend) handler() {\nout:\n\tfor {\n\t\tselect {\n\t\tcase <-msq.quit:\n\t\t\tbreak out\n\t\tcase inv := <-msq.requestQueue:\n\t\t\tmsq.conn.RequestData(inv)\n\t\tcase msg := <-msq.msgQueue:\n\t\t\tmsq.conn.WriteMessage(msg)\n\t\t}\n\t}\n}", "func TestAuthRequestDispatch(t *testing.T) {\n\tassert := assert.New(t)\n\tid, err := uuid.Parse(\"1d6d02bd-8e56-421d-9438-3bd6d0079ff1\")\n\tassert.IsType(uuid.UUID{}, id)\n\tassert.Nil(err)\n\treq := prepareAuthRequest(id, \"test\", \"root\")\n\n\tc := newClient(nil)\n\tc.conf = &ClientConfig{Logger: logger.New()}\n\tassert.NotNil(c)\n\tmsg, err := packageRequest(req)\n\tassert.Nil(err)\n\tc.dispatchRequest(msg)\n\t_req := <-c.requests // c.requests is the channel where all requests are sent for writing to Gremlin Server, write workers listen on this channel\n\tassert.Equal(_req, msg)\n}", "func MockWire(hash []byte, round uint64, step uint8, keys []key.ConsensusKeys, p *user.Provisioners, i ...int) *bytes.Buffer {\n\tev := MockAgreementEvent(hash, round, step, keys, p, i...)\n\n\tbuf := new(bytes.Buffer)\n\tif err := header.Marshal(buf, 
ev.Header); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err := Marshal(buf, *ev); err != nil {\n\t\tpanic(err)\n\t}\n\treturn buf\n}", "func (m *MockBotService) ProcessRequest(u *ports.Update) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ProcessRequest\", u)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (mmRegisterIncomingRequest *mClientMockRegisterIncomingRequest) Set(f func(ctx context.Context, request *record.IncomingRequest) (rp1 *payload.RequestInfo, err error)) *ClientMock {\n\tif mmRegisterIncomingRequest.defaultExpectation != nil {\n\t\tmmRegisterIncomingRequest.mock.t.Fatalf(\"Default expectation is already set for the Client.RegisterIncomingRequest method\")\n\t}\n\n\tif len(mmRegisterIncomingRequest.expectations) > 0 {\n\t\tmmRegisterIncomingRequest.mock.t.Fatalf(\"Some expectations are already set for the Client.RegisterIncomingRequest method\")\n\t}\n\n\tmmRegisterIncomingRequest.mock.funcRegisterIncomingRequest = f\n\treturn mmRegisterIncomingRequest.mock\n}", "func (s *Subscription) Request(payload responses.Payload) (*api.CastMessage, error) {\n\trequestId := int(atomic.AddInt64(&s.requestId, 1))\n\tpayload.SetRequestId(requestId)\n\n\tresponse := make(chan *api.CastMessage)\n\ts.inFlight[requestId] = response\n\n\t// err := s.Send(payload)\n\terr := s.Device.Send(s.Urn, s.SourceId, s.DestinationId, payload)\n\tif err != nil {\n\t\tdelete(s.inFlight, requestId)\n\t\treturn nil, err\n\t}\n\n\tdelay := time.NewTimer(time.Second * 10)\n\tselect {\n\tcase reply := <-response:\n\t\tif !delay.Stop() {\n\t\t\t<-delay.C\n\t\t}\n\t\treturn reply, nil\n\tcase <-delay.C:\n\t\tdelete(s.inFlight, requestId)\n\t\treturn nil, fmt.Errorf(\"Timeout sending: %s\", spew.Sdump(payload))\n\t}\n}", "func (_m *LambdaAPI) UpdateEventSourceMappingRequest(_a0 *lambda.UpdateEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration) {\n\tret := _m.Called(_a0)\n\n\tvar r0 *request.Request\n\tif rf, ok := 
ret.Get(0).(func(*lambda.UpdateEventSourceMappingInput) *request.Request); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*request.Request)\n\t\t}\n\t}\n\n\tvar r1 *lambda.EventSourceMappingConfiguration\n\tif rf, ok := ret.Get(1).(func(*lambda.UpdateEventSourceMappingInput) *lambda.EventSourceMappingConfiguration); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(*lambda.EventSourceMappingConfiguration)\n\t\t}\n\t}\n\n\treturn r0, r1\n}", "func (t *Deliverys) Invoke(stub shim.ChaincodeStubInterface) pb.Response {\r\n function, args := stub.GetFunctionAndParameters()\r\n fmt.Println(\"invoke is running \" + function)\r\n\r\n // Handle different functions\r\n if function == \"createDelivery\" { //create a new Delivery\r\n return t.createDelivery(stub, args)\r\n\t}else if function == \"getDeliveryByPurchaseID\" { //find delivery for a particular purchase id using rich query\r\n return t.getDeliveryByPurchaseID(stub, args)\r\n }else if function == \"getAllDAPDelivery\" { //find delivery for a particular purchase id using rich query\r\n return t.getAllDAPDelivery(stub, args)\r\n } else if function == \"getAllDAPDeliveryDate\" { //find delivery for a particular purchase id using rich query\r\n return t.getAllDAPDeliveryDate(stub, args)\r\n }\r\n\t \r\n eventMessage := \"{ \\\"message\\\" : \\\"Received unknown function invocation\\\", \\\"code\\\" : \\\"503\\\"}\"\r\n err := stub.SetEvent(\"errEvent\", []byte(eventMessage))\r\n if err != nil {\r\n return shim.Error(err.Error())\r\n }\r\n fmt.Println(\"invoke did not find func: \" + function) //error\r\n return shim.Error(\"Received unknown function invocation\")\r\n}", "func TestEventService(t *testing.T) {\n\tvar result EventService\n\terr := json.NewDecoder(strings.NewReader(eventServiceBody)).Decode(&result)\n\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\tif result.ID != \"EventService\" 
{\n\t\tt.Errorf(\"Received invalid ID: %s\", result.ID)\n\t}\n\n\tif result.Name != \"Event Service\" {\n\t\tt.Errorf(\"Received invalid name: %s\", result.Name)\n\t}\n\n\tif result.DeliveryRetryAttempts != 4 {\n\t\tt.Errorf(\"Expected 4 retry attempts, got: %d\", result.DeliveryRetryAttempts)\n\t}\n\n\tif result.DeliveryRetryIntervalSeconds != 30 {\n\t\tt.Errorf(\"Expected 30 second retry interval, got: %d\", result.DeliveryRetryIntervalSeconds)\n\t}\n\n\tif result.SSEFilterPropertiesSupported.MetricReportDefinition {\n\t\tt.Error(\"MetricReportDefinition filter should be false\")\n\t}\n\n\tif !result.SSEFilterPropertiesSupported.MessageID {\n\t\tt.Error(\"Message ID filter should be true\")\n\t}\n\n\tif result.submitTestEventTarget != \"/redfish/v1/EventService/Actions/EventService.SubmitTestEvent\" {\n\t\tt.Errorf(\"Invalid SubmitTestEvent target: %s\", result.submitTestEventTarget)\n\t}\n\n\tfor _, et := range result.EventTypesForSubscription {\n\t\tif !et.IsValidEventType() {\n\t\t\tt.Errorf(\"invalid event type: %s\", et)\n\t\t}\n\t}\n\n}", "func (_m *MockConsumer) Consume(ctx context.Context, d Delivery) error {\n\tret := _m.Called(ctx, d)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, Delivery) error); ok {\n\t\tr0 = rf(ctx, d)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *MockEStringToStringMapEntry) EDeliver() bool {\n\tret := _m.Called()\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\treturn r0\n}", "func (_m *ISession) Request(method string, urlStr string, data interface{}, options ...discordgo.RequestOption) ([]byte, error) {\n\t_va := make([]interface{}, len(options))\n\tfor _i := range options {\n\t\t_va[_i] = options[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, method, urlStr, data)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 []byte\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(string, 
string, interface{}, ...discordgo.RequestOption) ([]byte, error)); ok {\n\t\treturn rf(method, urlStr, data, options...)\n\t}\n\tif rf, ok := ret.Get(0).(func(string, string, interface{}, ...discordgo.RequestOption) []byte); ok {\n\t\tr0 = rf(method, urlStr, data, options...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]byte)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(string, string, interface{}, ...discordgo.RequestOption) error); ok {\n\t\tr1 = rf(method, urlStr, data, options...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client {\n\treturn &http.Client{\n\t\tTransport: transportFunc(doer),\n\t}\n}", "func (m *MockRDSAPI) DescribeEventsRequest(arg0 *rds.DescribeEventsInput) (*request.Request, *rds.DescribeEventsOutput) {\n\tret := m.ctrl.Call(m, \"DescribeEventsRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.DescribeEventsOutput)\n\treturn ret0, ret1\n}", "func (_m *LambdaAPI) GetEventSourceMappingRequest(_a0 *lambda.GetEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration) {\n\tret := _m.Called(_a0)\n\n\tvar r0 *request.Request\n\tif rf, ok := ret.Get(0).(func(*lambda.GetEventSourceMappingInput) *request.Request); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*request.Request)\n\t\t}\n\t}\n\n\tvar r1 *lambda.EventSourceMappingConfiguration\n\tif rf, ok := ret.Get(1).(func(*lambda.GetEventSourceMappingInput) *lambda.EventSourceMappingConfiguration); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(*lambda.EventSourceMappingConfiguration)\n\t\t}\n\t}\n\n\treturn r0, r1\n}", "func TestAuthRequestDispatch(t *testing.T) {\n\tid := \"1d6d02bd-8e56-421d-9438-3bd6d0079ff1\"\n\ttestRequest, _ := prepareAuthRequest(id, \"test\", \"root\")\n\n\tc := newClient()\n\tmsg, err := packageRequest(testRequest)\n\tif 
err != nil {\n\t\tt.Error(err)\n\t}\n\tc.dispatchRequest(msg)\n\treq := <-c.requests // c.requests is the channel where all requests are sent for writing to Gremlin Server, write workers listen on this channel\n\tif reflect.DeepEqual(msg, req) != true {\n\t\tt.Fail()\n\t}\n}", "func (m *MockRDSAPI) CreateEventSubscriptionRequest(arg0 *rds.CreateEventSubscriptionInput) (*request.Request, *rds.CreateEventSubscriptionOutput) {\n\tret := m.ctrl.Call(m, \"CreateEventSubscriptionRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.CreateEventSubscriptionOutput)\n\treturn ret0, ret1\n}", "func (mmRegisterOutgoingRequest *mClientMockRegisterOutgoingRequest) When(ctx context.Context, request *record.OutgoingRequest) *ClientMockRegisterOutgoingRequestExpectation {\n\tif mmRegisterOutgoingRequest.mock.funcRegisterOutgoingRequest != nil {\n\t\tmmRegisterOutgoingRequest.mock.t.Fatalf(\"ClientMock.RegisterOutgoingRequest mock is already set by Set\")\n\t}\n\n\texpectation := &ClientMockRegisterOutgoingRequestExpectation{\n\t\tmock: mmRegisterOutgoingRequest.mock,\n\t\tparams: &ClientMockRegisterOutgoingRequestParams{ctx, request},\n\t}\n\tmmRegisterOutgoingRequest.expectations = append(mmRegisterOutgoingRequest.expectations, expectation)\n\treturn expectation\n}", "func (_m *Asconn) RequestInfo(_a0 ...string) (map[string]string, aerospike.Error) {\n\t_va := make([]interface{}, len(_a0))\n\tfor _i := range _a0 {\n\t\t_va[_i] = _a0[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 map[string]string\n\tif rf, ok := ret.Get(0).(func(...string) map[string]string); ok {\n\t\tr0 = rf(_a0...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string]string)\n\t\t}\n\t}\n\n\tvar r1 aerospike.Error\n\tif rf, ok := ret.Get(1).(func(...string) aerospike.Error); ok {\n\t\tr1 = rf(_a0...)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(aerospike.Error)\n\t\t}\n\t}\n\n\treturn 
r0, r1\n}", "func HandleRequest(ctx context.Context, evt MyEvent) (*MyResponse, error) {\n\t// context\n\tlc, _ := lambdacontext.FromContext(ctx)\n\tlog.Printf(\"AwsRequestID: %s\", lc.AwsRequestID)\n\n\t// environment variables\n\tfor _, e := range os.Environ() {\n\t\tlog.Println(e)\n\t}\n\n\tlog.Printf(\"Key1: %s\", evt.Key1)\n\tlog.Printf(\"Key2: %s\", evt.Key2)\n\tlog.Printf(\"Key3: %s\", evt.Key3)\n\n\tif evt.Key3 == \"\" {\n\t\treturn nil, errors.New(\"key3 is empty\")\n\t}\n\treturn &MyResponse{Message: evt.Key1}, nil\n}", "func TestRequestDispatch(t *testing.T) {\n\tassert := assert.New(t)\n\n\tid, err := uuid.Parse(\"1d6d02bd-8e56-421d-9438-3bd6d0079ff1\")\n\tassert.IsType(uuid.UUID{}, id)\n\tassert.Nil(err)\n\treq := &GremlinRequest{\n\t\tRequestId: id,\n\t\tOp: \"eval\",\n\t\tProcessor: \"\",\n\t\tArgs: map[string]interface{}{\n\t\t\t\"gremlin\": \"g.V(x)\",\n\t\t\t\"bindings\": map[string]string{\"x\": \"10\"},\n\t\t\t\"language\": \"gremlin-groovy\",\n\t\t},\n\t}\n\n\tc := newClient(nil)\n\tc.conf = &ClientConfig{Logger: logger.New()}\n\tassert.NotNil(c)\n\tmsg, err := packageRequest(req)\n\tassert.Nil(err)\n\tc.dispatchRequest(msg)\n\t_req := <-c.requests // c.requests is the channel where all requests are sent for writing to Gremlin Server, write workers listen on this channel\n\tassert.Equal(_req, msg)\n}", "func (_m *Delivery) SetAvailable(c echo.Context) error {\n\tret := _m.Called(c)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(echo.Context) error); ok {\n\t\tr0 = rf(c)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockSocketClient) Ack(req socketmode.Request, payload ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{req}\n\tfor _, a := range payload {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"Ack\", varargs...)\n}", "func (suite *HandlerTestSuite) TestHandleRecvPacket() {\n\tvar (\n\t\tpacket channeltypes.Packet\n\t)\n\n\ttestCases := []struct {\n\t\tname string\n\t\tmalleate 
func()\n\t\texpPass bool\n\t}{\n\t\t{\"success: ORDERED\", func() {\n\t\t\t_, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, true},\n\t\t{\"success: UNORDERED\", func() {\n\t\t\t_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, true},\n\t\t{\"success: UNORDERED out of order packet\", func() {\n\t\t\t// setup uses an UNORDERED channel\n\t\t\t_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\n\t\t\t// attempts to receive packet with sequence 10 without receiving packet with sequence 1\n\t\t\tfor i := uint64(1); i < 10; i++ {\n\t\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}\n\t\t}, true},\n\t\t{\"failure: ORDERED out of order packet\", func() {\n\t\t\t_, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, 
suite.chainB, connA, connB, channeltypes.ORDERED)\n\n\t\t\t// attempts to receive packet with sequence 10 without receiving packet with sequence 1\n\t\t\tfor i := uint64(1); i < 10; i++ {\n\t\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}\n\t\t}, false},\n\t\t{\"channel does not exist\", func() {\n\t\t\t// any non-nil value of packet is valid\n\t\t\tsuite.Require().NotNil(packet)\n\t\t}, false},\n\t\t{\"packet not sent\", func() {\n\t\t\t_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\t\t}, false},\n\t\t{\"ORDERED: packet already received (replay)\", func() {\n\t\t\tclientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t\t{\"UNORDERED: packet already received (replay)\", func() {\n\t\t\tclientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, 
channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\n\t\tsuite.Run(tc.name, func() {\n\t\t\tsuite.SetupTest() // reset\n\n\t\t\thandler := ibc.NewHandler(*suite.chainB.App.IBCKeeper)\n\n\t\t\ttc.malleate()\n\n\t\t\t// get proof of packet commitment from chainA\n\t\t\tpacketKey := host.KeyPacketCommitment(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())\n\t\t\tproof, proofHeight := suite.chainA.QueryProof(packetKey)\n\n\t\t\tmsg := channeltypes.NewMsgPacket(packet, proof, proofHeight, suite.chainB.SenderAccount.GetAddress())\n\n\t\t\t// ante-handle RecvPacket\n\t\t\t_, err := handler(suite.chainB.GetContext(), msg)\n\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.Require().NoError(err)\n\n\t\t\t\t// replay should fail since state changes occur\n\t\t\t\t_, err := handler(suite.chainB.GetContext(), msg)\n\t\t\t\tsuite.Require().Error(err)\n\n\t\t\t\t// verify ack was written\n\t\t\t\tack, found := suite.chainB.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())\n\t\t\t\tsuite.Require().NotNil(ack)\n\t\t\t\tsuite.Require().True(found)\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err)\n\t\t\t}\n\t\t})\n\t}\n}", "func (_m *NSenterEventIface) GetRequestMsg() *domain.NSenterMessage {\n\tret := _m.Called()\n\n\tvar r0 *domain.NSenterMessage\n\tif rf, ok := ret.Get(0).(func() *domain.NSenterMessage); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*domain.NSenterMessage)\n\t\t}\n\t}\n\n\treturn r0\n}", "func TestAcquireTargets(t *testing.T) {\n\ttype mockTarget struct {\n\t\tID string\n\t\tMessage 
string\n\t\tCreatedOn string\n\t}\n\n\tmockData := mockTarget{\n\t\tID: \"01EBP4DP4VECW8PHDJJFNEDVKE\",\n\t\tMessage: \"send a message\",\n\t\tCreatedOn: \"2020-06-25T16:23:37.720Z\",\n\t}\n\n\tres, err := NewClient().AcquireTargets(context.Background(), &pb.EventMessage{\n\t\tId: \"01EBP4DP4VECW8PHDJJFNEDVKE\",\n\t\tName: \"targets.acquired\",\n\t\tData: []*pb.TargetResponse{\n\t\t\t{\n\t\t\t\tId: \"01EBP4DP4VECW8PHDJJFNEDVKE\",\n\t\t\t\tMessage: mockData.Message,\n\t\t\t\tCreatedOn: mockData.CreatedOn,\n\t\t\t},\n\t\t},\n\t\tCreatedOn: \"2020-06-25T16:23:37.720Z\",\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Test failed with err %v\", err)\n\t}\n\tt.Log(res)\n}", "func RequestCapturingMockHttpClient(f RequestToResponse) (*http.Client, *http.Request) {\n\tvar capture http.Request\n\treturn &http.Client{\n\t\tTransport: RequestToResponse(func(req *http.Request) (*http.Response, error) {\n\t\t\tcapture = *req.Clone(req.Context())\n\t\t\treturn f(req)\n\t\t}),\n\t}, &capture\n}", "func MockDialer() (net.Conn, error) {\n\t//build correct net.Conn here.\n\treturn mockConnInst, nil\n}", "func TestRequestDispatch(t *testing.T) {\n\ttestRequest := request{\n\t\tRequestID: \"1d6d02bd-8e56-421d-9438-3bd6d0079ff1\",\n\t\tOp: \"eval\",\n\t\tProcessor: \"\",\n\t\tArgs: map[string]interface{}{\n\t\t\t\"gremlin\": \"g.V(x)\",\n\t\t\t\"bindings\": map[string]string{\"x\": \"10\"},\n\t\t\t\"language\": \"gremlin-groovy\",\n\t\t},\n\t}\n\tc := newClient()\n\tmsg, err := packageRequest(testRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tc.dispatchRequest(msg)\n\treq := <-c.requests // c.requests is the channel where all requests are sent for writing to Gremlin Server, write workers listen on this channel\n\tif reflect.DeepEqual(msg, req) != true {\n\t\tt.Fail()\n\t}\n}", "func (o *MockGrpcOrderer) SendDeliver(ctx reqContext.Context, envelope *fab.SignedEnvelope) (chan *common.Block, chan error) {\n\treturn nil, nil\n}", "func (m *MockSession) SetRequestInFlight(arg0, arg1 string, 
arg2 bool) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"SetRequestInFlight\", arg0, arg1, arg2)\n}", "func (client *MockClient) Event(object runtime.Object, eventType string, reason string, message string) {\n\tclient.createEvent(buildEvent(object, eventType, reason, message))\n}", "func (_m *Topic) Send(_a0 context.Context, _a1 *pubsub.Message) error {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *pubsub.Message) error); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockRDSAPI) DownloadDBLogFilePortionRequest(arg0 *rds.DownloadDBLogFilePortionInput) (*request.Request, *rds.DownloadDBLogFilePortionOutput) {\n\tret := m.ctrl.Call(m, \"DownloadDBLogFilePortionRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.DownloadDBLogFilePortionOutput)\n\treturn ret0, ret1\n}", "func (m *MockCommand) SubscribeEvent(body string) (map[string]interface{}, error) {\n\tret := m.ctrl.Call(m, \"SubscribeEvent\", body)\n\tret0, _ := ret[0].(map[string]interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m MockBroker) Publish(qName string, msg []byte) error {\n\treturn nil\n}", "func (_m *Delivery) Fetch(c echo.Context) error {\n\tret := _m.Called(c)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(echo.Context) error); ok {\n\t\tr0 = rf(c)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (mmSend *mClientMockSend) Expect(ctx context.Context, n *Notification) *mClientMockSend {\n\tif mmSend.mock.funcSend != nil {\n\t\tmmSend.mock.t.Fatalf(\"ClientMock.Send mock is already set by Set\")\n\t}\n\n\tif mmSend.defaultExpectation == nil {\n\t\tmmSend.defaultExpectation = &ClientMockSendExpectation{}\n\t}\n\n\tmmSend.defaultExpectation.params = &ClientMockSendParams{ctx, n}\n\tfor _, e := range mmSend.expectations {\n\t\tif minimock.Equal(e.params, mmSend.defaultExpectation.params) 
{\n\t\t\tmmSend.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmSend.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmSend\n}", "func (m *mConsensusNetworkMockNewRequestBuilder) Set(f func() (r network.RequestBuilder)) *ConsensusNetworkMock {\n\tm.mock.NewRequestBuilderFunc = f\n\n\treturn m.mock\n}", "func (m *MockRDSAPI) DescribeEventSubscriptionsRequest(arg0 *rds.DescribeEventSubscriptionsInput) (*request.Request, *rds.DescribeEventSubscriptionsOutput) {\n\tret := m.ctrl.Call(m, \"DescribeEventSubscriptionsRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.DescribeEventSubscriptionsOutput)\n\treturn ret0, ret1\n}", "func (_m *MysqlOmdbRepository) LogRequest(ctx context.Context, request string, response string) (int64, error) {\n\tret := _m.Called(ctx, request, response)\n\n\tvar r0 int64\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string) int64); ok {\n\t\tr0 = rf(ctx, request, response)\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {\n\t\tr1 = rf(ctx, request, response)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *DynamoDBAPIMock) UpdateTimeToLiveRequest(_a0 *dynamodb.UpdateTimeToLiveInput) (*request.Request, *dynamodb.UpdateTimeToLiveOutput) {\n\tret := _m.Called(_a0)\n\n\tvar r0 *request.Request\n\tif rf, ok := ret.Get(0).(func(*dynamodb.UpdateTimeToLiveInput) *request.Request); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*request.Request)\n\t\t}\n\t}\n\n\tvar r1 *dynamodb.UpdateTimeToLiveOutput\n\tif rf, ok := ret.Get(1).(func(*dynamodb.UpdateTimeToLiveInput) *dynamodb.UpdateTimeToLiveOutput); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(*dynamodb.UpdateTimeToLiveOutput)\n\t\t}\n\t}\n\n\treturn r0, r1\n}", "func (m *mHostNetworkMockSendRequest) Expect(p network.Request, p1 
core.RecordRef) *mHostNetworkMockSendRequest {\n\tm.mockExpectations = &HostNetworkMockSendRequestParams{p, p1}\n\treturn m\n}", "func (m *ConsensusNetworkMock) SendRequest(p network.Request, p1 core.RecordRef) (r error) {\n\tatomic.AddUint64(&m.SendRequestPreCounter, 1)\n\tdefer atomic.AddUint64(&m.SendRequestCounter, 1)\n\n\tif m.SendRequestMock.mockExpectations != nil {\n\t\ttestify_assert.Equal(m.t, *m.SendRequestMock.mockExpectations, ConsensusNetworkMockSendRequestParams{p, p1},\n\t\t\t\"ConsensusNetwork.SendRequest got unexpected parameters\")\n\n\t\tif m.SendRequestFunc == nil {\n\n\t\t\tm.t.Fatal(\"No results are set for the ConsensusNetworkMock.SendRequest\")\n\n\t\t\treturn\n\t\t}\n\t}\n\n\tif m.SendRequestFunc == nil {\n\t\tm.t.Fatal(\"Unexpected call to ConsensusNetworkMock.SendRequest\")\n\t\treturn\n\t}\n\n\treturn m.SendRequestFunc(p, p1)\n}", "func (t *MockTransporter) Publish(e *Event) error {\n\tfmt.Println(string(e.GetEventAsJSON()))\n\treturn nil\n}", "func (m *mockedChannel) SendRequest(msg govppapi.Message) govppapi.RequestCtx {\n\tm.Msg = msg\n\tm.Msgs = append(m.Msgs, msg)\n\treturn m.channel.SendRequest(msg)\n}", "func handler(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tctx, cancel := context.WithTimeout(ctx, timeout)\n\tdefer cancel()\n\n\tlistener := new(eventListener)\n\n\tclientOptions := options.Client().ApplyURI(os.Getenv(\"MONGODB_URI\")).\n\t\tSetMonitor(listener.commandMonitor()).\n\t\tSetServerMonitor(listener.serverMonitor()).\n\t\tSetPoolMonitor(listener.poolMonitor())\n\n\t// Create a MongoClient that points to MONGODB_URI and listens to the\n\t// ComandMonitor, ServerMonitor, and PoolMonitor events.\n\tclient, err := mongo.NewClient(clientOptions)\n\tif err != nil {\n\t\treturn gateway500(), fmt.Errorf(\"failed to create client: %w\", err)\n\t}\n\n\t// Attempt to connect to the client with a timeout.\n\tif err = client.Connect(ctx); err != nil {\n\t\treturn 
gateway500(), fmt.Errorf(\"failed to connect: %w\", err)\n\t}\n\n\tdefer client.Disconnect(ctx)\n\n\tcollection := client.Database(\"faas\").Collection(\"lambda\")\n\n\t// Create a document to insert for the automated test.\n\tdoc := map[string]string{\"hello\": \"world\"}\n\n\t// Insert the document.\n\t_, err = collection.InsertOne(ctx, doc)\n\tif err != nil {\n\t\treturn gateway500(), fmt.Errorf(\"failed to insert: %w\", err)\n\t}\n\n\t// Delete the document.\n\t_, err = collection.DeleteOne(ctx, doc)\n\tif err != nil {\n\t\treturn gateway500(), fmt.Errorf(\"failed to delete: %w\", err)\n\t}\n\n\tvar avgCmdDur float64\n\tif count := listener.commandCount; count != 0 {\n\t\tavgCmdDur = float64(listener.commandDuration) / float64(count)\n\t}\n\n\tvar avgHBDur float64\n\tif count := listener.heartbeatCount; count != 0 {\n\t\tavgHBDur = float64(listener.heartbeatDuration) / float64(count)\n\t}\n\n\trsp := &response{\n\t\tAvgCommandDuration: avgCmdDur,\n\t\tAvgHeartbeatDuration: avgHBDur,\n\t\tOpenConnections: listener.openConnections,\n\t\tHeartbeatCount: listener.heartbeatCount,\n\t}\n\n\tbody, err := json.Marshal(rsp)\n\tif err != nil {\n\t\treturn gateway500(), fmt.Errorf(\"failed to marshal: %w\", err)\n\t}\n\n\treturn events.APIGatewayProxyResponse{\n\t\tBody: string(body),\n\t\tStatusCode: http.StatusOK,\n\t}, nil\n}", "func (mmSend *mClientMockSend) Set(f func(ctx context.Context, n *Notification) (err error)) *ClientMock {\n\tif mmSend.defaultExpectation != nil {\n\t\tmmSend.mock.t.Fatalf(\"Default expectation is already set for the Client.Send method\")\n\t}\n\n\tif len(mmSend.expectations) > 0 {\n\t\tmmSend.mock.t.Fatalf(\"Some expectations are already set for the Client.Send method\")\n\t}\n\n\tmmSend.mock.funcSend = f\n\treturn mmSend.mock\n}" ]
[ "0.58376014", "0.5605098", "0.55678225", "0.5521179", "0.5324111", "0.5312156", "0.53088546", "0.5291828", "0.52528024", "0.5136864", "0.5119927", "0.5097069", "0.50700843", "0.5063882", "0.50509727", "0.5048421", "0.50330704", "0.50261426", "0.5017538", "0.50014174", "0.49638373", "0.49535796", "0.49495", "0.492674", "0.49072686", "0.48897928", "0.48870334", "0.4825286", "0.48232996", "0.47936174", "0.47921753", "0.47786045", "0.47733656", "0.47642374", "0.47541177", "0.4745443", "0.47449374", "0.4738006", "0.47378728", "0.47318003", "0.4716037", "0.47154045", "0.47150517", "0.47112772", "0.47053808", "0.46835005", "0.46822855", "0.46777594", "0.46743515", "0.46581116", "0.4651475", "0.46420494", "0.4638548", "0.46379626", "0.46322328", "0.4629141", "0.46253386", "0.46179062", "0.46165362", "0.46154815", "0.4612495", "0.4600641", "0.45996934", "0.45920855", "0.4590728", "0.4589696", "0.45834464", "0.45809937", "0.45760334", "0.45759755", "0.4568351", "0.45663798", "0.45657054", "0.45652047", "0.4552315", "0.45496964", "0.4543873", "0.45408517", "0.4531692", "0.45314342", "0.45287752", "0.45240495", "0.45231417", "0.4520027", "0.4519807", "0.45172247", "0.45168525", "0.45140052", "0.4511339", "0.45086673", "0.45073324", "0.450349", "0.45027286", "0.4500247", "0.4495923", "0.44921905", "0.4491548", "0.44908082", "0.44902095", "0.44893852" ]
0.6817739
0
Init provides a mock function with given fields: ctx, prefix, callbacks
func (_m *Plugin) Init(ctx context.Context, prefix config.Prefix, callbacks events.Callbacks) error { ret := _m.Called(ctx, prefix, callbacks) var r0 error if rf, ok := ret.Get(0).(func(context.Context, config.Prefix, events.Callbacks) error); ok { r0 = rf(ctx, prefix, callbacks) } else { r0 = ret.Error(0) } return r0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (stub *MockStub) MockInit(uuid string, args [][]byte) pb.Response {\n\tstub.args = args\n\tstub.MockTransactionStart(uuid)\n\tres := stub.cc.Init(stub)\n\tstub.MockTransactionEnd(uuid)\n\treturn res\n}", "func (m *MockInterface) Init(kubeconfigPath, kubeconfigContext string) error {\n\treturn nil\n}", "func MockInitialize() {\n\tledgermgmt.InitializeTestEnvWithInitializer(\n\t\t&ledgermgmt.Initializer{\n\t\t\tCustomTxProcessors: ConfigTxProcessors,\n\t\t},\n\t)\n\tchains.list = make(map[string]*chain)\n\tchainInitializer = func(string) { return }\n}", "func Test_Init(t *testing.T) {\n\tclient, err := Load(\"\", true)\n\tassert.Nil(t, err)\n\tmockClient = client\n}", "func (p *provider) Init(ctx servicehub.Context) error {\n\tp.accessKeyValidator = &accessKeyValidator{\n\t\tTokenService: p.TokenService,\n\t\tcollection: AccessItemCollection{},\n\t}\n\tctx.AddTask(p.InitAKItemTask)\n\tctx.AddTask(p.SyncAKItemTask)\n\treturn nil\n}", "func (self *stubHandler) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\n\targs := stub.GetArgs()\n\n\tif len(args) != 2 {\n\t\treturn shim.Error(\"Expected exactly two arguments\")\n\t}\n\n\tfunction := string(args[0])\n\n\tif function != \"init\" {\n\t\treturn shim.Error(\"Function must be \\\"init\\\"\")\n\t}\n\n\tdispatcher := self.initDispatcher\n\tif dispatcher == nil {\n\t\treturn shim.Error(\"Init interface not found: Please define \\\"appinit\\\"\")\n\t}\n\n\treturn dispatcher.Dispatch(stub, 1, args[1])\n}", "func (_m *AsyncBR) Init(ctx context.Context, bslName string, sourceNamespace string, uploaderType string, repositoryType string, repoIdentifier string, repositoryEnsurer *repository.Ensurer, credentialGetter *credentials.CredentialGetter) error {\n\tret := _m.Called(ctx, bslName, sourceNamespace, uploaderType, repositoryType, repoIdentifier, repositoryEnsurer, credentialGetter)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string, *repository.Ensurer, 
*credentials.CredentialGetter) error); ok {\n\t\tr0 = rf(ctx, bslName, sourceNamespace, uploaderType, repositoryType, repoIdentifier, repositoryEnsurer, credentialGetter)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (t *testControllerInitFunc) Init(ctx context.Context) {\n\tt.Username = ctx.Params().Get(\"username\")\n\t// or t.Params.Get(\"username\") because the\n\t// t.Ctx == ctx and is being initialized before this \"Init\"\n}", "func (_m *MockStore) Init(opts ...store.Option) error {\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(...store.Option) error); ok {\n\t\tr0 = rf(opts...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *MockActors) Init(_ context.Context) error {\n\tret := _m.Called()\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *MockStateStore) Init(metadata state.Metadata) error {\n\tret := _m.Called(metadata)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(state.Metadata) error); ok {\n\t\tr0 = rf(metadata)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (t *AnswerChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\tfmt.Println(\"Answer Store Channel Is Starting Up\")\n\tfuncName, args := stub.GetFunctionAndParameters()\n\tvar err error\n\ttxId := stub.GetTxID()\n\n\tfmt.Println(\" Init() is running\")\n\tfmt.Println(\" Transaction ID: \", txId)\n\tfmt.Println(\" GetFunctionAndParameters() function: \", funcName)\n\tfmt.Println(\" GetFunctionAndParameters() args count: \", len(args))\n\tfmt.Println(\" GetFunctionAndParameters() args found: \", args)\n\n\t// expecting 1 arg for instantiate or upgrade\n\tif len(args) == 2 {\n\t\tfmt.Println(\" GetFunctionAndParameters() : Number of 
arguments\", len(args))\n\t}\n\n\terr = stub.PutState(args[0], []byte(args[1]))\n\tif err != nil {\n\t\treturn shim.Error(err.Error()) //self-test fail\n\t}\n\n\tfmt.Println(\"Ready for action\") //self-test pass\n\treturn shim.Success(nil)\n}", "func (m *MockUnsignedTx) InitCtx(arg0 *snow.Context) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"InitCtx\", arg0)\n}", "func init() {\n\tif STORMPATH_API_KEY_ID == \"\" {\n\t\tlog.Fatal(\"STORMPATH_API_KEY_ID not set in the environment.\")\n\t} else if STORMPATH_API_KEY_SECRET == \"\" {\n\t\tlog.Fatal(\"STORMPATH_API_KEY_SECRET not set in the environment.\")\n\t}\n\n\t// Generate a globally unique UUID to be used as a prefix throughout our\n\t// testing.\n\tuuid, err := uuid.NewV4()\n\tif err != nil {\n\t\tlog.Fatal(\"UUID generation failed.\")\n\t}\n\n\t// Store our test prefix.\n\tTEST_PREFIX = uuid.String() + \"-\"\n\n\t// Generate a Stormpath client we'll use for all our tests.\n\tclient, err := NewClient(&ApiKeyPair{\n\t\tId: STORMPATH_API_KEY_ID,\n\t\tSecret: STORMPATH_API_KEY_SECRET,\n\t})\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't create a Stormpath client.\")\n\t}\n\tCLIENT = client\n}", "func (c *SimpleChaincode) init(stub shim.ChaincodeStubInterface, args []string) pb.Response{\n\tfmt.Println(\"DONE !!!\")\n\treturn shim.Success(nil)\n}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tfmt.Println(\"invoke Init Method\")\n\t\n\tvar tranHld TRAN_Holder\n\t \n if len(args) != 1 {\n return nil, errors.New(\"Incorrect number of arguments. 
Expecting 1\")\n }\n\n bytes, err := json.Marshal(tranHld)\n\n if err != nil { \n \treturn nil, errors.New(\"Error creating TRAN_Holder record\") \n }\n\n\terr = stub.PutState(\"tranIDs\", bytes)\n\t\n return nil, nil\n}", "func (classRepo *mockClassRepo) Initialize(ctx context.Context, db *sql.DB) {}", "func (t *ManagePatient) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n var msg string\n var err error\n if len(args) != 1 {\n return nil, errors.New(\"Incorrect number of arguments. Expecting 1\")\n }\n // Initialize the chaincode\n msg = args[0]\n fmt.Println(\"ManagePatient chaincode is deployed successfully.\");\n \n // Write the state to the ledger\n err = stub.PutState(\"abc\", []byte(msg)) //making a test var \"abc\", I find it handy to read/write to it right away to test the network\n if err != nil {\n return nil, err\n }\n \n var empty []string\n jsonAsBytes, _ := json.Marshal(empty) //marshal an emtpy array of strings to clear the index\n err = stub.PutState(PatientIndexStr, jsonAsBytes)\n if err != nil {\n return nil, err\n }\n err = stub.PutState(EVENT_COUNTER, []byte(\"1\"))\n if err != nil {\n return nil, err\n }\n return nil, nil\n}", "func init(){\n \n err := CreateSchema()\n if err != nil {\n fmt.Printf(\"token-mgr_test:init:%s\\n\", err)\n os.Exit(1)\n }\n MockTokenTuple = TokenTuple{1234, MockUserEmail, \"12345678901234567890\"}\n err = MockTokenTuple.UpdateToken()\n if err != nil {\n fmt.Printf(\"token-mgr_test:init:%s\\n\", err)\n os.Exit(2)\n }\n}", "func InitMockConn() *Mock {\n\tm := &Mock{}\n\tm.ti = nftableslib.InitNFTables(m)\n\treturn m\n}", "func (_m *SMSStorage) Init() error {\n\tret := _m.Called()\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockPluginInitializerClient) Init(ctx context.Context, in *PluginInitialization_Request, opts ...grpc.CallOption) (*PluginInitialization_Response, 
error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Init\", varargs...)\n\tret0, _ := ret[0].(*PluginInitialization_Response)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func init() {\n\tExample_invoke()\n}", "func (t *SimpleChaincode) Init(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\terr := stub.PutState(\"hello_world\", []byte(args[0]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}", "func (t *SimpleChaincode) Init(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\terr := stub.PutState(\"hello_world\", []byte(args[0]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}", "func (t *SimpleChaincode) Init(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\terr := stub.PutState(\"hello_world\", []byte(args[0]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}", "func (askForHelpRepo *mockAskForHelpRepo) Initialize(ctx context.Context, db *sql.DB) {}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. 
Expecting 1\")\n\t}\n\n\terr := stub.PutState(\"hello_world\", []byte(args[0]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}", "func (tr *TestRecorder) init() {}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tfmt.Printf(\"Init called\")\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\t//return t.writeDummyProvider(stub)\n\treturn nil, nil\n}", "func init() {\n\tsrClient = CreateMockSchemaRegistryClient(\"mock://testingUrl\")\n\n\t// Test Schema and Value Schema creation\n\t_, _ = srClient.CreateSchema(\"test1\", schema, Avro, false)\n\t_, _ = srClient.CreateSchema(\"test1\", schema, Avro, true)\n\t// Test version upgrades for key and value and more registration\n\t_, _ = srClient.CreateSchema(\"test1\", schema2, Avro, false)\n\t_, _ = srClient.CreateSchema(\"test1\", schema2, Avro, true)\n\n\t// Test version upgrades for key and value and more registration (arbitrary subject)\n\t_, _ = srClient.CreateSchemaWithArbitrarySubject(\"test1_arb\", schema3, Avro)\n\t_, _ = srClient.CreateSchemaWithArbitrarySubject(\"test1_arb\", schema4, Avro)\n}", "func (c *ChainCode) Init(ctx contractapi.TransactionContextInterface) error {\r\n \r\n _, err := set(ctx.GetStub(), NEXT_SHOW_ID, \"0\")\r\n _, err = set(ctx.GetStub(), NEXT_TICKET_ID, \"0\")\r\n \r\n if err != nil {\r\n return \"nil\", fmt.Errorf(err.Error())\r\n }\r\n \r\n return nil\r\n}", "func (s *SmartContract) Init(APIstub shim.ChaincodeStubInterface) sc.Response {\n fmt.Println(\"Calling instantiate method.\")\n return shim.Success(nil)\n}", "func (achieveRepo *mockAchieveRepo) Initialize(ctx context.Context, db *sql.DB) {}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\tfmt.Println(\"Marbles Is Starting Up\")\n\tfuncName, args := stub.GetFunctionAndParameters()\n\tvar number int\n\tvar err error\n\ttxId := 
stub.GetTxID()\n\t\n\tfmt.Println(\"Init() is running\")\n\tfmt.Println(\"Transaction ID:\", txId)\n\tfmt.Println(\" GetFunctionAndParameters() function:\", funcName)\n\tfmt.Println(\" GetFunctionAndParameters() args count:\", len(args))\n\tfmt.Println(\" GetFunctionAndParameters() args found:\", args)\n\n\t// expecting 1 arg for instantiate or upgrade\n\tif len(args) == 1 {\n\t\tfmt.Println(\" GetFunctionAndParameters() arg[0] length\", len(args[0]))\n\n\t\t// expecting arg[0] to be length 0 for upgrade\n\t\tif len(args[0]) == 0 {\n\t\t\tfmt.Println(\" Uh oh, args[0] is empty...\")\n\t\t} else {\n\t\t\tfmt.Println(\" Great news everyone, args[0] is not empty\")\n\n\t\t\t// convert numeric string to integer\n\t\t\tnumber, err = strconv.Atoi(args[0])\n\t\t\tif err != nil {\n\t\t\t\treturn shim.Error(\"Expecting a numeric string argument to Init() for instantiate\")\n\t\t\t}\n\n\t\t\t// this is a very simple test. let's write to the ledger and error out on any errors\n\t\t\t// it's handy to read this right away to verify network is healthy if it wrote the correct value\n\t\t\terr = stub.PutState(\"selftest\", []byte(strconv.Itoa(number)))\n\t\t\tif err != nil {\n\t\t\t\treturn shim.Error(err.Error()) //self-test fail\n\t\t\t}\n\t\t}\n\t}\n\n\t// showing the alternative argument shim function\n\talt := stub.GetStringArgs()\n\tfmt.Println(\" GetStringArgs() args count:\", len(alt))\n\tfmt.Println(\" GetStringArgs() args found:\", alt)\n\n\t// store compatible marbles application version\n\terr = stub.PutState(\"marbles_ui\", []byte(\"4.0.1\"))\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"Ready for action\") //self-test pass\n\treturn shim.Success(nil)\n}", "func InitMockCalculator(d int, err error) {\n\tvar mock mockCalculator\n\n\tif err == nil {\n\t\tmock.distance = d\n\t} else {\n\t\tmock.err = &err\n\t}\n\n\tcalc = &mock\n}", "func (s *InMemoryHandler) Init() error {\n\ts.logger = log.WithFields(log.Fields{\"app\": 
\"inmemory-db\"})\n\ts.functions = make(map[string]model.FunctionConfig)\n\treturn nil\n}", "func (sessionRepo *mockSessionRepo) Initialize(ctx context.Context, db *sql.DB) {}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error){\n\t//generate params\n\tvar params = C.CCParamsGen()\n\tstub.PutState( \"params\", []byte( C.GoString(params) ) )\n\n\t//get params obj\n\toParams := C.CCParamsLoad( params )\n\tC.CCStrDel(params)\n\n\t//init commit counter\n\tcounter := 0\n\tstub.PutState( \"counter\", []byte( strconv.Itoa(counter) ) )\n\n\t//generate accumulator\n\taccum := C.CCAccumGen( oParams )\n\tstub.PutState( \"accumlator\", []byte( C.GoString(accum) ) )\n\tC.CCStrDel(accum)\n\n\t//release object params\n\t//defer C.free( oParams )//??\n\tC.CCParamsDel( oParams )\n\n\treturn nil, nil\n}", "func (transactionRepo *mockTransactionRepo) Initialize(ctx context.Context, db *sql.DB) {}", "func (t *myChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n if len(args) != 1 {\n return nil, errors.New(\"Incorrect number of arguments. 
Expecting 1\")\n }\n\n return nil, nil\n}", "func (_m *MockQueryCoord) Init() error {\n\tret := _m.Called()\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (c *Context) init(ctx *fasthttp.RequestCtx) {\n\tc.RequestCtx = ctx\n\tc.data = newDataMap()\n\tc.index = -1\n\tc.Serialize = Serialize\n}", "func (cc *Chaincode) Init(stub shim.ChaincodeStubInterface) sc.Response {\n\t_, _ = stub.GetFunctionAndParameters()\n\n\treturn shim.Success(nil)\n}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error){\n\t//generate params\n\tvar params = C.CCParamsGen()\n\tstub.PutState( \"params\", []byte( C.GoString(params) ) )\n\n\t//get params obj\n\toParams := C.CCParamsLoad( params )\n\tC.CCStrDel(param)\n\n\t//init commit counter\n\tcounter := 0\n\tstub.PutState( \"counter\", []byte( strconv.Itoa(counter) ) )\n\n\t//generate accumulator\n\taccum := C.CCAccumGen( oParams )\n\tstub.PutState( \"accumlator\", []byte( C.GoString(accum) ) )\n\tC.CCStrDel(accum)\n\n\t//release object params\n\t//defer C.free( oParams )//??\n\tC.CCParamsDel( oParams )\n\n\treturn nil, nil\n}", "func (t *TestHandler) Init() error {\n\tfmt.Println(\"TestHandler.Init\")\n\treturn nil\n}", "func (t *TestHandler) Init() error {\n\tfmt.Println(\"TestHandler.Init\")\n\treturn nil\n}", "func (n *mockAgent) init(ctx context.Context, sandbox *Sandbox, config KataAgentConfig) (bool, error) {\n\treturn false, nil\n}", "func (c *HelloWorld) Init(ctx contract.Context, req *types.HelloRequest) error {\n\treturn nil\n}", "func (_m *ReceiptStore) Init() error {\n\tret := _m.Called()\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (cc *ValidatorChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\tfunction, params := 
stub.GetFunctionAndParameters()\n\tlogger.Info(function, params)\n\treturn shim.Success(nil)\n}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n var stateArg ContractState\n var err error\n\n log.Info(\"Entering INIT\")\n \n if len(args) != 1 {\n err = errors.New(\"init expects one argument, a JSON string with mandatory version and optional nickname\") \n log.Critical(err)\n return nil, err \n }\n\n err = json.Unmarshal([]byte(args[0]), &stateArg)\n if err != nil {\n err = fmt.Errorf(\"Version argument unmarshal failed: %s\", err)\n log.Critical(err)\n return nil, err \n }\n \n if stateArg.Nickname == \"\" {\n stateArg.Nickname = DEFAULTNICKNAME\n } \n\n (*log).setModule(stateArg.Nickname)\n \n err = initializeContractState(stub, stateArg.Version, stateArg.Nickname)\n if err != nil {\n return nil, err\n }\n \n log.Info(\"Contract initialized\")\n return nil, nil\n}", "func (announceRepo *mockAnnounceRepo) Initialize(ctx context.Context, db *sql.DB) {}", "func (t *SimpleChaincode) Init(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {\n\treturn nil, nil\n}", "func (c *contract) _init(ctx sdk.Context) error {\n\n\t// Save a key with the name \"name\" and string value \"Fun_Token\" in the state of the contract\n\tc.State.WriteStringByKey(ctx, \"name\", \"Fun_Token\")\n\n\t// Save a key with the name \"symbol\" and string value \"FUN\"\n\tc.State.WriteStringByKey(ctx, \"symbol\", \"FUN\")\n\n\t// Save a key with the name \"totalSupply\" and uint64 value \"FUN\" and return\n\treturn c.State.WriteUint64ByKey(ctx, \"totalSupply\", 1000000000)\n}", "func Mock(fake string) func() {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\torigin := backend\n\tbackend = fake\n\treturn func() { Mock(origin) }\n}", "func Test_Init(t *testing.T) {\n\tscc := new(SmartContract)\n\tstub := shim.NewMockStub(\"hcdm\", scc)\n\n\t// Init A=123 B=234\n\tcheckInit(t, stub)\n\n}", "func (t *SBITransaction) 
Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tfmt.Println(\"Inside INIT for test chaincode\")\n\treturn nil, nil\n}", "func (accountRepo *mockAccountRepo) Initialize(ctx context.Context, db *sql.DB) {}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\tfmt.Println(\"CC Demo Is Starting Up\")\n\t_, args := stub.GetFunctionAndParameters()\n\tvar Aval int\n\tvar err error\n\n\tfmt.Println(\"Init() args count:\", len(args))\n\tfmt.Println(\"Init() args found:\", args)\n\n\t// expecting 1 arg for instantiate or upgrade\n\tif len(args) == 1 {\n\t\tfmt.Println(\"Init() arg[0] length\", len(args[0]))\n\n\t\t// expecting arg[0] to be length 0 for upgrade\n\t\tif len(args[0]) == 0 {\n\t\t\tfmt.Println(\"args[0] is empty... must be upgrading\")\n\t\t} else {\n\t\t\tfmt.Println(\"args[0] is not empty, must be instantiating\")\n\n\t\t\t// convert numeric string to integer\n\t\t\tAval, err = strconv.Atoi(args[0])\n\t\t\tif err != nil {\n\t\t\t\treturn shim.Error(\"Expecting a numeric string argument to Init() for instantiate\")\n\t\t\t}\n\n\t\t\t// this is a very simple test. let's write to the ledger and error out on any errors\n\t\t\t// it's handy to read this right away to verify network is healthy if it wrote the correct value\n\t\t\terr = stub.PutState(\"selftest\", []byte(strconv.Itoa(Aval)))\n\t\t\tif err != nil {\n\t\t\t\treturn shim.Error(err.Error()) //self-test fail\n\t\t\t}\n\t\t}\n\t}\n\n\t// store compaitible CC Demo application version\n\terr = stub.PutState(\"listings_ui\", []byte(\"4.0.0\"))\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\" - ready for action\") //self-test pass\n\treturn shim.Success(nil)\n}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. 
Expecting 1\")\n\t}\n\n\terr := stub.PutState(\"hello_Block\", []byte(args[0]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}", "func (_m *Plugin) InitPrefix(prefix config.Prefix) {\n\t_m.Called(prefix)\n}", "func (h *Handler) Init(dbManager database.Manager, cacheManager cache.Manager, mqManager messagingqueue.Manager) {\n\th.dbManager = dbManager\n\th.cacheManager = cacheManager\n\th.mqManager = mqManager\n}", "func init() {\n\tconfig = tendermint_test.ResetConfig(\"rpc_test_client_test\")\n\tchainID = config.GetString(\"chain_id\")\n\trpcAddr = config.GetString(\"rpc_laddr\")\n\tgrpcAddr = config.GetString(\"grpc_laddr\")\n\trequestAddr = rpcAddr\n\twebsocketAddr = rpcAddr\n\twebsocketEndpoint = \"/websocket\"\n\n\tclientURI = client.NewClientURI(requestAddr)\n\tclientJSON = client.NewClientJSONRPC(requestAddr)\n\tclientGRPC = core_grpc.StartGRPCClient(grpcAddr)\n\n\t// TODO: change consensus/state.go timeouts to be shorter\n\n\t// start a node\n\tready := make(chan struct{})\n\tgo newNode(ready)\n\t<-ready\n}", "func (m *MockPluginInitializerServer) Init(arg0 context.Context, arg1 *PluginInitialization_Request) (*PluginInitialization_Response, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Init\", arg0, arg1)\n\tret0, _ := ret[0].(*PluginInitialization_Response)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (programRepo *mockProgramRepo) Initialize(ctx context.Context, db *sql.DB) {}", "func TestHandlerInit(t *testing.T) {\n\t// Sync the test group\n\tg := TestGroup{}\n\tg.Init()\n\t// Initialize the handler\n\tg.handler.Init(g.client, g.edgenetClient)\n\tutil.Equals(t, g.client, g.handler.clientset)\n\tutil.Equals(t, g.edgenetClient, g.handler.edgenetClientset)\n\tutil.Equals(t, \"team-quota\", g.handler.resourceQuota.Name)\n\tutil.NotEquals(t, nil, g.handler.resourceQuota.Spec.Hard)\n\tutil.Equals(t, int64(0), g.handler.resourceQuota.Spec.Hard.Pods().Value())\n}", "func (t *ManageMerchant) Init(stub 
shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\r\n\tvar msg string\r\n\tvar err error\r\n\tif len(args) != 1 {\r\n\t\terrMsg := \"{ \\\"message\\\" : \\\"Incorrect number of arguments. Expecting ' ' as an argument\\\", \\\"code\\\" : \\\"503\\\"}\"\r\n\t\terr = stub.SetEvent(\"errEvent\", []byte(errMsg))\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t} \r\n\t\treturn nil, nil\r\n\t}\r\n\r\n\t// Initialize the chaincode\r\n\tmsg = args[0]\r\n\t// Write the state to the ledger\r\n\terr = stub.PutState(\"abc\", []byte(msg))\t\t//making a test var \"abc\", I find it handy to read/write to it right away to test the network\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tvar empty []string\r\n\tjsonAsBytes, _ := json.Marshal(empty)\t\t\t\t\t\t\t\t//marshal an emtpy array of strings to clear the index\r\n\terr = stub.PutState(CustomerIndexStr, jsonAsBytes)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\terr = stub.PutState(TransactionIndexStr, jsonAsBytes)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\ttosend := \"{ \\\"message\\\" : \\\"ManageMerchant chaincode is deployed successfully.\\\", \\\"code\\\" : \\\"200\\\"}\"\r\n\terr = stub.SetEvent(\"evtsender\", []byte(tosend))\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t} \r\n\treturn nil, nil\r\n}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\treturn nil, nil\n}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\tfmt.Println(\"Marbles Is Starting Up\")\n\tfuncName, args := stub.GetFunctionAndParameters()\n\ttxId := stub.GetTxID()\n\n\tfmt.Println(\"Init() is running\")\n\tfmt.Println(\"Transaction ID:\", txId)\n\tfmt.Println(\" GetFunctionAndParameters() function:\", funcName)\n\tfmt.Println(\" GetFunctionAndParameters() args count:\", len(args))\n\tfmt.Println(\" GetFunctionAndParameters() args found:\", args)\n\n\tt.enroll_donor(stub, 
[]string{\"d1\", \"김현욱\", \"010-1234-5678\"})\n\tt.enroll_npo(stub, []string{\"n1\",\"프리즈밍\"})\n\tt.enroll_npo(stub, []string{\"n2\",\"비영리스타트업\"})\n\tt.enroll_npo(stub, []string{\"n3\",\"서울시NPO지원센터\"})\n\tt.enroll_npo(stub, []string{\"n4\",\"아름다운가게\"})\n\tt.enroll_recipient(stub, []string{\"r1\",\"윤지성\",\"Permanent\"})\n\n\n\n\tfmt.Println(\"Ready for action\") //self-test pass\n\treturn shim.Success(nil)\n}", "func initMockedApp() *appWithMocks {\n\tmainMockTimer := newMockTimer()\n\tmainGreeter := greeter{\n\t\tT: mainMockTimer,\n\t}\n\tmainApp := app{\n\t\tg: mainGreeter,\n\t}\n\tmainAppWithMocks := &appWithMocks{\n\t\tapp: mainApp,\n\t\tmt: mainMockTimer,\n\t}\n\treturn mainAppWithMocks\n}", "func (userAfhRepo *mockUserAfhRepo) Initialize(ctx context.Context, db *sql.DB) {}", "func (c *Client) init() {\n\tif c.domains == nil {\n\t\tc.domains = map[int]string{}\n\t}\n\tif c.cfg == nil {\n\t\tc.cfg = manager.NewAtomicConfig(tg.Config{})\n\t}\n\tc.ready = tdsync.NewResetReady()\n\tc.restart = make(chan struct{})\n\tc.migration = make(chan struct{}, 1)\n\tc.sessions = map[int]*pool.SyncSession{}\n\tc.subConns = map[int]CloseInvoker{}\n\tc.invoker = chainMiddlewares(InvokeFunc(c.invokeDirect), c.mw...)\n\tc.tg = tg.NewClient(c.invoker)\n}", "func (t *CCHandler) Init() error {\n\tlog.Info(\"TestHandler.Init\")\n\treturn nil\n}", "func (c *CentralCacheTestImpl) Init(conf Config) {\n\tc.baseUrl = conf.Host\n\tc.keyPrefix = conf.KeyPrefix\n\tc.dumpFilePath = conf.DumpFilePath\n\tc.expirySec = conf.ExpirySec\n\tc.file = nil\n}", "func (_m *IReplyType) Initialize(ctx context.T, result agentcontracts.DocumentResult, replyUUID uuid.UUID) {\n\t_m.Called(ctx, result, replyUUID)\n}", "func (cc *Chaincode) Init(stub shim.ChaincodeStubInterface) sc.Response {\n\tfcn, params := stub.GetFunctionAndParameters()\n\tfmt.Println(\"Init()\", fcn, params)\n\treturn shim.Success(nil)\n}", "func (cc *Chaincode) Init(stub shim.ChaincodeStubInterface) sc.Response {\n\tfcn, params := 
stub.GetFunctionAndParameters()\n\tfmt.Println(\"Init()\", fcn, params)\n\treturn shim.Success(nil)\n}", "func (cc *Chaincode) Init(stub shim.ChaincodeStubInterface) sc.Response {\n\tfcn, params := stub.GetFunctionAndParameters()\n\tfmt.Println(\"Init()\", fcn, params)\n\treturn shim.Success(nil)\n}", "func getInitChainer(mapp *mock.App, keeper keeper.BaseKeeper, tk token.Keeper) sdk.InitChainer {\n\treturn func(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain {\n\t\tmapp.InitChainer(ctx, req)\n\t\tbankGenesis := bank.DefaultGenesisState()\n\t\tbank.InitGenesis(ctx, keeper, bankGenesis)\n\n\t\t//init foocoinInfo and barcoinInfo\n\t\tinfos := token.DefaultGenesisState()\n\t\tgs := &infos\n\t\tfoocoinInfo := sdk.TokenInfoWithoutSupply{\n\t\t\tSymbol: \"foocoin\",\n\t\t\tIssuer: \"\",\n\t\t\tIsSendEnabled: true,\n\t\t\tDecimals: 8,\n\t\t}\n\t\tbarcoinInfo := sdk.TokenInfoWithoutSupply{\n\t\t\tSymbol: \"barcoin\",\n\t\t\tIssuer: \"\",\n\t\t\tIsSendEnabled: true,\n\t\t\tDecimals: 8,\n\t\t}\n\n\t\tgs.AddTokenInfoWithoutSupplyIntoGenesis(foocoinInfo)\n\t\tgs.AddTokenInfoWithoutSupplyIntoGenesis(barcoinInfo)\n\n\t\ttoken.InitGenesis(ctx, tk, *gs)\n\t\treturn abci.ResponseInitChain{}\n\t}\n}", "func (_m *MockEncoderPool) Init(alloc EncoderAllocate) {\n\t_m.ctrl.Call(_m, \"Init\", alloc)\n}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. 
Expecting 1\")\n\t}\n\n\terr := stub.PutState(\"contract_json\", []byte(args[0]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}", "func (p *PassthruChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\tfunction, _ := stub.GetFunctionAndParameters()\n\tif strings.Index(function, \"error\") >= 0 {\n\t\treturn shim.Error(function)\n\t}\n\treturn shim.Success([]byte(function))\n}", "func (userRepo *mockUserRepo) Initialize(ctx context.Context, db *sql.DB) {}", "func (t *BenchmarkerChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\targs := stub.GetStringArgs()\n\n\tif len(args) != 1 {\n\t\treturn shim.Error(fmt.Sprintf(\"Incorrect number of arguments. Expecting 1. You gave %+v\", args))\n\t}\n\n\treturn shim.Success(nil)\n}", "func Initialize(cfg Config) (Context, error) {\n\n\tif cfg.Host == \"\" {\n\t\tlog.Info(\"Host name empty, returning mocked context instead.\")\n\t\treturn &mockedContext{}, nil\n\t}\n\n\tvar connClosedError = make(chan *amqp.Error)\n\tvar context *rabbitMQContext\n\tvar err error\n\n\tfor {\n\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tcontext, err = createMessageQueueChannel(&rabbitMQContext{\n\t\t\tcfg: cfg,\n\t\t\tconnectionClosedError: connClosedError,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\terr = createTopicExchange(context)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\terr = createCommandAndResponseQueues(context)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\treturn context, nil\n\t}\n}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\n\tif len(args) != 0 {\n\t\treturn nil, errors.New(\"incorrect number of arguments. 
Expecting 0\")\n\t}\n\n\treturn nil, nil\n}", "func (o *CallbackOperator) Init() {}", "func (_m *MockMultiReaderIteratorPool) Init(alloc ReaderIteratorAllocate) {\n\t_m.ctrl.Call(_m, \"Init\", alloc)\n}", "func TestInternalInit(t *testing.T) {\n\tvar err error\n\n\t// delete the default path\n\tdbPath := filepath.Join(fs.CacheDir, \"cache-backend\", *RemoteName+\".db\")\n\tboltDb, err = cache.GetPersistent(dbPath, &cache.Features{PurgeDb: true})\n\trequire.NoError(t, err)\n\tfstest.Initialise()\n\n\tif len(*WrapRemote) == 0 {\n\t\t*WrapRemote = \"localInternal:/var/tmp/rclone-cache\"\n\t\tfs.ConfigFileSet(\"localInternal\", \"type\", \"local\")\n\t\tfs.ConfigFileSet(\"localInternal\", \"nounc\", \"true\")\n\t}\n\n\tremoteExists := false\n\tfor _, s := range fs.ConfigFileSections() {\n\t\tif s == *RemoteName {\n\t\t\tremoteExists = true\n\t\t}\n\t}\n\n\tif !remoteExists {\n\t\tfs.ConfigFileSet(*RemoteName, \"type\", \"cache\")\n\t\tfs.ConfigFileSet(*RemoteName, \"remote\", *WrapRemote)\n\t\tfs.ConfigFileSet(*RemoteName, \"chunk_size\", \"1024\")\n\t\tfs.ConfigFileSet(*RemoteName, \"chunk_total_size\", \"2048\")\n\t\tfs.ConfigFileSet(*RemoteName, \"info_age\", infoAge.String())\n\t}\n\n\t_ = flag.Set(\"cache-chunk-no-memory\", \"true\")\n\t_ = flag.Set(\"cache-workers\", strconv.Itoa(workers))\n\t_ = flag.Set(\"cache-chunk-clean-interval\", chunkClean.String())\n\n\t// Instantiate root\n\trootFs, err = fs.NewFs(*RemoteName + \":\")\n\trequire.NoError(t, err)\n\t_ = rootFs.Features().Purge()\n\trequire.NoError(t, err)\n\terr = rootFs.Mkdir(\"\")\n\trequire.NoError(t, err)\n\n\t// flush cache\n\t_, err = getCacheFs(rootFs)\n\trequire.NoError(t, err)\n}", "func (m *MockDB) Init(arg0 context.Context) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Init\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (token *TokenChaincode) Init(stub shim.ChaincodeStubInterface) peer.Response{\n\tfmt.Println(\"Init Executed\")\n\t\n\tlogger.Debug(\"Init executed - 
DEBUG\") //debugging message outputs\n\treturn shim.Success(nil) //return success\n}", "func (_e *MockQueryCoord_Expecter) Init() *MockQueryCoord_Init_Call {\n\treturn &MockQueryCoord_Init_Call{Call: _e.mock.On(\"Init\")}\n}", "func InitForTesting(webapp_root string) {\n\twebhook.InitRequestSaltForTesting()\n\tinitUrls(webapp_root)\n}", "func (h *MessageHandler) Init(ctx context.Context) error {\n\tm := newMiddleware(h)\n\th.middleware = m\n\n\th.jetTreeUpdater = jet.NewFetcher(h.Nodes, h.JetStorage, h.Bus, h.JetCoordinator)\n\n\th.setHandlersForLight(m)\n\n\treturn nil\n}", "func (t *DTCChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n var err error\n if len(args) != 1 {\n return nil, errors.New(\"Incorrect number of arguments. Expecting 1\")\n }\n if function == \"InitContract\"{\n t.InitContract(stub, args);\n }\n\n if err != nil {\n return nil, err\n }\n return nil, nil\n}", "func (_m *Handler) Start(_a0 context.Context) error {\n\tret := _m.Called(_a0)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context) error); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *Driver) Init(metadata mq.Metadata) error {\n\tmqttMeta, err := parseMQTTMetaData(metadata)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.metadata = mqttMeta\n\tclient, err := m.connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.client = client\n\treturn nil\n\n}", "func (_m *LogPollerWrapper) Start(_a0 context.Context) error {\n\tret := _m.Called(_a0)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context) error); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockSystemContract) Init(arg0 core.Keepers, arg1 types1.BaseTx, arg2 uint64) core.SystemContract {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Init\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(core.SystemContract)\n\treturn ret0\n}" ]
[ "0.62887895", "0.62578917", "0.5977592", "0.579189", "0.57256734", "0.5670382", "0.56410754", "0.56354225", "0.56183684", "0.56087774", "0.5593743", "0.5559057", "0.55394995", "0.5531443", "0.5497528", "0.5489102", "0.548839", "0.5477195", "0.54557586", "0.5453266", "0.54306275", "0.5414318", "0.54141456", "0.5401822", "0.5401822", "0.5401822", "0.5386962", "0.5365212", "0.5361781", "0.53574175", "0.53424704", "0.533721", "0.53319985", "0.5322235", "0.5320909", "0.531928", "0.5316599", "0.53145164", "0.530573", "0.5301487", "0.53008217", "0.5294299", "0.52853334", "0.52804404", "0.5280344", "0.52782977", "0.52782977", "0.5271724", "0.5257291", "0.5246266", "0.5235507", "0.5231615", "0.52303815", "0.5223101", "0.52035105", "0.5202261", "0.5193883", "0.51888055", "0.5182799", "0.5176679", "0.51705635", "0.5161895", "0.5152563", "0.51418257", "0.51336557", "0.5125917", "0.5122521", "0.5120938", "0.51203483", "0.5115844", "0.5110741", "0.5098617", "0.5097393", "0.5096324", "0.50873977", "0.50860304", "0.5082406", "0.5082406", "0.5082406", "0.5081502", "0.50796187", "0.50668126", "0.5065149", "0.5064091", "0.50612134", "0.5058709", "0.50575167", "0.505425", "0.50496083", "0.50390255", "0.5036511", "0.5032268", "0.5028202", "0.5024401", "0.502169", "0.5019443", "0.5018324", "0.5010296", "0.50072473", "0.500406" ]
0.65841055
0
InitPrefix provides a mock function with given fields: prefix
func (_m *Plugin) InitPrefix(prefix config.Prefix) { _m.Called(prefix) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (_m *Knapsack) SetNotaryPrefix(prefix string) error {\n\tret := _m.Called(prefix)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(prefix)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *RequestHeaderMapReadOnly) GetByPrefix(prefix string) map[string][]string {\n\tret := _m.Called(prefix)\n\n\tvar r0 map[string][]string\n\tif rf, ok := ret.Get(0).(func(string) map[string][]string); ok {\n\t\tr0 = rf(prefix)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string][]string)\n\t\t}\n\t}\n\n\treturn r0\n}", "func TestSetPrefix(t *testing.T) {\n\tprefix := Prefix()\n\tdefer SetPrefix(prefix)\n\n\ttests := []struct {\n\t\tname string\n\t\tin string\n\t}{\n\t\t{\n\t\t\tname: \"Set: `hello`\",\n\t\t\tin: \"hello\",\n\t\t},\n\t\t{\n\t\t\tname: \"Set: `hello-world`\",\n\t\t\tin: \"hello-world\",\n\t\t},\n\t}\n\n\t// Don't use parallel tests here.\n\tfor _, tt := range tests {\n\t\tSetPrefix(tt.in)\n\t\tif s := Prefix(); s != tt.in {\n\t\t\tt.Errorf(\"%s: failed, got %s, want %s\",\n\t\t\t\ttt.name, s, tt.in)\n\t\t}\n\t}\n}", "func (_m *Knapsack) NotaryPrefix() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func InitWithPrefix(conf interface{}, prefix string) error {\n\treturn InitWithOptions(conf, Options{Prefix: prefix})\n}", "func (m *MockConnectorInfo) InboxPrefix() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InboxPrefix\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockKeystore) WatchPrefix(prefix string, onUpdate func(*keystoreregistry.KeyValueVersion, *keystoreregistry.KeyValueVersion)) {\n\tm.ctrl.Call(m, \"WatchPrefix\", prefix, onUpdate)\n}", "func NewMockprefixListGetter(ctrl *gomock.Controller) *MockprefixListGetter {\n\tmock := &MockprefixListGetter{ctrl: ctrl}\n\tmock.recorder = 
&MockprefixListGetterMockRecorder{mock}\n\treturn mock\n}", "func (_m *RediStore) SetKeyPrefix(p string) {\n\t_m.Called(p)\n}", "func SetPrefix(prefix string) { std.SetPrefix(prefix) }", "func (fn *FakeName) Prefix() string {\n\treturn random.PickString(fn.Prefixs)\n}", "func (_m *KV) Keys(prefix string, separator string, q *api.QueryOptions) ([]string, *api.QueryMeta, error) {\n\tret := _m.Called(prefix, separator, q)\n\n\tvar r0 []string\n\tif rf, ok := ret.Get(0).(func(string, string, *api.QueryOptions) []string); ok {\n\t\tr0 = rf(prefix, separator, q)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]string)\n\t\t}\n\t}\n\n\tvar r1 *api.QueryMeta\n\tif rf, ok := ret.Get(1).(func(string, string, *api.QueryOptions) *api.QueryMeta); ok {\n\t\tr1 = rf(prefix, separator, q)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(*api.QueryMeta)\n\t\t}\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(string, string, *api.QueryOptions) error); ok {\n\t\tr2 = rf(prefix, separator, q)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (m *MockKeystore) CleanPrefix(prefix string) error {\n\tret := m.ctrl.Call(m, \"CleanPrefix\", prefix)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func SetPrefix(p string) {\n\tprefix = p\n}", "func (n *nodeHeader) setPrefix(p []byte) {\n\tpLen, pBytes := n.prefixFields()\n\n\t// Write to the byte array and set the length field to the num bytes copied\n\t*pLen = uint16(copy(pBytes, p))\n}", "func (l *DockerLib) SetPrefix(p string) {\n\tl.prefix = p\n}", "func ConfigureSetPrefixFunction(pf func() string) {\n\tprefixFunction = pf\n}", "func SetPrefix(pre string) {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tprefix = pre\n}", "func SetPrefix(s string) {\n\tprefix = s\n}", "func SetPrefix(s string) {\n\tprefix = s\n}", "func ConfigureResetPrefix() {\n\tprefixFunction = defaultPrefixFunction\n}", "func TestMapPrefix(t *testing.T) {\n\tbx := NewTestDB()\n\tdefer bx.Close()\n\n\t// Create a 
new things bucket.\n\tthings, err := bx.New([]byte(\"things\"))\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t}\n\n\t// Setup items to insert.\n\titems := []struct {\n\t\tKey, Value []byte\n\t}{\n\t\t{[]byte(\"A\"), []byte(\"1\")}, // `A` prefix match\n\t\t{[]byte(\"AA\"), []byte(\"2\")}, // match\n\t\t{[]byte(\"AAA\"), []byte(\"3\")}, // match\n\t\t{[]byte(\"AAB\"), []byte(\"2\")}, // match\n\t\t{[]byte(\"B\"), []byte(\"O\")},\n\t\t{[]byte(\"BA\"), []byte(\"0\")},\n\t\t{[]byte(\"BAA\"), []byte(\"0\")},\n\t}\n\n\t// Insert 'em.\n\tif err := things.Insert(items); err != nil {\n\t\tt.Error(err.Error())\n\t}\n\n\t// Now collect each item whose key starts with \"A\".\n\tprefix := []byte(\"A\")\n\n\t// Expected items for keys with prefix \"A\".\n\texpected := []struct {\n\t\tKey, Value []byte\n\t}{\n\t\t{[]byte(\"A\"), []byte(\"1\")},\n\t\t{[]byte(\"AA\"), []byte(\"2\")},\n\t\t{[]byte(\"AAA\"), []byte(\"3\")},\n\t\t{[]byte(\"AAB\"), []byte(\"2\")},\n\t}\n\n\t// Setup slice of items to collect results.\n\ttype item struct {\n\t\tKey, Value []byte\n\t}\n\tresults := []item{}\n\n\t// Anon func to map over matched keys.\n\tdo := func(k, v []byte) error {\n\t\tresults = append(results, item{k, v})\n\t\treturn nil\n\t}\n\n\tif err := things.MapPrefix(do, prefix); err != nil {\n\t\tt.Error(err.Error())\n\t}\n\n\tfor i, want := range expected {\n\t\tgot := results[i]\n\t\tif !bytes.Equal(got.Key, want.Key) {\n\t\t\tt.Errorf(\"got %v, want %v\", got.Key, want.Key)\n\t\t}\n\t\tif !bytes.Equal(got.Value, want.Value) {\n\t\t\tt.Errorf(\"got %v, want %v\", got.Value, want.Value)\n\t\t}\n\t}\n}", "func (c *FileSystemCache) Prefix(p ...string) Cache {\n\tc.prefix = p\n\treturn c\n}", "func (_m *KV) List(prefix string, q *api.QueryOptions) (api.KVPairs, *api.QueryMeta, error) {\n\tret := _m.Called(prefix, q)\n\n\tvar r0 api.KVPairs\n\tif rf, ok := ret.Get(0).(func(string, *api.QueryOptions) api.KVPairs); ok {\n\t\tr0 = rf(prefix, q)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = 
ret.Get(0).(api.KVPairs)\n\t\t}\n\t}\n\n\tvar r1 *api.QueryMeta\n\tif rf, ok := ret.Get(1).(func(string, *api.QueryOptions) *api.QueryMeta); ok {\n\t\tr1 = rf(prefix, q)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(*api.QueryMeta)\n\t\t}\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(string, *api.QueryOptions) error); ok {\n\t\tr2 = rf(prefix, q)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func SetPrefix(prefix string) {\n\tstd.SetPrefix(prefix)\n}", "func SetPrefix(prefix string) {\n\tstd.SetPrefix(prefix)\n}", "func (_m *Plugin) Init(ctx context.Context, prefix config.Prefix, callbacks events.Callbacks) error {\n\tret := _m.Called(ctx, prefix, callbacks)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, config.Prefix, events.Callbacks) error); ok {\n\t\tr0 = rf(ctx, prefix, callbacks)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func ConfigureSetPrefix(newPrefix string) {\n\tprefixFunction = func() string {\n\t\treturn newPrefix\n\t}\n}", "func (f *fakeProgressbar) SetPrefix(format string, args ...interface{}) {\n\tf.prefix = fmt.Sprintf(format, args...)\n}", "func (stub *MockStub) MockInit(uuid string, args [][]byte) pb.Response {\n\tstub.args = args\n\tstub.MockTransactionStart(uuid)\n\tres := stub.cc.Init(stub)\n\tstub.MockTransactionEnd(uuid)\n\treturn res\n}", "func TestJSONPrefix(t *testing.T) {\n\tprefix := JSONPrefix()\n\tif prefix != \"\" {\n\t\tt.Errorf(\"JSON default prefix was not \\\"\\\" as expected, found: \\\"%s\\\"\\n\", prefix)\n\t}\n\tSetJSONPrefix(\" \")\n\tprefix = JSONPrefix()\n\tif prefix != \" \" {\n\t\tt.Errorf(\"JSON prefix was just set to \\\" \\\" but when checking it, found: \\\"%s\\\"\\n\", prefix)\n\t}\n\tSetJSONPrefix(\"\")\n}", "func MatchPrefix(prefixes ...string) MatcherFunc { return MatchPrefixes(prefixes) }", "func SetPrefix(p string) {\n\tprefix = strings.ToUpper(p)\n}", "func TestPrefixConfigBuild(t *testing.T) {\n\tdefer 
leaktest.AfterTest(t)\n\tpcc := buildTestPrefixConfigMap()\n\texpPrefixConfigs := []PrefixConfig{\n\t\t{proto.KeyMin, nil, config1},\n\t\t{proto.Key(\"/db1\"), nil, config2},\n\t\t{proto.Key(\"/db1/table\"), nil, config3},\n\t\t{proto.Key(\"/db1/tablf\"), nil, config2},\n\t\t{proto.Key(\"/db2\"), nil, config1},\n\t\t{proto.Key(\"/db3\"), nil, config4},\n\t\t{proto.Key(\"/db4\"), nil, config1},\n\t}\n\tverifyPrefixConfigMap(pcc, expPrefixConfigs, t)\n}", "func Prefix(p string) Option {\n\treturn func(s *storage) {\n\t\ts.prefix = p\n\t}\n}", "func initPrefix(colored bool) {\n\tif colored {\n\t\tPrefix[TRACE] = fmt.Sprintf(ColoredPrefixFormat, GRAY, TRACE.String())\n\t\tPrefix[DEBUG] = fmt.Sprintf(ColoredPrefixFormat, GREEN, DEBUG.String())\n\t\tPrefix[INFO] = fmt.Sprintf(ColoredPrefixFormat, BLUE, INFO.String())\n\t\tPrefix[WARNING] = fmt.Sprintf(ColoredPrefixFormat, YELLOW, WARNING.String())\n\t\tPrefix[ERROR] = fmt.Sprintf(ColoredPrefixFormat, RED, ERROR.String())\n\t\tPrefix[CRITICAL] = fmt.Sprintf(ColoredPrefixFormat, RED, CRITICAL.String())\n\t} else {\n\t\tPrefix[TRACE] = fmt.Sprintf(PrefixFormat, TRACE.String())\n\t\tPrefix[DEBUG] = fmt.Sprintf(PrefixFormat, DEBUG.String())\n\t\tPrefix[INFO] = fmt.Sprintf(PrefixFormat, INFO.String())\n\t\tPrefix[WARNING] = fmt.Sprintf(PrefixFormat, WARNING.String())\n\t\tPrefix[ERROR] = fmt.Sprintf(PrefixFormat, ERROR.String())\n\t\tPrefix[CRITICAL] = fmt.Sprintf(PrefixFormat, CRITICAL.String())\n\t}\n}", "func (f *InteractiveFiller) makePrefix(field *desc.FieldDescriptor) string {\n\treturn makePrefix(f.prefixFormat, field, f.state.ancestor, f.state.hasAncestorAndHasRepeatedField)\n}", "func InitWithPrefix(conf interface{}, prefix string) error {\n\tvalue := reflect.ValueOf(conf)\n\tif value.Kind() != reflect.Ptr {\n\t\treturn errors.New(\"envconfig: value is not a pointer\")\n\t}\n\n\telem := value.Elem()\n\n\tswitch elem.Kind() {\n\tcase reflect.Ptr:\n\t\telem.Set(reflect.New(elem.Type().Elem()))\n\t\treturn 
readStruct(elem.Elem(), prefix, false)\n\tcase reflect.Struct:\n\t\treturn readStruct(elem, prefix, false)\n\tdefault:\n\t\treturn errors.New(\"envconfig: invalid value kind, only works on structs\")\n\t}\n}", "func NewPrefix(v string) Value {\n\treturn prefixValue(v)\n}", "func TestFieldWithPrefixNoGlobal(t *testing.T) {\n\n\ttype Test struct {\n\t\tName string `pms:\"test, prefix=simple,tag1=nanna banna panna\"`\n\t}\n\n\tvar test Test\n\ttp := reflect.ValueOf(&test)\n\tnode, err := New(\"test-service\", \"dev\", \"\").\n\t\tRegisterTagParser(\"pms\", NewTagParser([]string{})).\n\t\tParse(tp)\n\n\tif err != nil {\n\t\tassert.Equal(t, nil, err)\n\t}\n\n\tassert.Equal(t,\n\t\t\"/dev/test-service/simple\",\n\t\tnode.Childs[0].Tag[\"pms\"].GetNamed()[\"prefix\"],\n\t)\n\n\t// DumpNode(node)\n}", "func (c *Client) Prefix(s string) {\n\tc.prefix = s\n}", "func WithPrefix(preffix string) opt {\n\tif !strings.HasSuffix(preffix, \"_\") {\n\t\tpreffix = preffix + \"_\"\n\t}\n\treturn func(key string) string {\n\t\treturn preffix + key\n\t}\n}", "func (b *bar) SetPrefix(prefix string) {\n\tb.mutex.Lock()\n\tdefer b.mutex.Unlock()\n\n\tb.prefix = prefix\n}", "func (client *RestClient) SetPrefix(newPrefix string) {\n\tclient.prefix = newPrefix\n}", "func (r *Routing) Prefix(prefix string, f func()) {\n\n\tdefer func() {\n\t\tr.routerWithPrefix = nil\n\t\tif len(r.prefixes) > 0 {\n\t\t\tr.prefixes = r.prefixes[:len(r.prefixes)-1]\n\t\t}\n\t}()\n\n\tif len(prefix) == 0 {\n\t\tpanic(\"Prefix(): the prefix can't be empty\")\n\t}\n\n\tr.prefixes = append(r.prefixes, prefix)\n\n\tvar mergePrefix = strings.Join(r.prefixes, \"/\")\n\n\tr.routerWithPrefix = r.Router.PathPrefix(fmt.Sprintf(\"/%s\", mergePrefix)).Subrouter().StrictSlash(true)\n\tf()\n\n}", "func (f *Faker) NamePrefix() string { return namePrefix(f.Rand) }", "func (a *AbbrFieldNamer) SetPrefix(s string) {\n\ta.Prefix = s\n}", "func (_m *MockStateStore) Init(metadata state.Metadata) error {\n\tret := 
_m.Called(metadata)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(state.Metadata) error); ok {\n\t\tr0 = rf(metadata)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func NewPrefix(p string) *Prefix {\n\tif len(p) > 0 && p[0] == '_' {\n\t\tp = p[1:]\n\t}\n\tpp := Prefix(p)\n\treturn &pp\n}", "func Prefix(prefix string) Option {\n\treturn func(s *Store) {\n\t\ts.prefix = prefix\n\t}\n}", "func (x *fastReflection_Bech32PrefixRequest) New() protoreflect.Message {\n\treturn new(fastReflection_Bech32PrefixRequest)\n}", "func AzurePrefix(prefix string) func(az *TierAzure) error {\n\treturn func(az *TierAzure) error {\n\t\taz.Prefix = prefix\n\t\treturn nil\n\t}\n}", "func (p *Periph) StorePREFIX(n int, prefix uint32) {\n\tp.prefix[n].Store(prefix)\n}", "func NamePrefix() string { return namePrefix(globalFaker.Rand) }", "func Mock(fake string) func() {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\torigin := backend\n\tbackend = fake\n\treturn func() { Mock(origin) }\n}", "func Test01GetPrefix(t *testing.T) {\n\tgomega.RegisterTestingT(t)\n\n\tprefixStack := getPrefixStack()\n\tresult := prefixStack.GetPrefix()\n\tgomega.Expect(result).To(gomega.BeEquivalentTo(\"-\"))\n}", "func Init(prefix string) {\n\tonce.Do(func() {\n\t\trdc := redis.NewClient(&redis.Options{\n\t\t\tAddr: \"localhost:6379\",\n\t\t\tPassword: \"\",\n\t\t\tDB: 0,\n\t\t})\n\n\t\tif prefix == \"\" {\n\t\t\tprefix = defaultPrefix\n\t\t}\n\n\t\tclient = &Client{\n\t\t\tprefix: prefix,\n\t\t\trdc: rdc,\n\t\t}\n\n\t\t_, err := client.rdc.Ping().Result()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Could not connect to redis %v\", err)\n\t\t}\n\n\t})\n}", "func TestFieldWithPrefixGlobal(t *testing.T) {\n\n\ttype Test struct {\n\t\tName string `pms:\"test, prefix=/global/simple,tag1=nanna banna panna\"`\n\t}\n\n\tvar test Test\n\ttp := reflect.ValueOf(&test)\n\tnode, err := New(\"test-service\", \"dev\", \"\").\n\t\tRegisterTagParser(\"pms\", NewTagParser([]string{})).\n\t\tParse(tp)\n\n\tif err != 
nil {\n\t\tassert.Equal(t, nil, err)\n\t}\n\n\tassert.Equal(t,\n\t\t\"/dev/global/simple\",\n\t\tnode.Childs[0].Tag[\"pms\"].GetNamed()[\"prefix\"],\n\t)\n\n\t// DumpNode(node)\n}", "func WithPrefix(metricPrefix string) Option {\n\treturn func(ro *registerOptions) {\n\t\tro.metricPrefix = metricPrefix\n\t}\n}", "func (mlog *MultiLogger) SetPrefix(prefix string) {\n\tmlog.prefix = []byte(prefix)\n}", "func (p *Periph) LoadPREFIX(n int) uint32 {\n\treturn p.prefix[n].Load()\n}", "func TestGetLeastPopulatedNamespace(t *testing.T) {\n\n\t// Test Data\n\tnamespaceName1 := \"TestNamespaceName1\"\n\tnamespaceName2 := \"TestNamespaceName2\"\n\tnamespaceName3 := \"TestNamespaceName3\"\n\tnamespaceName4 := \"TestNamespaceName4\"\n\tnamespaceName5 := \"TestNamespaceName5\"\n\tnamespaceCount1 := 4\n\tnamespaceCount2 := 2\n\tnamespaceCount3 := 0\n\tnamespaceCount4 := 2\n\tnamespaceCount5 := 1\n\n\t// Create A Mock HubManager\n\tmockHubManager := &MockHubManager{}\n\n\t// Replace The NewHubManagerFromConnectionString Wrapper To Provide Mock Implementation & Defer Reset\n\tnewHubManagerFromConnectionStringWrapperPlaceholder := NewHubManagerFromConnectionStringWrapper\n\tNewHubManagerFromConnectionStringWrapper = func(connectionString string) (managerInterface HubManagerInterface, e error) {\n\t\treturn mockHubManager, nil\n\t}\n\tdefer func() { NewHubManagerFromConnectionStringWrapper = newHubManagerFromConnectionStringWrapperPlaceholder }()\n\n\t// Create A Test Logger\n\tlogger := logtesting.TestLogger(t).Desugar()\n\n\t// Create The Cache's Namespace Map\n\tnamespaceMap := make(map[string]*Namespace)\n\tnamespaceMap[namespaceName1], _ = createTestNamespaceWithCount(logger, namespaceName1, namespaceCount1)\n\tnamespaceMap[namespaceName2], _ = createTestNamespaceWithCount(logger, namespaceName2, namespaceCount2)\n\tnamespaceMap[namespaceName3], _ = createTestNamespaceWithCount(logger, namespaceName3, namespaceCount3)\n\tnamespaceMap[namespaceName4], _ = 
createTestNamespaceWithCount(logger, namespaceName4, namespaceCount4)\n\tnamespaceMap[namespaceName5], _ = createTestNamespaceWithCount(logger, namespaceName5, namespaceCount5)\n\n\t// Create A Cache To Test\n\tcache := &Cache{\n\t\tlogger: logger,\n\t\tnamespaceMap: namespaceMap,\n\t}\n\n\t// Perform The Test\n\tnamespace := cache.GetLeastPopulatedNamespace()\n\n\t// Verify Results\n\tassert.NotNil(t, namespace)\n\tassert.Equal(t, namespaceName3, namespace.Name)\n\tassert.Equal(t, namespaceCount3, namespace.Count)\n}", "func (p *Parser) registerPrefix(tokenType token.TokenType, fn PrefixParseFn) {\n\tp.prefixParseFns[tokenType] = fn\n}", "func TestMatchByPrefix(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tpcc := buildTestPrefixConfigMap()\n\ttestData := []struct {\n\t\tkey proto.Key\n\t\texpConfig interface{}\n\t}{\n\t\t{proto.KeyMin, config1},\n\t\t{proto.Key(\"\\x01\"), config1},\n\t\t{proto.Key(\"/db\"), config1},\n\t\t{proto.Key(\"/db1\"), config2},\n\t\t{proto.Key(\"/db1/a\"), config2},\n\t\t{proto.Key(\"/db1/table1\"), config3},\n\t\t{proto.Key(\"/db1/table\\xff\"), config3},\n\t\t{proto.Key(\"/db2\"), config1},\n\t\t{proto.Key(\"/db3\"), config4},\n\t\t{proto.Key(\"/db3\\xff\"), config4},\n\t\t{proto.Key(\"/db5\"), config1},\n\t\t{proto.Key(\"/xfe\"), config1},\n\t\t{proto.Key(\"/xff\"), config1},\n\t}\n\tfor i, test := range testData {\n\t\tpc := pcc.MatchByPrefix(test.key)\n\t\tif test.expConfig != pc.Config {\n\t\t\tt.Errorf(\"%d: expected config %v for %q; got %v\", i, test.expConfig, test.key, pc.Config)\n\t\t}\n\t}\n}", "func (_m *MockStore) Init(opts ...store.Option) error {\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(...store.Option) error); ok {\n\t\tr0 = rf(opts...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (p *Parser) 
registerPrefix(tokenType token.TokenType, fn prefixParseFn) {\n\tp.prefixParseFns[tokenType] = fn\n}", "func(m *QueryMatch) WithPrefixLength(_length int){\n\tm.Options[\"prefix_length\"]=_length\n\n}", "func (g *Gopher) SetPrefix(s string) {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tg.prefix = s\n}", "func Prefix(value string) Option {\n\treturn addParam(\"prefix\", value)\n}", "func Prefix(prefix string) Option {\n\treturn func(o *options) {\n\t\to.prefix = prefix\n\t}\n}", "func (tbl RecordTable) WithPrefix(pfx string) RecordTable {\n\ttbl.name.Prefix = pfx\n\treturn tbl\n}", "func (_m *KV) ListMap(prefix string, offset string) (map[string]interface{}, error) {\n\tret := _m.Called(prefix, offset)\n\n\tvar r0 map[string]interface{}\n\tif rf, ok := ret.Get(0).(func(string, string) map[string]interface{}); ok {\n\t\tr0 = rf(prefix, offset)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string]interface{})\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = rf(prefix, offset)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func resourceNetboxIpamPrefixCreate(d *schema.ResourceData, meta interface{}) error {\n\tnetboxClient := meta.(*ProviderNetboxClient).client\n\n\tprefix := d.Get(\"prefix\").(string)\n\tdescription := d.Get(\"description\").(string)\n\tvrfID := int64(d.Get(\"vrf_id\").(int))\n\tisPool := d.Get(\"is_pool\").(bool)\n\t//status := d.Get(\"status\").(string)\n\ttenantID := int64(d.Get(\"tenant_id\").(int))\n\n\tvar parm = ipam.NewIPAMPrefixesCreateParams().WithData(\n\t\t&models.PrefixCreateUpdate{\n\t\t\tPrefix: &prefix,\n\t\t\tDescription: description,\n\t\t\tIsPool: isPool,\n\t\t\tTags: []string{},\n\t\t\tVrf: vrfID,\n\t\t\tTenant: tenantID,\n\t\t},\n\t)\n\n\tlog.Debugf(\"Executing IPAMPrefixesCreate against Netbox: %v\", parm)\n\n\tout, err := netboxClient.IPAM.IPAMPrefixesCreate(parm, nil)\n\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to execute 
IPAMPrefixesCreate: %v\", err)\n\n\t\treturn err\n\t}\n\n\t// TODO Probably a better way to parse this ID\n\td.SetId(fmt.Sprintf(\"ipam/prefix/%d\", out.Payload.ID))\n\td.Set(\"prefix_id\", out.Payload.ID)\n\n\tlog.Debugf(\"Done Executing IPAMPrefixesCreate: %v\", out)\n\n\treturn nil\n}", "func (m *Model) FieldsPrefix(prefix string, fieldNamesOrMapStruct ...interface{}) *Model {\n\tfields := m.getFieldsFrom(fieldNamesOrMapStruct...)\n\tif len(fields) == 0 {\n\t\treturn m\n\t}\n\tgstr.PrefixArray(fields, prefix+\".\")\n\treturn m.appendFieldsByStr(gstr.Join(fields, \",\"))\n}", "func RunFeatureStorePrefixIndependenceTests(t *testing.T,\n\tmakeStoreWithPrefix func(string) (ld.FeatureStore, error),\n\tclearExistingData func() error) {\n\n\trunWithPrefixes := func(t *testing.T, name string, test func(*testing.T, ld.FeatureStore, ld.FeatureStore)) {\n\t\terr := clearExistingData()\n\t\trequire.NoError(t, err)\n\t\tstore1, err := makeStoreWithPrefix(\"aaa\")\n\t\trequire.NoError(t, err)\n\t\tstore2, err := makeStoreWithPrefix(\"bbb\")\n\t\trequire.NoError(t, err)\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\ttest(t, store1, store2)\n\t\t})\n\t}\n\n\trunWithPrefixes(t, \"Init\", func(t *testing.T, store1 ld.FeatureStore, store2 ld.FeatureStore) {\n\t\tassert.False(t, store1.Initialized())\n\t\tassert.False(t, store2.Initialized())\n\n\t\titem1a := &MockDataItem{Key: \"flag-a\", Version: 1}\n\t\titem1b := &MockDataItem{Key: \"flag-b\", Version: 1}\n\t\titem2a := &MockDataItem{Key: \"flag-a\", Version: 2}\n\t\titem2c := &MockDataItem{Key: \"flag-c\", Version: 2}\n\n\t\tdata1 := makeMockDataMap(item1a, item1b)\n\t\tdata2 := makeMockDataMap(item2a, item2c)\n\n\t\terr := store1.Init(data1)\n\t\trequire.NoError(t, err)\n\n\t\tassert.True(t, store1.Initialized())\n\t\tassert.False(t, store2.Initialized())\n\n\t\terr = store2.Init(data2)\n\t\trequire.NoError(t, err)\n\n\t\tassert.True(t, store1.Initialized())\n\t\tassert.True(t, store2.Initialized())\n\n\t\tnewItems1, err := 
store1.All(MockData)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, data1[MockData], newItems1)\n\n\t\tnewItem1a, err := store1.Get(MockData, item1a.Key)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, item1a, newItem1a)\n\n\t\tnewItem1b, err := store1.Get(MockData, item1b.Key)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, item1b, newItem1b)\n\n\t\tnewItems2, err := store2.All(MockData)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, data2[MockData], newItems2)\n\n\t\tnewItem2a, err := store2.Get(MockData, item2a.Key)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, item2a, newItem2a)\n\n\t\tnewItem2c, err := store2.Get(MockData, item2c.Key)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, item2c, newItem2c)\n\t})\n\n\trunWithPrefixes(t, \"Upsert/Delete\", func(t *testing.T, store1 ld.FeatureStore, store2 ld.FeatureStore) {\n\t\tassert.False(t, store1.Initialized())\n\t\tassert.False(t, store2.Initialized())\n\n\t\tkey := \"flag\"\n\t\titem1 := &MockDataItem{Key: key, Version: 1}\n\t\titem2 := &MockDataItem{Key: key, Version: 2}\n\n\t\t// Insert the one with the higher version first, so we can verify that the version-checking logic\n\t\t// is definitely looking in the right namespace\n\t\terr := store2.Upsert(MockData, item2)\n\t\trequire.NoError(t, err)\n\t\terr = store1.Upsert(MockData, item1)\n\t\trequire.NoError(t, err)\n\n\t\tnewItem1, err := store1.Get(MockData, key)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, item1, newItem1)\n\n\t\tnewItem2, err := store2.Get(MockData, key)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, item2, newItem2)\n\n\t\terr = store1.Delete(MockData, key, 2)\n\t\trequire.NoError(t, err)\n\n\t\tnewItem1a, err := store1.Get(MockData, key)\n\t\trequire.NoError(t, err)\n\t\tassert.Nil(t, newItem1a)\n\t})\n}", "func (_m *TranslationKeyStore) CacheOnStart() error {\n\tret := _m.Called()\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn 
r0\n}", "func SetPrefix(prefix string) Option {\n\treturn func(g *Generator) error {\n\t\tg.Prefix = prefix\n\t\treturn nil\n\t}\n}", "func (m *MockIModel) MapFromByFieldName(src interface{}) interfaces.IModel {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"MapFromByFieldName\", src)\n\tret0, _ := ret[0].(interfaces.IModel)\n\treturn ret0\n}", "func Test_Init(t *testing.T) {\n\tscc := new(SmartContract)\n\tstub := shim.NewMockStub(\"hcdm\", scc)\n\n\t// Init A=123 B=234\n\tcheckInit(t, stub)\n\n}", "func SetPrefix(prefix string) string {\n\tdefer logger.SetPrefix(prefix)\n\told := tags[0]\n\ttags[0] = prefix\n\treturn old\n}", "func ResetPrefix(in chan int,\n\tout chan int,\n\treset chan int,\n\tinitial int) {\n\tvar v, r int = 0, 0\n\tout <- initial\n\tfor {\n\t\tselect {\n\t\tcase r = <-reset:\n\t\t\t<-in\n\t\t\tout <- r\n\t\tcase v = <-in:\n\t\t\tout <- v\n\t\t}\n\t}\n}", "func (p *Parser) registerPrefix(tokenType token.Type, fn prefixParseFn) {\n\tp.prefixParseFns[tokenType] = fn\n}", "func (m *Member) SetTablePrefix(prefix string) {\r\n\tm.ecosystem = converter.StrToInt64(prefix)\r\n}", "func (puo *PrenameUpdateOne) SetPrefix(s string) *PrenameUpdateOne {\n\tpuo.mutation.SetPrefix(s)\n\treturn puo\n}", "func (f *MemKv) newPrefixWatcher(ctx context.Context, prefix string, fromVersion string) (*watcher, error) {\n\tif !strings.HasSuffix(prefix, \"/\") {\n\t\tprefix += \"/\"\n\t}\n\treturn f.watch(ctx, prefix, fromVersion, true)\n}", "func instPrefix(b byte, mode int) (Inst, error) {\n\t// When tracing it is useful to see what called instPrefix to report an error.\n\tif trace {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"%s:%d\\n\", file, line)\n\t}\n\tp := Prefix(b)\n\tswitch p {\n\tcase PrefixDataSize:\n\t\tif mode == 16 {\n\t\t\tp = PrefixData32\n\t\t} else {\n\t\t\tp = PrefixData16\n\t\t}\n\tcase PrefixAddrSize:\n\t\tif mode == 32 {\n\t\t\tp = PrefixAddr16\n\t\t} else {\n\t\t\tp = PrefixAddr32\n\t\t}\n\t}\n\t// Note: using composite literal 
with Prefix key confuses 'bundle' tool.\n\tinst := Inst{Len: 1}\n\tinst.Prefix = Prefixes{p}\n\treturn inst, nil\n}", "func (c CidStruct) Prefix() Prefix {\n\tdec, _ := mh.Decode(c.hash) // assuming we got a valid multiaddr, this will not error\n\treturn Prefix{\n\t\tMhType: dec.Code,\n\t\tMhLength: dec.Length,\n\t\tVersion: c.version,\n\t\tCodec: c.codec,\n\t}\n}", "func PrefixDetector(prefix string, handler Handler) Detector {\n\treturn Detector{\n\t\tNeeded: len([]byte(prefix)),\n\t\tTest: func(b []byte) bool { return string(b) == prefix },\n\t\tHandler: handler,\n\t}\n}", "func (m *margopher) getRandomPrefix(prefix [2]string) [2]string {\n\t// By default, Go orders keys randomly for maps\n\tfor key := range m.states {\n\t\tif key != prefix {\n\t\t\tprefix = key\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn prefix\n}", "func ExampleBucket_MapPrefix() {\n\tbx, _ := buckets.Open(tempfile())\n\tdefer os.Remove(bx.Path())\n\tdefer bx.Close()\n\n\t// Create a new things bucket.\n\tthings, _ := bx.New([]byte(\"things\"))\n\n\t// Setup items to insert.\n\titems := []struct {\n\t\tKey, Value []byte\n\t}{\n\t\t{[]byte(\"A\"), []byte(\"1\")}, // `A` prefix match\n\t\t{[]byte(\"AA\"), []byte(\"2\")}, // match\n\t\t{[]byte(\"AAA\"), []byte(\"3\")}, // match\n\t\t{[]byte(\"AAB\"), []byte(\"2\")}, // match\n\t\t{[]byte(\"B\"), []byte(\"O\")},\n\t\t{[]byte(\"BA\"), []byte(\"0\")},\n\t\t{[]byte(\"BAA\"), []byte(\"0\")},\n\t}\n\n\t// Insert 'em.\n\tif err := things.Insert(items); err != nil {\n\t\tfmt.Printf(\"could not insert items in `things` bucket: %v\\n\", err)\n\t}\n\n\t// Now collect each item whose key starts with \"A\".\n\tprefix := []byte(\"A\")\n\n\t// Setup slice of items.\n\ttype item struct {\n\t\tKey, Value []byte\n\t}\n\tresults := []item{}\n\n\t// Anon func to map over matched keys.\n\tdo := func(k, v []byte) error {\n\t\tresults = append(results, item{k, v})\n\t\treturn nil\n\t}\n\n\tif err := things.MapPrefix(do, prefix); err != nil {\n\t\tfmt.Printf(\"could not map 
items with prefix %s: %v\\n\", prefix, err)\n\t}\n\n\tfor _, item := range results {\n\t\tfmt.Printf(\"%s -> %s\\n\", item.Key, item.Value)\n\t}\n\t// Output:\n\t// A -> 1\n\t// AA -> 2\n\t// AAA -> 3\n\t// AAB -> 2\n}", "func setPrefix(prefix string, m map[string]string) map[string]string {\n\tn := make(map[string]string)\n\tfor k, v := range m {\n\t\tn[prefix+k] = prefix + v\n\t}\n\n\treturn n\n}", "func (m *MockprefixListGetter) CloudFrontManagedPrefixListID() (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CloudFrontManagedPrefixListID\")\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func PrefixRenamer(prefix string) Renamer {\n\treturn func(name string) string {\n\t\treturn prefix + name\n\t}\n}", "func WithPrefix(prefix string) OptFunc {\n\treturn func(l *Logger) {\n\t\tl.SetPrefix(prefix)\n\t}\n}", "func checkPrefix(t *testing.T, prefix string, targetString, networkName string) {\n\tif strings.Index(targetString, prefix) != 0 {\n\t\tt.Logf(\"Address prefix mismatch for <%s>: expected <%s> received <%s>\",\n\t\t\tnetworkName, prefix, targetString)\n\t\tt.FailNow()\n\t}\n}", "func newPrefix(targetPrefix []string) (prefixes []models.Prefix, err error) {\n\tprefixes = make([]models.Prefix, len(targetPrefix))\n\tfor i, cidr := range targetPrefix {\n\t\tprefix, err := models.NewPrefix(cidr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tprefixes[i] = prefix\n\t}\n\treturn\n}", "func (rad *Radix) Prefix(prefix string) *list.List {\n\trad.lock.Lock()\n\tdefer rad.lock.Unlock()\n\tl := list.New()\n\tn, _ := rad.root.lookup([]rune(prefix))\n\tif n == nil {\n\t\treturn l\n\t}\n\tn.addToList(l)\n\treturn l\n}", "func WithPrefix(value string) Option {\n\treturn func(opts *options) error {\n\t\topts.prefix = value\n\t\t// No error\n\t\treturn nil\n\t}\n}", "func (b UpdateBuilder) Prefix(sql string, args ...interface{}) UpdateCondition {\n\treturn builder.Append(b, \"Prefixes\", Expr(sql, 
args...)).(UpdateBuilder)\n}", "func (r *Router) UsePrefix(prefix string) {\n\tr.prefix = prefix\n}" ]
[ "0.6717542", "0.6362641", "0.632077", "0.63053375", "0.62258774", "0.61870056", "0.61736923", "0.6170196", "0.6011685", "0.5768026", "0.5754696", "0.5752619", "0.5746645", "0.57363063", "0.5678546", "0.566307", "0.55710304", "0.55613947", "0.5543519", "0.5543519", "0.5521867", "0.55019194", "0.54716104", "0.5463708", "0.5451917", "0.5451917", "0.54178077", "0.5391116", "0.5378699", "0.5375881", "0.53675646", "0.53669506", "0.5356597", "0.5286839", "0.5269165", "0.52492285", "0.5228955", "0.52216965", "0.52056557", "0.5198797", "0.5185394", "0.5170899", "0.51452357", "0.51384026", "0.513595", "0.51332515", "0.5133153", "0.5124522", "0.5105012", "0.51047814", "0.5097505", "0.5097454", "0.50882155", "0.5073039", "0.5068716", "0.50627124", "0.5061966", "0.5056594", "0.505047", "0.5047164", "0.5043996", "0.5039599", "0.50365317", "0.5027658", "0.5026967", "0.5021851", "0.5013879", "0.5013556", "0.5010582", "0.50100356", "0.4986374", "0.49840233", "0.49780408", "0.49729815", "0.49698785", "0.49657804", "0.49647677", "0.49644896", "0.49630496", "0.49602112", "0.49570167", "0.49552354", "0.4954886", "0.49513185", "0.49471903", "0.49468493", "0.49423587", "0.49371412", "0.49341306", "0.49226007", "0.4915272", "0.49135593", "0.49099725", "0.49086684", "0.49076828", "0.49049369", "0.49038795", "0.49036318", "0.48959792", "0.4892568" ]
0.6460963
1
Name provides a mock function with given fields:
func (_m *Plugin) Name() string { ret := _m.Called() var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() } else { r0 = ret.Get(0).(string) } return r0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Test_Client_MapByName(t *testing.T) {\n\t// should map by name\n\tret := mockClient.MapByName(\"South Korea\")\n\tassert.Equal(t, ret.Name, \"South Korea\")\n\tassert.Equal(t, ret.Alpha2, \"KR\")\n\tassert.Equal(t, ret.Alpha3, \"KOR\")\n\tassert.Equal(t, ret.Capital, \"Seoul\")\n\tassert.Equal(t, ret.Currency, []string{\"KRW\"})\n\tassert.Equal(t, ret.CallingCode, []string{\"82\"})\n\tassert.Equal(t, ret.Region, \"Asia\")\n\tassert.Equal(t, ret.Subregion, \"Eastern Asia\")\n\n\t// should be able to map different variations of name\n\tret = mockClient.MapByName(\"south korea\")\n\tassert.Equal(t, ret.Name, \"South Korea\")\n\n\tret = mockClient.MapByName(\"대한민국\")\n\tassert.Equal(t, ret.Name, \"South Korea\")\n\n\t// should return nil when you try to map names not commonly used\n\tret = mockClient.MapByName(\"southkorea\")\n\tassert.Nil(t, ret)\n}", "func (_m *Forge) Name() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (_m *mockUpdater) Name() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (_m *FieldReader) Name() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (_m *MockConnection) Name() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (_m *MockStore) Name() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (_m *ServerConnexion) Rename(sSource string, sDestination string) 
error {\n\tret := _m.Called(sSource, sDestination)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string) error); ok {\n\t\tr0 = rf(sSource, sDestination)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockCall) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockCache) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockAPI) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *Service) Name() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (_m *IDepTool) Name() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (_m *LogPollerWrapper) Name() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (c *Mock) Name() string {\n\treturn c.FakeName()\n}", "func (_m *T) Name() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (m *MockProc) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockAccessPolicyMeshEnforcer) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockIModel) MapFromByFieldName(src interface{}) interfaces.IModel {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, 
\"MapFromByFieldName\", src)\n\tret0, _ := ret[0].(interfaces.IModel)\n\treturn ret0\n}", "func (_m *TestingT) Name() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (m *MockDynamicCertPrivate) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (p *PrincipalMock) Name() string {\n\treturn p.NameFunc()\n}", "func (m *MockMember) Name() string {\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockCandidatePropertyGetter) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *EventBroadcaster) Name() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (m *MockSpaceStorage) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *System) Name() (types.Text, error) {\n\tret := _m.Called()\n\n\tvar r0 types.Text\n\tif rf, ok := ret.Get(0).(func() types.Text); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(types.Text)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = rf()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockIModel) MapToByFieldName(dest interface{}) interfaces.IModel {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"MapToByFieldName\", dest)\n\tret0, _ := ret[0].(interfaces.IModel)\n\treturn ret0\n}", "func (m *MockFile) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (s *MyTestStruct) Name() string {\n\treturn s.field_Name\n}", "func (m *MockUpstreamIntf) Name() string 
{\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (p *TestProvider) Name() string {\n\treturn p.Called().String(0)\n}", "func (mock *PluginerMock) NameCalls() []struct {\n} {\n\tvar calls []struct {\n\t}\n\tmock.lockName.RLock()\n\tcalls = mock.calls.Name\n\tmock.lockName.RUnlock()\n\treturn calls\n}", "func (m *MockWatcher) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *MockAuth) AuthName() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (m *MockServiceReconciler) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *MockUserRepositoryProvider) GetByNameAndSurname(name string, surname string) (*model.User, error) {\n\tret := _m.Called(name, surname)\n\n\tvar r0 *model.User\n\tif rf, ok := ret.Get(0).(func(string, string) *model.User); ok {\n\t\tr0 = rf(name, surname)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*model.User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = rf(name, surname)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (u *MockUserRecord) RealName() string { return \"\" }", "func (c *MockRemoteWriteClient) Name() string { return \"\" }", "func (m *MockFileInfo) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *IReplyType) GetName() agentcontracts.ResultType {\n\tret := _m.Called()\n\n\tvar r0 agentcontracts.ResultType\n\tif rf, ok := ret.Get(0).(func() agentcontracts.ResultType); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(agentcontracts.ResultType)\n\t}\n\n\treturn r0\n}", "func (m *MockMachine) 
Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (fp MockProvider) Name() string {\n\treturn fp.faux.Name()\n}", "func (m *MockBaseEvent) GetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *MockDatabase) GetUserByName(name string) *model.User {\n\tret := _m.Called(name)\n\n\tvar r0 *model.User\n\tif rf, ok := ret.Get(0).(func(string) *model.User); ok {\n\t\tr0 = rf(name)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*model.User)\n\t\t}\n\t}\n\n\treturn r0\n}", "func Test_Client_MapByAlpha2(t *testing.T) {\n\tret := mockClient.MapByAlpha2(\"SG\")\n\tassert.Equal(t, ret.Name, \"Singapore\")\n}", "func (m *MockDao) ChangeName(info *model.Info) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ChangeName\", info)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *VolumeCreator) CheckName(ctx context.Context, name string) error {\n\tret := _m.Called(ctx, name)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = rf(ctx, name)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockHistoryManager) GetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *RedsyncConn) Get(name string) (string, error) {\n\tret := _m.Called(name)\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = rf(name)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(name)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func mockOlt() *fields {\n\tdh := newMockDeviceHandler()\n\tnewOlt := &fields{}\n\tnewOlt.deviceHandlers = map[string]*DeviceHandler{}\n\tnewOlt.deviceHandlers[dh.device.Id] = dh\n\treturn newOlt\n}", "func (m 
*MockRemotePeer) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *SecretClient) Get(name string) (string, error) {\n\tret := _m.Called(name)\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = rf(name)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(name)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockShardManager) GetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *Entity) EntityName() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (_m *UserRepositoryI) CheckNamePassword(tx database.TransactionI, name string, password string) (int32, *models.UserPublicInfo, error) {\n\tret := _m.Called(tx, name, password)\n\n\tvar r0 int32\n\tif rf, ok := ret.Get(0).(func(database.TransactionI, string, string) int32); ok {\n\t\tr0 = rf(tx, name, password)\n\t} else {\n\t\tr0 = ret.Get(0).(int32)\n\t}\n\n\tvar r1 *models.UserPublicInfo\n\tif rf, ok := ret.Get(1).(func(database.TransactionI, string, string) *models.UserPublicInfo); ok {\n\t\tr1 = rf(tx, name, password)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(*models.UserPublicInfo)\n\t\t}\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(database.TransactionI, string, string) error); ok {\n\t\tr2 = rf(tx, name, password)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func Mock(fake string) func() {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\torigin := backend\n\tbackend = fake\n\treturn func() { Mock(origin) }\n}", "func (m *MockPlayer) GetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetName\")\n\tret0, _ := 
ret[0].(string)\n\treturn ret0\n}", "func (_m *TemplatesRepositoryMock) GetByName(_a0 string) (*Template, error) {\n\tret := _m.Called(_a0)\n\n\tvar r0 *Template\n\tif rf, ok := ret.Get(0).(func(string) *Template); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*Template)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *IRepository) Store(name string, age int) error {\n\tret := _m.Called(name, age)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, int) error); ok {\n\t\tr0 = rf(name, age)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockHostEvent) GetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockTaskManager) GetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_m *MockORM) Get(name string) (interface{}, bool) {\n\tret := _m.Called(name)\n\n\tvar r0 interface{}\n\tif rf, ok := ret.Get(0).(func(string) interface{}); ok {\n\t\tr0 = rf(name)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(interface{})\n\t\t}\n\t}\n\n\tvar r1 bool\n\tif rf, ok := ret.Get(1).(func(string) bool); ok {\n\t\tr1 = rf(name)\n\t} else {\n\t\tr1 = ret.Get(1).(bool)\n\t}\n\n\treturn r0, r1\n}", "func (_m *ContainerIface) Data(path string, name string) (string, bool) {\n\tret := _m.Called(path, name)\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func(string, string) string); ok {\n\t\tr0 = rf(path, name)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\tvar r1 bool\n\tif rf, ok := ret.Get(1).(func(string, string) bool); ok {\n\t\tr1 = rf(path, name)\n\t} else {\n\t\tr1 = ret.Get(1).(bool)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockClusterEvent) GetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, 
\"GetName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m MockFileInfo) Name() string {\n\treturn m.name\n}", "func (_m *ContextHandler) Param(name string) string {\n\tret := _m.Called(name)\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = rf(name)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (r *Radio) Name() string {\n\treturn \"mock\"\n}", "func (m *MockExecutionManager) GetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (_DetailedTestToken *DetailedTestTokenCaller) Name(opts *bind.CallOpts) (string, error) {\n\tvar out []interface{}\n\terr := _DetailedTestToken.contract.Call(opts, &out, \"name\")\n\n\tif err != nil {\n\t\treturn *new(string), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(string)).(*string)\n\n\treturn out0, err\n\n}", "func awsMockStackID(stackName string) string {\n\tnh := sha1.Sum([]byte(stackName))\n\treturn fmt.Sprintf(\"arn:aws:cloudformation:mock:%012d:stack/%s/%x-%x-%x-%x-%x\",\n\t\tbinary.BigEndian.Uint32(nh[16:20]), stackName, nh[0:4], nh[4:6], nh[6:8], nh[8:10], nh[10:16])\n}", "func (_m *UserRepositoryI) UpdateNamePassword(tx database.TransactionI, user *models.UserPrivateInfo) error {\n\tret := _m.Called(tx, user)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(database.TransactionI, *models.UserPrivateInfo) error); ok {\n\t\tr0 = rf(tx, user)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *ChannelStore) GetByName(team_id string, name string, allowFromCache bool) (*model.Channel, error) {\n\tret := _m.Called(team_id, name, allowFromCache)\n\n\tvar r0 *model.Channel\n\tif rf, ok := ret.Get(0).(func(string, string, bool) *model.Channel); ok {\n\t\tr0 = rf(team_id, name, allowFromCache)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*model.Channel)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string, bool) 
error); ok {\n\t\tr1 = rf(team_id, name, allowFromCache)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Storage) DisplayName() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (m *MockDomainManager) GetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockUserClient) GetUserByName(ctx context.Context, in *pb.UserRequest, opts ...grpc.CallOption) (*pb.UserReply, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"GetUserByName\", varargs...)\n\tret0, _ := ret[0].(*pb.UserReply)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockRepository) GetByName(name string) (*models.Film, bool) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetByName\", name)\n\tret0, _ := ret[0].(*models.Film)\n\tret1, _ := ret[1].(bool)\n\treturn ret0, ret1\n}", "func (m *MockRestAPIDiscoveryReconciler) GetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func Test_Client_MapByCallingCode(t *testing.T) {\n\tret := mockClient.MapByCallingCode(\"65\")\n\tassert.Equal(t, ret[0].Name, \"Singapore\")\n}", "func (_m *TeamStore) GetByNames(name []string) ([]*model.Team, error) {\n\tret := _m.Called(name)\n\n\tvar r0 []*model.Team\n\tif rf, ok := ret.Get(0).(func([]string) []*model.Team); ok {\n\t\tr0 = rf(name)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*model.Team)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func([]string) error); ok {\n\t\tr1 = rf(name)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockIUserService) QueryUserByName(name string) (*model.UserDB, error) 
{\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"QueryUserByName\", name)\n\tret0, _ := ret[0].(*model.UserDB)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func Test_Client_MapByAlpha3(t *testing.T) {\n\tret := mockClient.MapByAlpha3(\"SGP\")\n\tassert.Equal(t, ret.Name, \"Singapore\")\n}", "func (m *MockInfraEnvEvent) GetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *Mock) ModuleName() string {\n\targs := m.Called()\n\treturn args.Get(0).(string)\n}", "func (m *Mocker) Mock(name, method, path string, f MockFunc) {\n\tpath = strings.TrimRight(path, \"/\")\n\n\tif m.handlers[path] == nil {\n\t\tm.handlers[path] = make(map[string][]Handler)\n\t}\n\n\th := Handler{\n\t\tName: name,\n\t\tHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif f != nil {\n\t\t\t\tf(w, r, m.t)\n\t\t\t} else {\n\t\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\t}\n\t\t}),\n\t}\n\n\tm.handlers[path][method] = append(m.handlers[path][method], h)\n}", "func (_m *MockUserRepositoryProvider) CheckIfExistWithNameAndSurname(name string, surname string) (bool, error) {\n\tret := _m.Called(name, surname)\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func(string, string) bool); ok {\n\t\tr0 = rf(name, surname)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = rf(name, surname)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *SecretClient) Set(name string, val string) error {\n\tret := _m.Called(name, val)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string) error); ok {\n\t\tr0 = rf(name, val)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *Asconn) RequestInfo(_a0 ...string) (map[string]string, aerospike.Error) {\n\t_va := make([]interface{}, len(_a0))\n\tfor _i := range _a0 {\n\t\t_va[_i] = _a0[_i]\n\t}\n\tvar _ca 
[]interface{}\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 map[string]string\n\tif rf, ok := ret.Get(0).(func(...string) map[string]string); ok {\n\t\tr0 = rf(_a0...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string]string)\n\t\t}\n\t}\n\n\tvar r1 aerospike.Error\n\tif rf, ok := ret.Get(1).(func(...string) aerospike.Error); ok {\n\t\tr1 = rf(_a0...)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(aerospike.Error)\n\t\t}\n\t}\n\n\treturn r0, r1\n}", "func (_m *TeamStore) GetByName(name string) (*model.Team, error) {\n\tret := _m.Called(name)\n\n\tvar r0 *model.Team\n\tif rf, ok := ret.Get(0).(func(string) *model.Team); ok {\n\t\tr0 = rf(name)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*model.Team)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(name)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func DummyStringGetterFunction(name string) string {\n\treturn name\n}", "func FieldByName(name string) func(interface{}) interface{} {\n\treturn func(record interface{}) interface{} {\n\t\treturn reflect.ValueOf(record).FieldByName(name).Interface()\n\t}\n}", "func (_m *Pattern) Names() []string {\n\tret := _m.Called()\n\n\tvar r0 []string\n\tif rf, ok := ret.Get(0).(func() []string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]string)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (_m *UserRepositoryI) FetchNamePassword(tx database.TransactionI, userID int32) (*models.UserPrivateInfo, error) {\n\tret := _m.Called(tx, userID)\n\n\tvar r0 *models.UserPrivateInfo\n\tif rf, ok := ret.Get(0).(func(database.TransactionI, int32) *models.UserPrivateInfo); ok {\n\t\tr0 = rf(tx, userID)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*models.UserPrivateInfo)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(database.TransactionI, int32) error); ok {\n\t\tr1 = rf(tx, 
userID)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *mockFile) Name() string {\n\treturn m.name\n}", "func TestCallFunc_arguments(t *testing.T) {\n\n}", "func (m *MockManager) SerializeReleaseName(arg0 string) error {\n\tret := m.ctrl.Call(m, \"SerializeReleaseName\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockResourceHandle) ResourceName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ResourceName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockHandle) LinkByName(name string) (netlink.Link, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"LinkByName\", name)\n\tret0, _ := ret[0].(netlink.Link)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func IsMock(name string) bool {\n\treturn name == Mock\n}", "func (_m *MockGrabX) Get(ctx context.Context, name string) string {\n\tret := _m.Called(ctx, name)\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func(context.Context, string) string); ok {\n\t\tr0 = rf(ctx, name)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func (rf RadioStruct) Name(name string) RadioStruct {\n\trf.NameField = name\n\treturn rf\n}" ]
[ "0.615958", "0.61477035", "0.6127143", "0.61256045", "0.60194963", "0.60167503", "0.59380925", "0.5935587", "0.5927315", "0.5859304", "0.5842395", "0.58246493", "0.5813304", "0.58018726", "0.57960755", "0.5731718", "0.5703874", "0.5667328", "0.56665623", "0.5655371", "0.5646953", "0.5646458", "0.56400824", "0.5633371", "0.5564311", "0.5554881", "0.55187875", "0.550049", "0.54987955", "0.5497103", "0.5493209", "0.54925567", "0.5487127", "0.5466318", "0.5462905", "0.54597974", "0.5435461", "0.5399896", "0.53869694", "0.53462124", "0.53382397", "0.5336069", "0.533493", "0.53140676", "0.53090703", "0.53021055", "0.52798975", "0.52686375", "0.5260407", "0.52431923", "0.5238596", "0.52317834", "0.5226228", "0.52229375", "0.5220964", "0.5220055", "0.52124625", "0.52046424", "0.52034885", "0.5202557", "0.5196451", "0.5192429", "0.51900774", "0.5174395", "0.5146918", "0.5143759", "0.5129779", "0.5127574", "0.51122034", "0.5108069", "0.5106477", "0.509877", "0.50932205", "0.50919944", "0.50916284", "0.5090394", "0.5090366", "0.50898904", "0.5088019", "0.508493", "0.5083889", "0.507087", "0.50541365", "0.5054133", "0.50520074", "0.50502634", "0.50421584", "0.50413203", "0.504075", "0.5040661", "0.50402504", "0.5037349", "0.503691", "0.503249", "0.50316215", "0.5029648", "0.5017189", "0.5016887", "0.5016458", "0.501539" ]
0.5819971
12
getDataset collects the various information we want to report about SELinux and returns a separate dataset for each type of output: basicData: Overall SELinux status whether it's running, what mode it's in, etc. policyData: Individual SELinux policy flags a highlevel overview of SELinux configuration policyModules: Listing of policy modules in use and which version of modules are active
func (self *SELinuxPlugin) getDataset() (basicData agent.PluginInventoryDataset, policyData agent.PluginInventoryDataset, policyModules agent.PluginInventoryDataset, err error) { // Get basic selinux status data using sestatus. If selinux isn't enabled or installed, this will fail. output, err := helpers.RunCommand("sestatus", "", "-b") if err != nil { return } if basicData, policyData, err = self.parseSestatusOutput(output); err != nil { return } if self.enableSemodule { // Get versions of policy modules installed using semodule if output, err = helpers.RunCommand("semodule", "", "-l"); err != nil { return } if policyModules, err = self.parseSemoduleOutput(output); err != nil { return } } return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetDatasets(a *config.AppContext) ([]models.Dataset, error) {\n\tresults := []models.Dataset{}\n\terr := a.Db.Where(\"user_id = ?\", a.Session.User.ID).Find(&results).Error\n\treturn results, err\n}", "func (bq BQClientStruct) getDatasets() string {\n\treturn \"real list of datasets\"\n}", "func GetDatasets(data string, caller driver.Syscaller) ([]ransuq.Dataset, error) {\n\tvar datasets []ransuq.Dataset\n\n\tflatplate3_06 := newFlatplate(3e6, 0, \"med\", \"atwall\")\n\tflatplate4_06 := newFlatplate(4e6, 0, \"med\", \"atwall\")\n\tflatplate5_06 := newFlatplate(5e6, 0, \"med\", \"atwall\")\n\tflatplate6_06 := newFlatplate(6e6, 0, \"med\", \"atwall\")\n\tflatplate7_06 := newFlatplate(7e6, 0, \"med\", \"atwall\")\n\n\tflatplate3_06_BL := newFlatplate(3e6, 0, \"med\", \"justbl\")\n\tflatplate4_06_BL := newFlatplate(4e6, 0, \"med\", \"justbl\")\n\tflatplate5_06_BL := newFlatplate(5e6, 0, \"med\", \"justbl\")\n\tflatplate6_06_BL := newFlatplate(6e6, 0, \"med\", \"justbl\")\n\tflatplate7_06_BL := newFlatplate(7e6, 0, \"med\", \"justbl\")\n\n\tblIgnoreNames, blIgnoreFunc := GetIgnoreData(\"justbl\")\n\n\t// TODO: Move these to a function\n\tflatplateLoc := filepath.Join(gopath, \"data\", \"ransuq\", \"flatplate\", \"med\")\n\n\tflatplate3_06_budget_BL_Loc := filepath.Join(flatplateLoc, \"Flatplate_Re_3e_06\", \"turb_flatplate_sol_budget.dat\")\n\tflatplate3_06_budget_BL := &datawrapper.CSV{\n\t\tLocation: flatplate3_06_budget_BL_Loc,\n\t\tName: \"Flat306Budget\",\n\t\tIgnoreFunc: blIgnoreFunc,\n\t\tIgnoreNames: blIgnoreNames,\n\t\tFieldMap: budgetFieldMap,\n\t}\n\n\tflatplate5_06_budget_BL_Loc := filepath.Join(flatplateLoc, \"Flatplate_Re_5e_06\", \"turb_flatplate_sol_budget.dat\")\n\tflatplate5_06_budget_BL := &datawrapper.CSV{\n\t\tLocation: flatplate5_06_budget_BL_Loc,\n\t\tName: \"Flat506Budget\",\n\t\tIgnoreFunc: blIgnoreFunc,\n\t\tIgnoreNames: blIgnoreNames,\n\t\tFieldMap: budgetFieldMap,\n\t}\n\n\tflatplate7_06_budget_BL_Loc := filepath.Join(flatplateLoc, 
\"Flatplate_Re_7e_06\", \"turb_flatplate_sol_budget.dat\")\n\tflatplate7_06_budget_BL := &datawrapper.CSV{\n\t\tLocation: flatplate7_06_budget_BL_Loc,\n\t\tName: \"Flat706Budget\",\n\t\tIgnoreFunc: blIgnoreFunc,\n\t\tIgnoreNames: blIgnoreNames,\n\t\tFieldMap: budgetFieldMap,\n\t}\n\n\tflatplateSweep := []ransuq.Dataset{flatplate3_06, flatplate4_06, flatplate5_06, flatplate6_06, flatplate7_06}\n\tmultiFlatplate := []ransuq.Dataset{flatplate3_06, flatplate5_06, flatplate7_06}\n\n\tswitch data {\n\tdefault:\n\t\treturn nil, Missing{\n\t\t\tPrefix: \"dataset setting not found\",\n\t\t\tOptions: sortedDatasets,\n\t\t}\n\tcase \"none\":\n\t\tdatasets = []ransuq.Dataset{}\n\tcase SingleFlatplate:\n\t\tdatasets = []ransuq.Dataset{flatplate5_06}\n\tcase SingleFlatplateBL:\n\t\tdatasets = []ransuq.Dataset{flatplate5_06_BL}\n\tcase MultiFlatplate:\n\t\tdatasets = multiFlatplate\n\tcase MultiFlatplateBL:\n\t\tdatasets = []ransuq.Dataset{flatplate3_06_BL, flatplate5_06_BL, flatplate7_06_BL}\n\tcase ExtraFlatplate:\n\t\tdatasets = []ransuq.Dataset{newFlatplate(1e6, 0, \"med\", \"atwall\"), newFlatplate(2e6, 0, \"med\", \"atwall\"), newFlatplate(1.5e6, 0, \"med\", \"atwall\")}\n\tcase FlatplateSweep:\n\t\tdatasets = flatplateSweep\n\tcase FlatplateSweepBl:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tflatplate3_06_BL,\n\t\t\tflatplate4_06_BL,\n\t\t\tflatplate5_06_BL,\n\t\t\tflatplate6_06_BL,\n\t\t\tflatplate7_06_BL,\n\t\t}\n\tcase SyntheticFlatplateProduction:\n\t\tdatasets = []ransuq.Dataset{synthetic.Production{synthetic.FlatplateBounds}}\n\tcase MultiAndSynthFlatplate:\n\t\tdatasets = []ransuq.Dataset{synthetic.Production{synthetic.FlatplateBounds}}\n\t\tdatasets = append(datasets, flatplateSweep...)\n\tcase SingleRae:\n\t\tdatasets = []ransuq.Dataset{newAirfoil()}\n\tcase LES4:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\t&datawrapper.CSV{\n\t\t\t\tLocation: filepath.Join(gopath, \"data\", \"ransuq\", \"HiFi\", \"exp4_mod.txt\"),\n\t\t\t\tName: \"LES_exp4\",\n\t\t\t\tIgnoreFunc: 
func([]float64) bool { return false },\n\t\t\t},\n\t\t}\n\t\t// Need to check correctness of this case\n\t\t/*\n\t\t\tcase SingleFlatplateBudget:\n\n\t\t\t\tlocation := filepath.Join(gopath, \"data\", \"ransuq\", \"flatplate\", \"med\", \"Flatplate_Re_5e_06\", \"turb_flatplate_sol_budget.dat\")\n\t\t\t\tdatasets = []ransuq.Dataset{\n\t\t\t\t\t&datawrapper.CSV{\n\t\t\t\t\t\tLocation: location,\n\t\t\t\t\t\tName: \"Flat06Budget\",\n\t\t\t\t\t\tIgnoreFunc: func(d []float64) bool { return d[0] < wallDistIgnore },\n\t\t\t\t\t\tIgnoreNames: []string{\"WallDist\"},\n\t\t\t\t\t\tFieldMap: budgetFieldMap,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t*/\n\tcase MultiFlatplateBudgetBL:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tflatplate3_06_budget_BL,\n\t\t\tflatplate5_06_budget_BL,\n\t\t\tflatplate7_06_budget_BL,\n\t\t}\n\tcase DNS5n:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\t&datawrapper.CSV{\n\t\t\t\tLocation: filepath.Join(gopath, \"data\", \"ransuq\", \"HiFi\", \"exp5xn.txt\"),\n\t\t\t\tName: \"DNS5n\",\n\t\t\t\tIgnoreFunc: func([]float64) bool { return false },\n\t\t\t},\n\t\t}\n\tcase LES4Tenth:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\t&datawrapper.CSV{\n\t\t\t\tLocation: filepath.Join(gopath, \"data\", \"ransuq\", \"LES\", \"exp4_mod.txt\"),\n\t\t\t\tName: \"LES_exp4\",\n\t\t\t\tIgnoreFunc: func(a []float64) bool {\n\t\t\t\t\tintpoint := int(a[0])\n\t\t\t\t\treturn (intpoint % 10) != 0\n\t\t\t\t},\n\t\t\t\tIgnoreNames: []string{\"Datapoint\"},\n\t\t\t},\n\t\t}\n\tcase FwNACA0012:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\t&datawrapper.CSV{\n\t\t\t\tLocation: filepath.Join(gopath, \"data\", \"ransuq\", \"RANS_Shivaji\", \"naca0012_fw.dat\"),\n\t\t\t\tName: \"NACA_0012_Shivaji\",\n\t\t\t\tIgnoreFunc: func([]float64) bool {\n\t\t\t\t\treturn false\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\tcase FlatPress:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tflatplate3_06,\n\t\t\tflatplate4_06,\n\t\t\tflatplate5_06,\n\t\t\tflatplate6_06,\n\t\t\tflatplate6_06,\n\t\t\tnewFlatplate(5e6, .30, \"med\", 
\"atwall\"),\n\t\t\tnewFlatplate(5e6, .10, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, .03, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, .01, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, 0, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.01, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.03, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.10, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.30, \"med\", \"atwall\"),\n\t\t}\n\tcase NacaPressureFlatSmall:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tflatplate5_06,\n\t\t\tnewNaca0012(3, \"atwall\"),\n\t\t\tnewFlatplate(5e6, .30, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.30, \"med\", \"atwall\"),\n\t\t}\n\tcase NacaPressureFlatMedium:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tflatplate3_06,\n\t\t\tflatplate5_06,\n\t\t\tflatplate7_06,\n\t\t\tnewNaca0012(0, \"atwall\"),\n\t\t\tnewNaca0012(6, \"atwall\"),\n\t\t\tnewNaca0012(12, \"atwall\"),\n\t\t\tnewFlatplate(5e6, .30, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.30, \"med\", \"atwall\"),\n\t\t}\n\tcase NacaPressureFlatMediumBL:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tflatplate3_06_BL,\n\t\t\tflatplate5_06_BL,\n\t\t\tflatplate7_06_BL,\n\t\t\tnewNaca0012(0, \"justbl\"),\n\t\t\tnewNaca0012(6, \"justbl\"),\n\t\t\tnewNaca0012(12, \"justbl\"),\n\t\t\tnewFlatplate(5e6, .30, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.30, \"med\", \"justbl\"),\n\t\t}\n\tcase NacaPressureFlatSmallBL:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tflatplate5_06_BL,\n\t\t\tnewNaca0012(3, \"justbl\"),\n\t\t\tnewFlatplate(5e6, .30, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.30, \"med\", \"justbl\"),\n\t\t}\n\tcase NacaPressureFlat:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tflatplate3_06,\n\t\t\tflatplate4_06,\n\t\t\tflatplate5_06,\n\t\t\tflatplate6_06,\n\t\t\tflatplate7_06,\n\t\t\tnewFlatplate(5e6, .30, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, .10, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, .03, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, .01, \"med\", 
\"atwall\"),\n\t\t\tnewFlatplate(5e6, -.01, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.03, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.10, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.30, \"med\", \"atwall\"),\n\t\t\tnewNaca0012(0, \"atwall\"),\n\t\t\tnewNaca0012(1, \"atwall\"),\n\t\t\tnewNaca0012(2, \"atwall\"),\n\t\t\tnewNaca0012(3, \"atwall\"),\n\t\t\tnewNaca0012(4, \"atwall\"),\n\t\t\tnewNaca0012(5, \"atwall\"),\n\t\t\tnewNaca0012(6, \"atwall\"),\n\t\t\tnewNaca0012(7, \"atwall\"),\n\t\t\tnewNaca0012(8, \"atwall\"),\n\t\t\tnewNaca0012(9, \"atwall\"),\n\t\t\tnewNaca0012(10, \"atwall\"),\n\t\t\tnewNaca0012(11, \"atwall\"),\n\t\t\tnewNaca0012(12, \"atwall\"),\n\t\t}\n\tcase NacaPressureFlatBl:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewNaca0012(0, \"justbl\"),\n\t\t\tnewNaca0012(1, \"justbl\"),\n\t\t\tnewNaca0012(2, \"justbl\"),\n\t\t\tnewNaca0012(3, \"justbl\"),\n\t\t\tnewNaca0012(4, \"justbl\"),\n\t\t\tnewNaca0012(5, \"justbl\"),\n\t\t\tnewNaca0012(6, \"justbl\"),\n\t\t\tnewNaca0012(7, \"justbl\"),\n\t\t\tnewNaca0012(8, \"justbl\"),\n\t\t\tnewNaca0012(9, \"justbl\"),\n\t\t\tnewNaca0012(10, \"justbl\"),\n\t\t\tnewNaca0012(11, \"justbl\"),\n\t\t\tnewNaca0012(12, \"justbl\"),\n\t\t\tflatplate3_06_BL,\n\t\t\tflatplate4_06_BL,\n\t\t\tflatplate5_06_BL,\n\t\t\tflatplate6_06_BL,\n\t\t\tflatplate7_06_BL,\n\t\t\tnewFlatplate(5e6, .30, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, .10, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, .03, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, .01, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.01, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.03, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.10, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.30, \"med\", \"justbl\"),\n\t\t}\n\tcase PressureBl:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewFlatplate(5e6, .30, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, .10, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, .03, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, 
.01, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.01, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.03, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.10, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.30, \"med\", \"justbl\"),\n\t\t}\n\tcase FlatPressureBl:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tflatplate3_06_BL,\n\t\t\tflatplate4_06_BL,\n\t\t\tflatplate5_06_BL,\n\t\t\tflatplate6_06_BL,\n\t\t\tflatplate7_06_BL,\n\t\t\tnewFlatplate(5e6, .30, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, .10, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, .03, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, .01, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.01, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.03, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.10, \"med\", \"justbl\"),\n\t\t\tnewFlatplate(5e6, -.30, \"med\", \"justbl\"),\n\t\t}\n\tcase SingleNaca0012:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewNaca0012(0, \"atwall\"),\n\t\t}\n\tcase SingleNaca0012Bl:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewNaca0012(0, \"justbl\"),\n\t\t}\n\tcase MultiNaca0012:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewNaca0012(0, \"atwall\"),\n\t\t\tnewNaca0012(3, \"atwall\"),\n\t\t\tnewNaca0012(6, \"atwall\"),\n\t\t\tnewNaca0012(9, \"atwall\"),\n\t\t\tnewNaca0012(12, \"atwall\"),\n\t\t}\n\tcase MultiNaca0012Bl:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewNaca0012(0, \"justbl\"),\n\t\t\tnewNaca0012(3, \"justbl\"),\n\t\t\tnewNaca0012(6, \"justbl\"),\n\t\t\tnewNaca0012(9, \"justbl\"),\n\t\t\tnewNaca0012(12, \"justbl\"),\n\t\t}\n\tcase Naca0012SweepBl:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewNaca0012(0, \"justbl\"),\n\t\t\tnewNaca0012(1, \"justbl\"),\n\t\t\tnewNaca0012(2, \"justbl\"),\n\t\t\tnewNaca0012(3, \"justbl\"),\n\t\t\tnewNaca0012(4, \"justbl\"),\n\t\t\tnewNaca0012(5, \"justbl\"),\n\t\t\tnewNaca0012(6, \"justbl\"),\n\t\t\tnewNaca0012(7, \"justbl\"),\n\t\t\tnewNaca0012(8, \"justbl\"),\n\t\t\tnewNaca0012(9, \"justbl\"),\n\t\t\tnewNaca0012(10, 
\"justbl\"),\n\t\t\tnewNaca0012(11, \"justbl\"),\n\t\t\tnewNaca0012(12, \"justbl\"),\n\t\t}\n\tcase Naca0012Sweep:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewNaca0012(0, \"atwall\"),\n\t\t\tnewNaca0012(1, \"atwall\"),\n\t\t\tnewNaca0012(2, \"atwall\"),\n\t\t\tnewNaca0012(3, \"atwall\"),\n\t\t\tnewNaca0012(4, \"atwall\"),\n\t\t\tnewNaca0012(5, \"atwall\"),\n\t\t\tnewNaca0012(6, \"atwall\"),\n\t\t\tnewNaca0012(7, \"atwall\"),\n\t\t\tnewNaca0012(8, \"atwall\"),\n\t\t\tnewNaca0012(9, \"atwall\"),\n\t\t\tnewNaca0012(10, \"atwall\"),\n\t\t\tnewNaca0012(11, \"atwall\"),\n\t\t\tnewNaca0012(12, \"atwall\"),\n\t\t}\n\tcase PressureGradientMultiSmall:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewFlatplate(5e6, .30, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, .10, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, .03, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, .01, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, 0, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.01, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.03, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.10, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.30, \"med\", \"atwall\"),\n\t\t}\n\tcase PressureGradientMulti:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewFlatplate(5e6, 30, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, 10, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, 3, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, 1, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, .1, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, 0, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -.1, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -1, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -3, \"med\", \"atwall\"),\n\t\t\tnewFlatplate(5e6, -10, \"med\", \"atwall\"),\n\t\t}\n\tcase OneraM6:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewOneraM6(3.06, \"atwall\"),\n\t\t}\n\tcase OneraM6BL:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewOneraM6(3.06, \"justbl\"),\n\t\t}\n\tcase OneraM6Sweep:\n\t\tdatasets = 
[]ransuq.Dataset{\n\t\t\tnewOneraM6(3.06, \"atwall\"),\n\t\t\tnewOneraM6(1, \"atwall\"),\n\t\t\tnewOneraM6(2, \"atwall\"),\n\t\t\tnewOneraM6(0, \"atwall\"),\n\t\t\tnewOneraM6(4, \"atwall\"),\n\t\t}\n\tcase MultiOneraM6:\n\t\tdatasets = []ransuq.Dataset{\n\t\t\tnewOneraM6(0, \"atwall\"),\n\t\t\tnewOneraM6(2, \"atwall\"),\n\t\t\tnewOneraM6(4, \"atwall\"),\n\t\t}\n\tcase LavalDNS, LavalDNSBL, LavalDNSBLAll, LavalDNSCrop:\n\t\tignoreNames, ignoreFunc := GetIgnoreData(data)\n\t\tdatasets = []ransuq.Dataset{\n\t\t\t&datawrapper.CSV{\n\t\t\t\tLocation: lavalLoc,\n\t\t\t\tName: \"Laval\",\n\t\t\t\tIgnoreFunc: ignoreFunc,\n\t\t\t\tIgnoreNames: ignoreNames,\n\t\t\t\tFieldMap: datawrapper.LavalMap,\n\t\t\t},\n\t\t}\n\tcase ShivajiRANS:\n\t\tignoreNames, ingoreFunc := GetIgnoreData(\"atwall\")\n\t\tdatasets = []ransuq.Dataset{\n\t\t\t&datawrapper.CSV{\n\t\t\t\tLocation: filepath.Join(gopath, \"data\", \"ransuq\", \"RANS_Shivaji\", \"bigrans\", \"data_extracomputed.txt\"),\n\t\t\t\tName: \"RANS_Shivaji\",\n\t\t\t\tIgnoreFunc: ingoreFunc,\n\t\t\t\tIgnoreNames: ignoreNames,\n\t\t\t},\n\t\t}\n\tcase ShivajiComputed:\n\t\tignoreNames, ingoreFunc := GetIgnoreData(\"atwall\")\n\t\tdatasets = []ransuq.Dataset{\n\t\t\t&datawrapper.CSV{\n\t\t\t\tLocation: filepath.Join(gopath, \"data\", \"ransuq\", \"RANS_Shivaji\", \"bigrans\", \"data_recomputed.txt\"),\n\t\t\t\tName: \"RANS_Shivaji_Computed\",\n\t\t\t\tIgnoreFunc: ingoreFunc,\n\t\t\t\tIgnoreNames: ignoreNames,\n\t\t\t},\n\t\t}\n\tcase LESKarthik:\n\t\tignoreNames, ingoreFunc := GetIgnoreData(\"none\")\n\t\tdatasets = []ransuq.Dataset{\n\t\t\t&datawrapper.CSV{\n\t\t\t\tLocation: filepath.Join(gopath, \"data\", \"ransuq\", \"les_karthik\", \"sadatacomputed.txt\"),\n\t\t\t\tName: \"LES_Karthik\",\n\t\t\t\tIgnoreFunc: ingoreFunc,\n\t\t\t\tIgnoreNames: ignoreNames,\n\t\t\t},\n\t\t}\n\t}\n\n\tfor _, dataset := range datasets {\n\t\tif dataset == nil {\n\t\t\tpanic(\"nil dataset\")\n\t\t}\n\t\tfmt.Println(dataset.ID())\n\t\tsu2, ok := 
dataset.(*datawrapper.SU2)\n\t\tif ok {\n\t\t\tfmt.Println(\"in setting syscaller\")\n\t\t\tsu2.SetSyscaller(caller)\n\t\t}\n\t}\n\treturn datasets, nil\n}", "func (o *PolicyPaginationAllOf) GetData() []PolicyExtended {\n\tif o == nil {\n\t\tvar ret []PolicyExtended\n\t\treturn ret\n\t}\n\n\treturn o.Data\n}", "func GetDatasourceStats(ds *Datasource) *xcore.XDataset {\n\n\tsubdata := xcore.XDataset{}\n\tsubdata[\"languages\"] = ds.GetLanguages()\n\tsubdata[\"database\"] = ds.GetDatabase()\n\tsubdata[\"logs\"] = ds.GetLogs()\n\n\tcaches := []string{}\n\tfor id := range ds.GetCaches() {\n\t\tcaches = append(caches, id)\n\t}\n\tsubdata[\"xcaches\"] = caches\n\n\ttables := map[string]string{}\n\tfor id, table := range ds.GetTables() {\n\t\tif table.Base != nil {\n\t\t\tdb := table.Base.Database\n\t\t\ttables[id] = db\n\t\t} else {\n\t\t\ttables[id] = \"N/A\"\n\t\t}\n\t}\n\tsubdata[\"tables\"] = tables\n\n\tsubdata[\"config\"] = tools.BuildConfigSet(ds.Config)\n\n\t// analiza los módulos instalados\n\tmodules := map[string]interface{}{}\n\tfor id, v := range ds.GetModules() {\n\t\tmd := struct {\n\t\t\tVersion string\n\t\t\tInstalledVersion string\n\t\t}{v, ModuleInstalledVersion(ds, id)}\n\t\tmodules[id] = md\n\t}\n\tsubdata[\"modules\"] = modules\n\n\treturn &subdata\n}", "func (o PowerBIOutputDataSourceResponseOutput) Dataset() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PowerBIOutputDataSourceResponse) *string { return v.Dataset }).(pulumi.StringPtrOutput)\n}", "func dataSourceSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {\n config := meta.(*PConfig)\n\n platform := d.Get(\"platform\").(string)\n siteID := d.Get(\"vcs\").(string)\n\n resourcePath := fmt.Sprintf(\"api/v3/%s/sites/%s/\", platform, siteID)\n response, err := config.doNormalRequest(platform, resourcePath, \"GET\", nil)\n if err != nil {\n return fmt.Errorf(\"Unable to get VCS %s: %v\", siteID, err)\n }\n var site map[string]interface{}\n if err = 
json.Unmarshal([]byte(response), &site); err != nil {\n return err\n }\n projectID := int(site[\"project\"].(float64))\n var serverID int\n for _, server := range site[\"servers\"].([]interface{}) {\n serverInfo := server.(map[string]interface{})\n serverID = int(serverInfo[\"id\"].(float64))\n break\n }\n\n resourcePath = fmt.Sprintf(\"api/v3/%s/security_groups/?project=%d&server=%d\", platform, projectID, serverID)\n response, err = config.doNormalRequest(platform, resourcePath, \"GET\", nil)\n\n if err != nil {\n return fmt.Errorf(\"Unable to list security_groups: %v\", err)\n }\n\n var security_groups []map[string]interface{}\n if err = json.Unmarshal([]byte(response), &security_groups); err != nil {\n return err\n }\n\n for _, security_group := range security_groups {\n return dataSourceSecurityGroupAttributes(d, security_group)\n }\n\n return fmt.Errorf(\"Unable to retrieve security group by VCS %s: %v\", siteID, err)\n}", "func (o PowerBIOutputDataSourceOutput) Dataset() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PowerBIOutputDataSource) *string { return v.Dataset }).(pulumi.StringPtrOutput)\n}", "func getDataSetStats(w http.ResponseWriter, r *http.Request) {\n\torgID := domain.GetOrganizationID(r)\n\tspec := chi.URLParam(r, \"spec\")\n\tlog.Printf(\"Get stats for spec %s\", domain.LogUserInput(spec))\n\tstats, err := models.CreateDataSetStats(r.Context(), string(orgID), spec)\n\tif err != nil {\n\t\tif err == storm.ErrNotFound {\n\t\t\trender.Error(w, r, err, &render.ErrorConfig{\n\t\t\t\tStatusCode: http.StatusNotFound,\n\t\t\t\tMessage: fmt.Sprintf(\"Unable to retrieve dataset: %s\", spec),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\trender.Error(w, r, err, &render.ErrorConfig{\n\t\t\tStatusCode: http.StatusNotFound,\n\t\t\tMessage: fmt.Sprintf(\"Unable to create dataset stats: %s\", spec),\n\t\t})\n\n\t\treturn\n\t}\n\n\trender.JSON(w, r, stats)\n}", "func requestAndValidateDatasets(JWT string, environment string) ([]datasetModel.APIDatasetSummary, 
error) {\n\tvar result = []datasetModel.APIDatasetSummary{}\n\treq, err := http.NewRequest(\"GET\", generateURL(environment)+\"/dataset\", nil)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\treq.Header.Set(\"Authorization\", \"Bearer \"+JWT)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\t//storing errors in a dynamic slice to allow for multiple errors across multiple datasets\n\t//to be outputted to better diagnosis issues\n\terrCount := 0\n\tdownloadLimit := 1\n\tfor c, item := range result {\n\t\tdserror := isValidDatasetItem(item, JWT)\n\n\t\tif dserror != nil {\n\t\t\tfmt.Printf(\"%v\\n\", dserror)\n\t\t\terrCount++\n\t\t}\n\n\t\tif c >= downloadLimit {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif errCount > 0 {\n\t\treturn result, errors.New(\"Dataset query failed\")\n\t}\n\n\tfmt.Printf(\" Received %v dataset summaries\\n\", len(result))\n\treturn result, nil\n}", "func (*RunnablePolicySet) Descriptor() ([]byte, []int) {\n\treturn file_cerbos_runtime_v1_runtime_proto_rawDescGZIP(), []int{0}\n}", "func (*RunnablePrincipalPolicySet) Descriptor() ([]byte, []int) {\n\treturn file_cerbos_runtime_v1_runtime_proto_rawDescGZIP(), []int{4}\n}", "func (*RunnableResourcePolicySet) Descriptor() ([]byte, []int) {\n\treturn file_cerbos_runtime_v1_runtime_proto_rawDescGZIP(), []int{1}\n}", "func (o DatasetAccessRoutineOutput) DatasetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DatasetAccessRoutine) string { return v.DatasetId }).(pulumi.StringOutput)\n}", "func (d casandraSQLDialect) GetDatastores(manager Manager) ([]string, error) {\n\n\t/*\n\t\tTODO add version support\n\t\tversion, err := d.getCQLVersion(manager)\n\t\tif err != nil {\n\t\t\treturn nil,err\n\t\t}\n\t*/\n\tvar SQL = casandraSchemaListSQL\n\tvar rows = 
make([][]interface{}, 0)\n\terr := manager.ReadAll(&rows, SQL, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar result = make([]string, 0)\n\tfor _, row := range rows {\n\t\tresult = append(result, normalizeName(toolbox.AsString(row[d.schemaResultsetIndex])))\n\t}\n\treturn result, nil\n}", "func getDataSet(w http.ResponseWriter, r *http.Request) {\n\torgID := domain.GetOrganizationID(r)\n\tspec := chi.URLParam(r, \"spec\")\n\tds, err := models.GetDataSet(orgID.String(), spec)\n\tif err != nil {\n\t\tif err == storm.ErrNotFound {\n\t\t\trender.Error(w, r, err, &render.ErrorConfig{\n\t\t\t\tStatusCode: http.StatusNotFound,\n\t\t\t\tMessage: fmt.Sprintf(\"Unable to retrieve dataset: %s\", spec),\n\t\t\t})\n\n\t\t\treturn\n\t\t}\n\n\t\trender.Error(w, r, err, &render.ErrorConfig{\n\t\t\tStatusCode: http.StatusNotFound,\n\t\t\tMessage: fmt.Sprintf(\"Unable to get dataset: %s\", spec),\n\t\t})\n\n\t\treturn\n\t}\n\n\trender.JSON(w, r, ds)\n}", "func getDatasources() []datasource.Datasource {\n\tdss := make([]datasource.Datasource, 0, 5)\n\tif flags.sources.file != \"\" {\n\t\tdss = append(dss, file.NewDatasource(flags.sources.file))\n\t}\n\tif flags.sources.url != \"\" {\n\t\tdss = append(dss, url.NewDatasource(flags.sources.url))\n\t}\n\tif flags.sources.configDrive != \"\" {\n\t\tdss = append(dss, configdrive.NewDatasource(flags.sources.configDrive))\n\t}\n\tif flags.sources.metadataService {\n\t\tdss = append(dss, ec2.NewDatasource(ec2.DefaultAddress))\n\t}\n\tif flags.sources.ec2MetadataService != \"\" {\n\t\tdss = append(dss, ec2.NewDatasource(flags.sources.ec2MetadataService))\n\t}\n\tif flags.sources.cloudSigmaMetadataService {\n\t\tdss = append(dss, cloudsigma.NewServerContextService())\n\t}\n\tif flags.sources.digitalOceanMetadataService != \"\" {\n\t\tdss = append(dss, digitalocean.NewDatasource(flags.sources.digitalOceanMetadataService))\n\t}\n\tif flags.sources.waagent != \"\" {\n\t\tdss = append(dss, 
waagent.NewDatasource(flags.sources.waagent))\n\t}\n\tif flags.sources.procCmdLine {\n\t\tdss = append(dss, proc_cmdline.NewDatasource())\n\t}\n\treturn dss\n}", "func (o FhirStoreIamPolicyOutput) PolicyData() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *FhirStoreIamPolicy) pulumi.StringOutput { return v.PolicyData }).(pulumi.StringOutput)\n}", "func (m *Minerva) getDataset(block uint64) *dataset {\n\t// Retrieve the requested ethash dataset\n\tepoch := block / epochLength\n\tlog.Debug(\"epoch value: \", epoch, \"------\", \"block number is: \", block)\n\tcurrentI, _ := m.datasets.get(epoch)\n\tcurrent := currentI.(*dataset)\n\n\tcurrent.generate(block, m)\n\t//if futureI != nil {\n\t//\tfuture := futureI.(*dataset)\n\t//\tfuture.generate(block, m)\n\t//\n\t//}\n\n\t// Test byte possession\n\t//da := fmt.Sprintln(\"dataset size:\", unsafe.Sizeof(current.dataset))\n\t//on := fmt.Sprintln(\"once size:\", unsafe.Sizeof(current.once))\n\t//dai := fmt.Sprintln(\"dateInit size:\", unsafe.Sizeof(current.dateInit))\n\t//ep := fmt.Sprintln(\"epoch size:\", unsafe.Sizeof(current.epoch))\n\treturn current\n}", "func authenticationPoliciesData() map[string]*auth.AuthenticationPolicy {\n\tpolicydata := make(map[string]*auth.AuthenticationPolicy)\n\n\tpolicydata[\"LDAP enabled, Local enabled\"] = &auth.AuthenticationPolicy{\n\t\tTypeMeta: api.TypeMeta{Kind: \"AuthenticationPolicy\"},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"AuthenticationPolicy\",\n\t\t},\n\t\tSpec: auth.AuthenticationPolicySpec{\n\t\t\tAuthenticators: auth.Authenticators{\n\t\t\t\tLdap: &auth.Ldap{\n\t\t\t\t\tDomains: []*auth.LdapDomain{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tServers: []*auth.LdapServer{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tUrl: \"localhost:389\",\n\t\t\t\t\t\t\t\t\tTLSOptions: &auth.TLSOptions{\n\t\t\t\t\t\t\t\t\t\tStartTLS: true,\n\t\t\t\t\t\t\t\t\t\tSkipServerCertVerification: false,\n\t\t\t\t\t\t\t\t\t\tServerName: ServerName,\n\t\t\t\t\t\t\t\t\t\tTrustedCerts: 
TrustedCerts,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\tBaseDN: BaseDN,\n\t\t\t\t\t\t\tBindDN: BindDN,\n\t\t\t\t\t\t\tBindPassword: BindPassword,\n\t\t\t\t\t\t\tAttributeMapping: &auth.LdapAttributeMapping{\n\t\t\t\t\t\t\t\tUser: UserAttribute,\n\t\t\t\t\t\t\t\tUserObjectClass: UserObjectClassAttribute,\n\t\t\t\t\t\t\t\tGroup: GroupAttribute,\n\t\t\t\t\t\t\t\tGroupObjectClass: GroupObjectClassAttribute,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLocal: &auth.Local{},\n\t\t\t\tAuthenticatorOrder: []string{auth.Authenticators_LDAP.String(), auth.Authenticators_LOCAL.String()},\n\t\t\t},\n\t\t\tTokenExpiry: expiration.String(),\n\t\t},\n\t}\n\tpolicydata[\"LDAP disabled, Local enabled\"] = &auth.AuthenticationPolicy{\n\t\tTypeMeta: api.TypeMeta{Kind: \"AuthenticationPolicy\"},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"AuthenticationPolicy\",\n\t\t},\n\t\tSpec: auth.AuthenticationPolicySpec{\n\t\t\tAuthenticators: auth.Authenticators{\n\t\t\t\tLdap: &auth.Ldap{},\n\t\t\t\tLocal: &auth.Local{},\n\t\t\t\tAuthenticatorOrder: []string{auth.Authenticators_LOCAL.String()},\n\t\t\t},\n\t\t\tTokenExpiry: expiration.String(),\n\t\t},\n\t}\n\tpolicydata[\"Local enabled, LDAP enabled\"] = &auth.AuthenticationPolicy{\n\t\tTypeMeta: api.TypeMeta{Kind: \"AuthenticationPolicy\"},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"AuthenticationPolicy\",\n\t\t},\n\t\tSpec: auth.AuthenticationPolicySpec{\n\t\t\tAuthenticators: auth.Authenticators{\n\t\t\t\tLdap: &auth.Ldap{\n\t\t\t\t\tDomains: []*auth.LdapDomain{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tServers: []*auth.LdapServer{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tUrl: \"localhost:389\",\n\t\t\t\t\t\t\t\t\tTLSOptions: &auth.TLSOptions{\n\t\t\t\t\t\t\t\t\t\tStartTLS: true,\n\t\t\t\t\t\t\t\t\t\tSkipServerCertVerification: false,\n\t\t\t\t\t\t\t\t\t\tServerName: ServerName,\n\t\t\t\t\t\t\t\t\t\tTrustedCerts: 
TrustedCerts,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\tBaseDN: BaseDN,\n\t\t\t\t\t\t\tBindDN: BindDN,\n\t\t\t\t\t\t\tBindPassword: BindPassword,\n\t\t\t\t\t\t\tAttributeMapping: &auth.LdapAttributeMapping{\n\t\t\t\t\t\t\t\tUser: UserAttribute,\n\t\t\t\t\t\t\t\tUserObjectClass: UserObjectClassAttribute,\n\t\t\t\t\t\t\t\tGroup: GroupAttribute,\n\t\t\t\t\t\t\t\tGroupObjectClass: GroupObjectClassAttribute,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLocal: &auth.Local{},\n\t\t\t\tAuthenticatorOrder: []string{auth.Authenticators_LOCAL.String(), auth.Authenticators_LDAP.String()},\n\t\t\t},\n\t\t\tTokenExpiry: expiration.String(),\n\t\t},\n\t}\n\tpolicydata[\"Local enabled, LDAP disabled\"] = &auth.AuthenticationPolicy{\n\t\tTypeMeta: api.TypeMeta{Kind: \"AuthenticationPolicy\"},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"AuthenticationPolicy\",\n\t\t},\n\t\tSpec: auth.AuthenticationPolicySpec{\n\t\t\tAuthenticators: auth.Authenticators{\n\t\t\t\tLdap: &auth.Ldap{},\n\t\t\t\tLocal: &auth.Local{},\n\t\t\t\tAuthenticatorOrder: []string{auth.Authenticators_LOCAL.String()},\n\t\t\t},\n\t\t\tTokenExpiry: expiration.String(),\n\t\t},\n\t}\n\n\treturn policydata\n}", "func (d sqlDatastoreDialect) GetDatastores(manager Manager) ([]string, error) {\n\tvar rows = make([][]interface{}, 0)\n\terr := manager.ReadAll(&rows, d.allSchemaSQL, nil, nil)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"unable to open database\") {\n\t\t\treturn []string{}, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\tvar result = make([]string, 0)\n\tfor _, row := range rows {\n\t\tresult = append(result, normalizeName(toolbox.AsString(row[d.schemaResultsetIndex])))\n\t}\n\treturn result, nil\n}", "func GetServerSSLData(\n\tserver *Server,\n\tservers []Server,\n\tdses []DeliveryService,\n\tdss []DeliveryServiceServer,\n\tdsRegexArr []tc.DeliveryServiceRegexes,\n\ttcParentConfigParams []tc.Parameter,\n\tcdn *tc.CDN,\n\ttopologies 
[]tc.Topology,\n\tcacheGroupArr []tc.CacheGroupNullable,\n\tserverCapabilities map[int]map[ServerCapability]struct{},\n\tdsRequiredCapabilities map[int]map[ServerCapability]struct{},\n\tdefaultTLSVersions []TLSVersion,\n\tdefaultEnableH2 bool,\n) ([]SSLData, []string, error) {\n\twarnings := []string{}\n\n\tif len(server.ProfileNames) == 0 {\n\t\treturn nil, warnings, errors.New(\"this server missing Profiles\")\n\t}\n\n\tdsRegexes := MakeDSRegexMap(dsRegexArr)\n\n\tparentConfigParamsWithProfiles, err := tcParamsToParamsWithProfiles(tcParentConfigParams)\n\tif err != nil {\n\t\twarnings = append(warnings, \"error getting profiles from Traffic Ops Parameters, Parameters will not be considered for generation! : \"+err.Error())\n\t\tparentConfigParamsWithProfiles = []parameterWithProfiles{}\n\t}\n\n\tprofileParentConfigParams := map[string]map[string]string{} // map[profileName][paramName]paramVal\n\tfor _, param := range parentConfigParamsWithProfiles {\n\t\tfor _, profile := range param.ProfileNames {\n\t\t\tif _, ok := profileParentConfigParams[profile]; !ok {\n\t\t\t\tprofileParentConfigParams[profile] = map[string]string{}\n\t\t\t}\n\t\t\tprofileParentConfigParams[profile][param.Name] = param.Value\n\t\t}\n\t}\n\n\tcacheGroups, err := makeCGMap(cacheGroupArr)\n\tif err != nil {\n\t\treturn nil, warnings, errors.New(\"making cachegroup map: \" + err.Error())\n\t}\n\n\tnameTopologies := makeTopologyNameMap(topologies)\n\tanyCastPartners := GetAnyCastPartners(server, servers)\n\n\tsort.Sort(dsesSortByName(dses))\n\n\tsslDatas := []SSLData{}\n\n\tfor _, ds := range dses {\n\t\thasDS, err := dsUsesServer(&ds, server, dss, nameTopologies, cacheGroups, serverCapabilities, dsRequiredCapabilities)\n\t\tif err != nil {\n\t\t\twarnings = append(warnings, \"error checking if ds uses this server, considering false! 
Error: \"+err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tif !hasDS {\n\t\t\tcontinue\n\t\t}\n\n\t\tdsParentConfigParams := map[string]string{}\n\t\tif ds.ProfileName != nil {\n\t\t\tdsParentConfigParams = profileParentConfigParams[*ds.ProfileName]\n\t\t}\n\n\t\trequestFQDNs, err := GetDSRequestFQDNs(&ds, dsRegexes[tc.DeliveryServiceName(*ds.XMLID)], server, anyCastPartners, cdn.DomainName)\n\t\tif err != nil {\n\t\t\twarnings = append(warnings, \"error getting ds '\"+*ds.XMLID+\"' request fqdns, skipping! Error: \"+err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tenableH2 := defaultEnableH2\n\t\ttlsVersions := defaultTLSVersions\n\n\t\tdsTLSVersions := []TLSVersion{}\n\t\tfor _, tlsVersion := range ds.TLSVersions {\n\t\t\tif _, ok := tlsVersionsToATS[TLSVersion(tlsVersion)]; !ok {\n\t\t\t\twarnings = append(warnings, \"ds '\"+*ds.XMLID+\"' had unknown TLS Version '\"+tlsVersion+\"' - ignoring!\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdsTLSVersions = append(dsTLSVersions, TLSVersion(tlsVersion))\n\t\t}\n\t\tif len(dsTLSVersions) > 0 {\n\t\t\ttlsVersions = dsTLSVersions\n\t\t}\n\n\t\tparamValEnableH2 := dsParentConfigParams[SSLServerNameYAMLParamEnableH2]\n\t\tparamValEnableH2 = strings.TrimSpace(paramValEnableH2)\n\t\tparamValEnableH2 = strings.ToLower(paramValEnableH2)\n\n\t\tif paramValEnableH2 != \"\" {\n\t\t\tenableH2 = strings.HasPrefix(paramValEnableH2, \"t\") || strings.HasPrefix(paramValEnableH2, \"y\")\n\t\t}\n\n\t\tparamValTLSVersions := dsParentConfigParams[SSLServerNameYAMLParamTLSVersions]\n\t\tparamValTLSVersions = strings.Replace(paramValTLSVersions, \" \", \"\", -1)\n\t\tparamValTLSVersions = strings.TrimSpace(paramValTLSVersions)\n\n\t\tparamTLSVersions := []TLSVersion{}\n\t\tif paramValTLSVersions != \"\" {\n\t\t\t// Allow delimiting with commas, semicolons, spaces, or newlines.\n\t\t\tdelim := \",\"\n\t\t\tif !strings.Contains(paramValTLSVersions, delim) {\n\t\t\t\tdelim = \";\"\n\t\t\t}\n\t\t\tif !strings.Contains(paramValTLSVersions, delim) {\n\t\t\t\tdelim = 
\" \"\n\t\t\t}\n\t\t\tif !strings.Contains(paramValTLSVersions, delim) {\n\t\t\t\tdelim = \"\\n\"\n\t\t\t}\n\n\t\t\ttlsVersionsParamArr := strings.Split(paramValTLSVersions, delim)\n\t\t\tfor _, tlsVersion := range tlsVersionsParamArr {\n\t\t\t\tif _, ok := tlsVersionsToATS[TLSVersion(tlsVersion)]; !ok {\n\t\t\t\t\twarnings = append(warnings, \"ds '\"+*ds.XMLID+\"' had unknown \"+SSLServerNameYAMLParamTLSVersions+\" parameter '\"+tlsVersion+\"' - ignoring!\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tparamTLSVersions = append(paramTLSVersions, TLSVersion(tlsVersion))\n\t\t\t}\n\t\t}\n\n\t\t// let Parameters override the Delivery Service field, for backward-compatibility,\n\t\t// and also because this lets tenants who own multiple DSes set them all in a single\n\t\t// place, instead of duplicating for every DS (which will be even more de-duplicated\n\t\t// when Layered Profiles are implemented)\n\t\tif len(paramTLSVersions) != 0 {\n\t\t\ttlsVersions = paramTLSVersions\n\t\t}\n\n\t\tsslDatas = append(sslDatas, SSLData{\n\t\t\tDSName: *ds.XMLID,\n\t\t\tRequestFQDNs: requestFQDNs,\n\t\t\tEnableH2: enableH2,\n\t\t\tTLSVersions: tlsVersions,\n\t\t})\n\t}\n\n\treturn sslDatas, warnings, nil\n}", "func (*OperationSetPrivilege) Descriptor() ([]byte, []int) {\n\treturn file_chain_proto_rawDescGZIP(), []int{14}\n}", "func listDataSets(w http.ResponseWriter, r *http.Request) {\n\torgID := domain.GetOrganizationID(r)\n\n\tsets, err := models.ListDataSets(orgID.String())\n\tif err != nil {\n\t\trender.Error(w, r, err, &render.ErrorConfig{\n\t\t\tStatusCode: http.StatusInternalServerError,\n\t\t\tMessage: \"Unable to list datasets\",\n\t\t})\n\n\t\treturn\n\t}\n\n\tif strings.EqualFold(r.URL.Query().Get(\"applyPreSave\"), \"true\") {\n\t\ttotal := len(sets)\n\n\t\tfor idx, ds := range sets {\n\t\t\tif err := ds.Save(); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif idx%250 == 0 && idx > 0 
{\n\t\t\t\tlog.Printf(\"presaved %d of %d\", idx, total)\n\t\t\t}\n\t\t}\n\t}\n\n\trender.Status(r, http.StatusOK)\n\trender.JSON(w, r, sets)\n}", "func (r *ConfList) GetData() []string {\n\tr.mutex.RLock()\n\tdefer r.mutex.RUnlock()\n\td := make([]string, len(r.data))\n\tcopy(d, r.data)\n\treturn d\n}", "func getFeatureData(flexlmStats string) []featureUsage {\n\tvar featuresUsage []featureUsage\n\tvar features []string\n\tvar featureInfo, usersInfo string\n\tflexlmStats = strings.Trim(flexlmStats, \"\\n \\t\")\n\t// Split data by features. String \"Users of \" will be deleted.\n\tfeatures = splitdata(flexlmStats, featuresSeparator)\n\tif len(features[0]) == 0 {\n\t\tfeatures = features[1:]\n\t}\n\n\t// feture with users (data) will be processed\n\tfor i, data := range features {\n\t\tdata = strings.Trim(data, \"\\n \\t\")\n\t\t// split feature data and active users data\n\t\tslice := splitdata(data, \"\\n\\n\")\n\t\tif len(slice) > 1 {\n\t\t\tfeatureInfo, _, usersInfo = splitFeatureUsers(slice)\n\t\t}\n\t\tif len(slice) == 1 {\n\t\t\tfeatureInfo = slice[0]\n\t\t\tusersInfo = \"\"\n\t\t}\n\t\tfeaturesUsage = append(featuresUsage, parseFeatureData(featureInfo))\n\t\tif len(usersInfo) > 0 {\n\t\t\tfeaturesUsage[i].Users = getUsersData(usersInfo, featuresUsage[i].Feature)\n\t\t}\n\n\t}\n\n\treturn featuresUsage\n}", "func resourceDetails(cfg api.Config, context string) error {\n\tcs, err := csForContext(cfg, context)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Can't create a clientset based on config provided\")\n\t}\n\t_, reslist, err := cs.Discovery().ServerGroupsAndResources()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Can't get cluster server version\")\n\t}\n\tfmt.Println(\"Resources supported in this cluster:\")\n\tfor _, res := range reslist {\n\t\tfmt.Println(strings.Repeat(\"-\", 80))\n\t\tfmt.Printf(\"%v:\\n \", res.GroupVersion)\n\t\tfor _, r := range res.APIResources {\n\t\t\tif !strings.Contains(r.Name, \"/\") {\n\t\t\t\tfmt.Printf(\"%v 
(namespaced: %v) \", r.Name, r.Namespaced)\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n\tfmt.Println(strings.Repeat(\"*\", 80))\n\treturn nil\n}", "func (m *InformationProtection) GetDataLossPreventionPolicies()([]DataLossPreventionPolicyable) {\n val, err := m.GetBackingStore().Get(\"dataLossPreventionPolicies\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]DataLossPreventionPolicyable)\n }\n return nil\n}", "func getPolicyLogs(ctx context.Context, client *tfe.Client, r *tfe.Run) error {\n\tif r.PolicyChecks == nil {\n\t\treturn nil\n\t}\n\n\tfmt.Println(\"------------------------------------------------------------------------\")\n\n\tfor i, pc := range r.PolicyChecks {\n\t\t// Read the policy check logs. This is a blocking call that will only\n\t\t// return once the policy check is complete.\n\t\tlogs, err := client.PolicyChecks.Logs(ctx, pc.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treader := bufio.NewReaderSize(logs, 64*1024)\n\n\t\t// Retrieve the policy check to get its current status.\n\t\tpc, err := client.PolicyChecks.Read(ctx, pc.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If the run is canceled or errored, but the policy check still has\n\t\t// no result, there is nothing further to render.\n\t\tif r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored {\n\t\t\tswitch pc.Status {\n\t\t\tcase tfe.PolicyPending, tfe.PolicyQueued, tfe.PolicyUnreachable:\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tvar msgPrefix string\n\t\tswitch pc.Scope {\n\t\tcase tfe.PolicyScopeOrganization:\n\t\t\tmsgPrefix = \"Organization policy check\"\n\t\tcase tfe.PolicyScopeWorkspace:\n\t\t\tmsgPrefix = \"Workspace policy check\"\n\t\tdefault:\n\t\t\tmsgPrefix = fmt.Sprintf(\"Unknown policy check (%s)\", pc.Scope)\n\t\t}\n\n\t\tfmt.Println(msgPrefix + \":\")\n\n\t\tfor next := true; next; {\n\t\t\tvar l, line []byte\n\n\t\t\tfor isPrefix := true; isPrefix; {\n\t\t\t\tl, isPrefix, err = reader.ReadLine()\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tif err != io.EOF {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tnext = false\n\t\t\t\t}\n\t\t\t\tline = append(line, l...)\n\t\t\t}\n\n\t\t\tif next || len(line) > 0 {\n\t\t\t\tfmt.Println(string(line))\n\t\t\t}\n\t\t}\n\n\t\tswitch pc.Status {\n\t\tcase tfe.PolicyPasses:\n\t\t\tif r.HasChanges || i < len(r.PolicyChecks)-1 {\n\t\t\t\tfmt.Println(\"------------------------------------------------------------------------\")\n\t\t\t}\n\t\t\tcontinue\n\t\tcase tfe.PolicyErrored:\n\t\t\treturn fmt.Errorf(msgPrefix + \" errored.\")\n\t\tcase tfe.PolicyHardFailed:\n\t\t\treturn fmt.Errorf(msgPrefix + \" hard failed.\")\n\t\tcase tfe.PolicySoftFailed:\n\t\t\tfmt.Println(\"PolicySoftFailed\")\n\t\t\t// runUrl := fmt.Sprintf(runHeader, b.hostname, b.organization, op.Workspace, r.ID)\n\n\t\t\t// if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil ||\n\t\t\t// \t!pc.Actions.IsOverridable || !pc.Permissions.CanOverride {\n\t\t\t// \treturn fmt.Errorf(msgPrefix + \" soft failed.\\n\" + runUrl)\n\t\t\t// }\n\n\t\t\t// if op.AutoApprove {\n\t\t\t// \tif _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil {\n\t\t\t// \t\treturn generalError(fmt.Sprintf(\"Failed to override policy check.\\n%s\", runUrl), err)\n\t\t\t// \t}\n\t\t\t// } else {\n\t\t\t// \topts := &terraform.InputOpts{\n\t\t\t// \t\tId: \"override\",\n\t\t\t// \t\tQuery: \"\\nDo you want to override the soft failed policy check?\",\n\t\t\t// \t\tDescription: \"Only 'override' will be accepted to override.\",\n\t\t\t// \t}\n\t\t\t// \terr = b.confirm(stopCtx, op, opts, r, \"override\")\n\t\t\t// \tif err != nil && err != errRunOverridden {\n\t\t\t// \t\treturn fmt.Errorf(\n\t\t\t// \t\t\tfmt.Sprintf(\"Failed to override: %s\\n%s\\n\", err.Error(), runUrl),\n\t\t\t// \t\t)\n\t\t\t// \t}\n\n\t\t\t// \tif err != errRunOverridden {\n\t\t\t// \t\tif _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil {\n\t\t\t// \t\t\treturn 
generalError(fmt.Sprintf(\"Failed to override policy check.\\n%s\", runUrl), err)\n\t\t\t// \t\t}\n\t\t\t// \t} else {\n\t\t\t// \t\tb.CLI.Output(fmt.Sprintf(\"The run needs to be manually overridden or discarded.\\n%s\\n\", runUrl))\n\t\t\t// \t}\n\t\t\t// }\n\n\t\t\tfmt.Println(\"------------------------------------------------------------------------\")\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Unknown or unexpected policy state: %s\", pc.Status)\n\t\t}\n\t}\n\n\treturn nil\n}", "func GatherData(mStruct *manifest.Manifest, baseDir string, namespace string, instanceGroupName string) ([]byte, error) {\n\tjobReleaseSpecs, jobProviderLinks, err := CollectReleaseSpecsAndProviderLinks(mStruct, baseDir, namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ProcessConsumersAndRenderBPM(mStruct, baseDir, jobReleaseSpecs, jobProviderLinks, instanceGroupName)\n}", "func GetPolicyStatements(driverName string) *PolicyStatements {\n\tstmts := &PolicyStatements{}\n\tswitch driverName {\n\tcase \"postgres\", \"pg\", \"pgx\":\n\t\tstmts.QueryInsertPolicy = `INSERT INTO am.ladon_policy(id, description, effect, conditions, meta) SELECT $1::varchar, $2, $3, $4, $5 WHERE NOT EXISTS (SELECT 1 FROM am.ladon_policy WHERE id = $1)`\n\t\tstmts.QueryInsertPolicyActions = `INSERT INTO am.ladon_action (id, template, compiled, has_regex) SELECT $1::varchar, $2, $3, $4 WHERE NOT EXISTS (SELECT 1 FROM am.ladon_action WHERE id = $1)`\n\t\tstmts.QueryInsertPolicyActionsRel = `INSERT INTO am.ladon_policy_action_rel (policy, action) SELECT $1::varchar, $2::varchar WHERE NOT EXISTS (SELECT 1 FROM am.ladon_policy_action_rel WHERE policy = $1 AND action = $2)`\n\t\tstmts.QueryInsertPolicyResources = `INSERT INTO am.ladon_resource (id, template, compiled, has_regex) SELECT $1::varchar, $2, $3, $4 WHERE NOT EXISTS (SELECT 1 FROM am.ladon_resource WHERE id = $1)`\n\t\tstmts.QueryInsertPolicyResourcesRel = `INSERT INTO am.ladon_policy_resource_rel (policy, resource) SELECT $1::varchar, 
$2::varchar WHERE NOT EXISTS (SELECT 1 FROM am.ladon_policy_resource_rel WHERE policy = $1 AND resource = $2)`\n\t\tstmts.QueryInsertPolicySubjects = `INSERT INTO am.ladon_subject (id, template, compiled, has_regex) SELECT $1::varchar, $2, $3, $4 WHERE NOT EXISTS (SELECT 1 FROM am.ladon_subject WHERE id = $1)`\n\t\tstmts.QueryInsertPolicySubjectsRel = `INSERT INTO am.ladon_policy_subject_rel (policy, subject) SELECT $1::varchar, $2::varchar WHERE NOT EXISTS (SELECT 1 FROM am.ladon_policy_subject_rel WHERE policy = $1 AND subject = $2)`\n\t\tstmts.QueryRequestCandidates = `\n\t\tSELECT\n\t\t\tp.id,\n\t\t\tp.effect,\n\t\t\tp.conditions,\n\t\t\tp.description,\n\t\t\tp.meta,\n\t\t\tsubject.template AS subject,\n\t\t\tresource.template AS resource,\n\t\t\taction.template AS action\n\t\tFROM\n\t\t\tam.ladon_policy AS p\n\n\t\t\tINNER JOIN am.ladon_policy_subject_rel AS rs ON rs.policy = p.id\n\t\t\tLEFT JOIN am.ladon_policy_action_rel AS ra ON ra.policy = p.id\n\t\t\tLEFT JOIN am.ladon_policy_resource_rel AS rr ON rr.policy = p.id\n\n\t\t\tINNER JOIN am.ladon_subject AS subject ON rs.subject = subject.id\n\t\t\tLEFT JOIN am.ladon_action AS action ON ra.action = action.id\n\t\t\tLEFT JOIN am.ladon_resource AS resource ON rr.resource = resource.id\n\t\tWHERE\n\t\t\t(subject.has_regex IS NOT TRUE AND subject.template = $1)\n\t\t\tOR\n\t\t\t(subject.has_regex IS TRUE AND $2 ~ subject.compiled)`\n\t\tstmts.GetQuery = `SELECT\n\t\t\tp.id, p.effect, p.conditions, p.description, p.meta,\n\t\t\tsubject.template as subject, resource.template as resource, action.template as action\n\t\tFROM\n\t\t\tam.ladon_policy as p\n\t\t\n\t\tLEFT JOIN am.ladon_policy_subject_rel as rs ON rs.policy = p.id\n\t\tLEFT JOIN am.ladon_policy_action_rel as ra ON ra.policy = p.id\n\t\tLEFT JOIN am.ladon_policy_resource_rel as rr ON rr.policy = p.id\n\t\t\n\t\tLEFT JOIN am.ladon_subject as subject ON rs.subject = subject.id\n\t\tLEFT JOIN am.ladon_action as action ON ra.action = action.id\n\t\tLEFT JOIN 
am.ladon_resource as resource ON rr.resource = resource.id\n\t\t\n\t\tWHERE p.id=$1`\n\t\tstmts.GetAllQuery = `SELECT\n\tp.id, p.effect, p.conditions, p.description, p.meta,\n\tsubject.template as subject, resource.template as resource, action.template as action\nFROM\n\t(SELECT * from am.ladon_policy ORDER BY id LIMIT $1 OFFSET $2) as p\n\nLEFT JOIN am.ladon_policy_subject_rel as rs ON rs.policy = p.id\nLEFT JOIN am.ladon_policy_action_rel as ra ON ra.policy = p.id\nLEFT JOIN am.ladon_policy_resource_rel as rr ON rr.policy = p.id\n\nLEFT JOIN am.ladon_subject as subject ON rs.subject = subject.id\nLEFT JOIN am.ladon_action as action ON ra.action = action.id\nLEFT JOIN am.ladon_resource as resource ON rr.resource = resource.id`\n\t\tstmts.DeletePolicy = \"DELETE FROM am.ladon_policy WHERE id=$1\"\n\t\tstmts.QueryPoliciesForSubject = `\n\t\tSELECT\n\t\t\tp.id,\n\t\t\tp.effect,\n\t\t\tp.conditions,\n\t\t\tp.description,\n\t\t\tp.meta,\n\t\t\tsubject.template AS subject,\n\t\t\tresource.template AS resource,\n\t\t\taction.template AS action\n\t\tFROM\n\t\t\tam.ladon_policy AS p\n\t\t\tINNER JOIN am.ladon_policy_subject_rel AS rs ON rs.policy = p.id\n\t\t\tLEFT JOIN am.ladon_policy_action_rel AS ra ON ra.policy = p.id\n\t\t\tLEFT JOIN am.ladon_policy_resource_rel AS rr ON rr.policy = p.id\n\t\t\tINNER JOIN am.ladon_subject AS subject ON rs.subject = subject.id\n\t\t\tLEFT JOIN am.ladon_action AS action ON ra.action = action.id\n\t\t\tLEFT JOIN am.ladon_resource AS resource ON rr.resource = resource.id\n\t\tWHERE\n\t\t\t(subject.has_regex IS NOT TRUE AND subject.template = $1)\n\t\t\tOR\n\t\t\t(subject.has_regex IS TRUE AND $2 ~ subject.compiled)`\n\t\tstmts.QueryPoliciesForResource = `\n\t\tSELECT\n\t\t\tp.id,\n\t\t\tp.effect,\n\t\t\tp.conditions,\n\t\t\tp.description,\n\t\t\tp.meta,\n\t\t\tsubject.template AS subject,\n\t\t\tresource.template AS resource,\n\t\t\taction.template AS action\n\t\tFROM\n\t\tam.ladon_policy AS p\n\t\t\tINNER JOIN am.ladon_policy_subject_rel 
AS rs ON rs.policy = p.id\n\t\t\tLEFT JOIN am.ladon_policy_action_rel AS ra ON ra.policy = p.id\n\t\t\tLEFT JOIN am.ladon_policy_resource_rel AS rr ON rr.policy = p.id\n\t\t\tINNER JOIN am.ladon_subject AS subject ON rs.subject = subject.id\n\t\t\tLEFT JOIN am.ladon_action AS action ON ra.action = action.id\n\t\t\tLEFT JOIN am.ladon_resource AS resource ON rr.resource = resource.id\n\t\tWHERE\n\t\t\t(resource.has_regex IS NOT TRUE AND resource.template = $1)\n\t\t\tOR\n\t\t\t(resource.has_regex IS TRUE AND $2 ~ resource.compiled)`\n\tcase \"mysql\":\n\t\tstmts.QueryInsertPolicy = `INSERT IGNORE INTO am.ladon_policy (id, description, effect, conditions, meta) VALUES(?,?,?,?,?)`\n\t\tstmts.QueryInsertPolicyActions = `INSERT IGNORE INTO am.ladon_action (id, template, compiled, has_regex) VALUES(?,?,?,?)`\n\t\tstmts.QueryInsertPolicyActionsRel = `INSERT IGNORE INTO am.ladon_policy_action_rel (policy, action) VALUES(?,?)`\n\t\tstmts.QueryInsertPolicyResources = `INSERT IGNORE INTO am.ladon_resource (id, template, compiled, has_regex) VALUES(?,?,?,?)`\n\t\tstmts.QueryInsertPolicyResourcesRel = `INSERT IGNORE INTO am.ladon_policy_resource_rel (policy, resource) VALUES(?,?)`\n\t\tstmts.QueryInsertPolicySubjects = `INSERT IGNORE INTO am.ladon_subject (id, template, compiled, has_regex) VALUES(?,?,?,?)`\n\t\tstmts.QueryInsertPolicySubjectsRel = `INSERT IGNORE INTO am.ladon_policy_subject_rel (policy, subject) VALUES(?,?)`\n\t\tstmts.QueryRequestCandidates = `\n\tSELECT\n\t\tp.id,\n\t\tp.effect,\n\t\tp.conditions,\n\t\tp.description,\n\t\tp.meta,\n\t\tsubject.template AS subject,\n\t\tresource.template AS resource,\n\t\taction.template AS action\n\tFROM\n\t\tam.ladon_policy AS p\n\n\t\tINNER JOIN am.ladon_policy_subject_rel AS rs ON rs.policy = p.id\n\t\tLEFT JOIN am.ladon_policy_action_rel AS ra ON ra.policy = p.id\n\t\tLEFT JOIN am.ladon_policy_resource_rel AS rr ON rr.policy = p.id\n\n\t\tINNER JOIN am.ladon_subject AS subject ON rs.subject = subject.id\n\t\tLEFT JOIN 
am.ladon_action AS action ON ra.action = action.id\n\t\tLEFT JOIN am.ladon_resource AS resource ON rr.resource = resource.id\n\tWHERE\n\t\t(subject.has_regex = 0 AND subject.template = ?)\n\t\tOR\n\t\t(subject.has_regex = 1 AND ? REGEXP BINARY subject.compiled)`\n\t\tstmts.GetQuery = `SELECT\n\t\tp.id, p.effect, p.conditions, p.description, p.meta,\n\t\tsubject.template as subject, resource.template as resource, action.template as action\n\tFROM\n\t\tam.ladon_policy as p\n\t\n\tLEFT JOIN am.ladon_policy_subject_rel as rs ON rs.policy = p.id\n\tLEFT JOIN am.ladon_policy_action_rel as ra ON ra.policy = p.id\n\tLEFT JOIN am.ladon_policy_resource_rel as rr ON rr.policy = p.id\n\t\n\tLEFT JOIN am.ladon_subject as subject ON rs.subject = subject.id\n\tLEFT JOIN am.ladon_action as action ON ra.action = action.id\n\tLEFT JOIN am.ladon_resource as resource ON rr.resource = resource.id\n\t\n\tWHERE p.id=?`\n\t\tstmts.GetAllQuery = `SELECT\n\tp.id, p.effect, p.conditions, p.description, p.meta,\n\tsubject.template as subject, resource.template as resource, action.template as action\nFROM\n\t(SELECT * from am.ladon_policy ORDER BY id LIMIT ? OFFSET ?) 
as p\n\nLEFT JOIN am.ladon_policy_subject_rel as rs ON rs.policy = p.id\nLEFT JOIN am.ladon_policy_action_rel as ra ON ra.policy = p.id\nLEFT JOIN am.ladon_policy_resource_rel as rr ON rr.policy = p.id\n\nLEFT JOIN am.ladon_subject as subject ON rs.subject = subject.id\nLEFT JOIN am.ladon_action as action ON ra.action = action.id\nLEFT JOIN am.ladon_resource as resource ON rr.resource = resource.id`\n\t\tstmts.DeletePolicy = \"DELETE FROM am.ladon_policy WHERE id=?\"\n\t\tstmts.QueryPoliciesForSubject = ``\n\t\tstmts.QueryPoliciesForResource = ``\n\tdefault:\n\t\treturn nil\n\t}\n\treturn stmts\n}", "func (*RunnableResourcePolicySet_Policy) Descriptor() ([]byte, []int) {\n\treturn file_cerbos_runtime_v1_runtime_proto_rawDescGZIP(), []int{1, 1}\n}", "func CollectNetworkData(context context.T) (data []model.NetworkData) {\n\tvar output, dataB []byte\n\tvar err error\n\tvar singleInterface NwInterface\n\tvar multipleInterfaces []NwInterface\n\n\tlog := context.Log()\n\n\tlog.Infof(\"Collecting all networking interfaces by executing command:\\n%v %v\", cmd, cmdArgsToGetListAllInterfaces)\n\n\tif output, err = cmdExecutor(cmd, cmdArgsToGetListAllInterfaces); err == nil {\n\t\tcmdOutput := string(output)\n\t\tlog.Debugf(\"Command output: %v\", cmdOutput)\n\n\t\t//windows command can either return a single network interface or an array of network interfaces\n\t\tif err = json.Unmarshal(output, &singleInterface); err == nil {\n\n\t\t\tdata = append(data, convertToNetworkData(singleInterface))\n\n\t\t} else if err = json.Unmarshal(output, &multipleInterfaces); err == nil {\n\n\t\t\tfor _, nwInterface := range multipleInterfaces {\n\t\t\t\tdata = append(data, convertToNetworkData(nwInterface))\n\t\t\t}\n\n\t\t} else {\n\t\t\tlog.Infof(\"Unable to get network interface info because of unexpected command output - %v\",\n\t\t\t\tcmdOutput)\n\t\t\treturn\n\t\t}\n\n\t\tdataB, _ = json.Marshal(data)\n\t\tlog.Debugf(\"Basic network interface data collected so far: %v\", 
jsonutil.Indent(string(dataB)))\n\n\t\t//collecting advanced network information for those interfaces\n\t\tdata = GetAdvancedNetworkData(context, data)\n\n\t} else {\n\t\tlog.Debugf(\"Failed to execute command : %v %v with error - %v\",\n\t\t\tcmd,\n\t\t\tcmdArgsToGetListAllInterfaces,\n\t\t\terr.Error())\n\t\tlog.Errorf(\"Command failed with error: %v\", string(output))\n\t\tlog.Infof(\"Unable to get network data on windows platform\")\n\t}\n\n\treturn\n}", "func (o *IPFixGen) getDataTemplateSets() layers.IPFixSets {\n\n\ttemplateEntry := layers.NewIPFixTemplate(o.templateID, o.templateFields)\n\tsetID := uint16(layers.IpfixTemplateSetIDVer10)\n\tif o.ipfixPlug.ver == 9 {\n\t\tsetID = layers.IpfixTemplateSetIDVer9\n\t}\n\n\treturn layers.IPFixSets{\n\t\tlayers.IPFixSet{\n\t\t\tID: setID,\n\t\t\tSetEntries: layers.IPFixSetEntries{\n\t\t\t\tlayers.IPFixSetEntry(templateEntry),\n\t\t\t},\n\t\t},\n\t}\n}", "func (o *StatsAppliancesListAllOf) GetData() []StatsAppliancesListAllOfData {\n\tif o == nil || o.Data == nil {\n\t\tvar ret []StatsAppliancesListAllOfData\n\t\treturn ret\n\t}\n\treturn *o.Data\n}", "func (*RunnablePrincipalPolicySet_Policy) Descriptor() ([]byte, []int) {\n\treturn file_cerbos_runtime_v1_runtime_proto_rawDescGZIP(), []int{4, 1}\n}", "func DatasetsHandler(metaCtors []model.MetadataStorageCtor) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar datasets []*model.Dataset\n\t\t// check for search terms\n\t\tterms, err := url.QueryUnescape(r.URL.Query().Get(\"search\"))\n\t\tif err != nil {\n\t\t\thandleError(w, errors.Wrap(err, \"Malformed datasets query\"))\n\t\t\treturn\n\t\t}\n\t\tfor _, ctor := range metaCtors {\n\t\t\t// get metadata client\n\t\t\tstorage, err := ctor()\n\t\t\tif err != nil {\n\t\t\t\thandleError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// if its present, forward a search, otherwise fetch all datasets\n\t\t\tvar datasetsPart []*model.Dataset\n\t\t\tif terms != \"\" 
{\n\t\t\t\tdatasetsPart, err = storage.SearchDatasets(terms, false, false)\n\t\t\t} else {\n\t\t\t\tdatasetsPart, err = storage.FetchDatasets(false, false)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\t//handleError(w, err)\n\t\t\t\tlog.Warnf(\"error querying dataset: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// render dataset description as HTML\n\t\t\tfor _, dataset := range datasetsPart {\n\t\t\t\tdataset.Description = renderMarkdown(dataset.Description)\n\t\t\t}\n\n\t\t\tdatasets = append(datasets, datasetsPart...)\n\t\t}\n\n\t\t// imported datasets override non-imported datasets\n\t\texists := make(map[string]*model.Dataset)\n\t\tfor _, dataset := range datasets {\n\t\t\texisting, ok := exists[dataset.ID]\n\t\t\tif !ok {\n\t\t\t\t// we don't have it, add it\n\t\t\t\texists[dataset.ID] = dataset\n\t\t\t} else {\n\t\t\t\t// we already have it, if it is `dataset`, replace it\n\t\t\t\tif existing.Provenance == datamart.ProvenanceNYU || existing.Provenance == datamart.ProvenanceISI {\n\t\t\t\t\texists[dataset.ID] = dataset\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tvar deconflicted []*model.Dataset\n\t\tfor _, dataset := range exists {\n\t\t\tdeconflicted = append(deconflicted, dataset)\n\t\t}\n\n\t\t// marshal data\n\t\terr = handleJSON(w, DatasetsResult{\n\t\t\tDatasets: deconflicted,\n\t\t})\n\t\tif err != nil {\n\t\t\thandleError(w, errors.Wrap(err, \"unable marshal dataset result into JSON\"))\n\t\t\treturn\n\t\t}\n\t}\n}", "func DatasetListNext(name string, cursor uint64) (string, uint64, DMUObjectSetStats, DatasetPropsWithSource, error) {\n\tcmd := &Cmd{\n\t\tCookie: cursor,\n\t}\n\tprops := make(DatasetPropsWithSource)\n\tif err := NvlistIoctl(zfsHandle.Fd(), ZFS_IOC_DATASET_LIST_NEXT, name, cmd, nil, props, nil); err != nil {\n\t\treturn \"\", 0, DMUObjectSetStats{}, props, err\n\t}\n\treturn delimitedBufToString(cmd.Name[:]), cmd.Cookie, cmd.Objset_stats, props, nil\n}", "func (o *PrivilegedBagData) GetData() map[string]string {\n\tif o == nil {\n\t\tvar ret 
map[string]string\n\t\treturn ret\n\t}\n\n\treturn o.Data\n}", "func (o DatasetAccessAuthorizedDatasetOutput) Dataset() DatasetAccessAuthorizedDatasetDatasetOutput {\n\treturn o.ApplyT(func(v DatasetAccessAuthorizedDataset) DatasetAccessAuthorizedDatasetDataset { return v.Dataset }).(DatasetAccessAuthorizedDatasetDatasetOutput)\n}", "func (o DataExchangeIamPolicyOutput) PolicyData() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DataExchangeIamPolicy) pulumi.StringOutput { return v.PolicyData }).(pulumi.StringOutput)\n}", "func (d *Driver) Status() [][2]string {\n\tparts := strings.Split(d.dataset.Name, \"/\")\n\tpool, err := zfs.GetZpool(parts[0])\n\n\tvar poolName, poolHealth string\n\tif err == nil {\n\t\tpoolName = pool.Name\n\t\tpoolHealth = pool.Health\n\t} else {\n\t\tpoolName = fmt.Sprintf(\"error while getting pool information %v\", err)\n\t\tpoolHealth = \"not available\"\n\t}\n\n\tquota := \"no\"\n\tif d.dataset.Quota != 0 {\n\t\tquota = strconv.FormatUint(d.dataset.Quota, 10)\n\t}\n\n\treturn [][2]string{\n\t\t{\"Zpool\", poolName},\n\t\t{\"Zpool Health\", poolHealth},\n\t\t{\"Parent Dataset\", d.dataset.Name},\n\t\t{\"Space Used By Parent\", strconv.FormatUint(d.dataset.Used, 10)},\n\t\t{\"Space Available\", strconv.FormatUint(d.dataset.Avail, 10)},\n\t\t{\"Parent Quota\", quota},\n\t\t{\"Compression\", d.dataset.Compression},\n\t}\n}", "func (*ListEnabledHTTPAccessLogPoliciesResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_http_access_log_policy_proto_rawDescGZIP(), []int{2}\n}", "func (o *IPFixGen) getDataSets() (layers.IPFixSets, uint32) {\n\tvar setEntries layers.IPFixSetEntries\n\tavailablePayload := o.availableRecordPayload\n\tlongestRecord := o.calcLongestRecord()\n\n\trecordsNumToSend := o.recordsNumToSent\n\tif o.ipfixPlug.stats.maxDataRecordsToSend > 0 {\n\t\trecordsNumToSend = uint32(math.Min(float64(o.recordsNumToSent),\n\t\t\tfloat64(o.ipfixPlug.stats.maxDataRecordsToSend-o.ipfixPlug.stats.recordsDataSent)))\n\t}\n\n\tfor 
i := uint32(0); i < recordsNumToSend; i++ {\n\t\tif availablePayload < longestRecord {\n\t\t\t// in case we don't have variable length fields this shouldn't happen\n\t\t\tbreak\n\t\t} else {\n\t\t\tdata := o.getDataRecord()\n\t\t\tavailablePayload -= len(data)\n\t\t\tsetEntries = append(setEntries, layers.IPFixSetEntry(&layers.IPFixRecord{Data: data}))\n\t\t}\n\t}\n\treturn layers.IPFixSets{\n\t\tlayers.IPFixSet{\n\t\t\tID: o.templateID,\n\t\t\tSetEntries: setEntries,\n\t\t},\n\t}, uint32(len(setEntries))\n}", "func FileDescriptorSet() *descriptor.FileDescriptorSet {\n\t// We just need ONE of the service names to look up the FileDescriptorSet.\n\tret, err := discovery.GetDescriptorSet(\"logdog.Admin\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ret\n}", "func FileDescriptorSet() *descriptor.FileDescriptorSet {\n\t// We just need ONE of the service names to look up the FileDescriptorSet.\n\tret, err := discovery.GetDescriptorSet(\"logdog.Admin\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ret\n}", "func (o DatasetAccessRoutinePtrOutput) DatasetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DatasetAccessRoutine) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.DatasetId\n\t}).(pulumi.StringPtrOutput)\n}", "func dataSourceSecurityGroupAttributes(d *schema.ResourceData, data map[string]interface{}) error {\n security_group_id := data[\"id\"].(string)\n log.Printf(\"[DEBUG] Retrieved twcc_security_group: %s\", security_group_id)\n\n d.SetId(security_group_id)\n d.Set(\"name\", data[\"name\"])\n security_group_rules := flattenSecurityGroupRulesInfo(data[\"security_group_rules\"].([]interface{}))\n d.Set(\"security_group_rules\", security_group_rules)\n\n return nil\n}", "func (l TestableLog) TestData() []settings.TestBundle {\n\treturn []settings.TestBundle{\n\t\t{\n\t\t\tDescription: \"Basic log\",\n\t\t\tData: templates.Args{\n\t\t\t\t\"Log\": \"This is the log\",\n\t\t\t\t\"Closed\": true,\n\t\t\t},\n\t\t},\n\t}\n}", "func 
(o *GuardianPolicyAddSuccess) GetData() []GuardianPolicyAddSuccessData {\n\tif o == nil || o.Data == nil {\n\t\tvar ret []GuardianPolicyAddSuccessData\n\t\treturn ret\n\t}\n\treturn *o.Data\n}", "func (s *BasePlSqlParserListener) ExitSet_container_data(ctx *Set_container_dataContext) {}", "func (*RunnablePrincipalPolicySet_Metadata) Descriptor() ([]byte, []int) {\n\treturn file_cerbos_runtime_v1_runtime_proto_rawDescGZIP(), []int{4, 0}\n}", "func (*FindEnabledHTTPFirewallRuleSetResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_http_firewall_rule_set_proto_rawDescGZIP(), []int{6}\n}", "func getDataPoints(pmd pmetric.Metric, metadata cWMetricMetadata, logger *zap.Logger) dataPoints {\n\tmetricMetadata := deltaMetricMetadata{\n\t\tadjustToDelta: false,\n\t\tretainInitialValueForDelta: metadata.retainInitialValueForDelta,\n\t\tmetricName: pmd.Name(),\n\t\tnamespace: metadata.namespace,\n\t\tlogGroup: metadata.logGroup,\n\t\tlogStream: metadata.logStream,\n\t}\n\n\tvar dps dataPoints\n\n\t//exhaustive:enforce\n\tswitch pmd.Type() {\n\tcase pmetric.MetricTypeGauge:\n\t\tmetric := pmd.Gauge()\n\t\tdps = numberDataPointSlice{\n\t\t\tmetricMetadata,\n\t\t\tmetric.DataPoints(),\n\t\t}\n\tcase pmetric.MetricTypeSum:\n\t\tmetric := pmd.Sum()\n\t\tmetricMetadata.adjustToDelta = metric.AggregationTemporality() == pmetric.AggregationTemporalityCumulative\n\t\tdps = numberDataPointSlice{\n\t\t\tmetricMetadata,\n\t\t\tmetric.DataPoints(),\n\t\t}\n\tcase pmetric.MetricTypeHistogram:\n\t\tmetric := pmd.Histogram()\n\t\tdps = histogramDataPointSlice{\n\t\t\tmetricMetadata,\n\t\t\tmetric.DataPoints(),\n\t\t}\n\tcase pmetric.MetricTypeExponentialHistogram:\n\t\tmetric := pmd.ExponentialHistogram()\n\t\tdps = exponentialHistogramDataPointSlice{\n\t\t\tmetricMetadata,\n\t\t\tmetric.DataPoints(),\n\t\t}\n\tcase pmetric.MetricTypeSummary:\n\t\tmetric := pmd.Summary()\n\t\t// For summaries coming from the prometheus receiver, the sum and count are cumulative, whereas for 
summaries\n\t\t// coming from other sources, e.g. SDK, the sum and count are delta by being accumulated and reset periodically.\n\t\t// In order to ensure metrics are sent as deltas, we check the receiver attribute (which can be injected by\n\t\t// attribute processor) from resource metrics. If it exists, and equals to prometheus, the sum and count will be\n\t\t// converted.\n\t\t// For more information: https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/prometheusreceiver/DESIGN.md#summary\n\t\tmetricMetadata.adjustToDelta = metadata.receiver == prometheusReceiver\n\t\tdps = summaryDataPointSlice{\n\t\t\tmetricMetadata,\n\t\t\tmetric.DataPoints(),\n\t\t}\n\tdefault:\n\t\tlogger.Warn(\"Unhandled metric data type.\",\n\t\t\tzap.String(\"DataType\", pmd.Type().String()),\n\t\t\tzap.String(\"Name\", pmd.Name()),\n\t\t\tzap.String(\"Unit\", pmd.Unit()),\n\t\t)\n\t}\n\n\treturn dps\n}", "func (o BigQueryDatasetSourceResponseOutput) Dataset() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BigQueryDatasetSourceResponse) string { return v.Dataset }).(pulumi.StringOutput)\n}", "func (s *BasePlSqlParserListener) EnterSet_container_data(ctx *Set_container_dataContext) {}", "func (m *DeviceHealthAttestationState) GetDataExcutionPolicy()(*string) {\n val, err := m.GetBackingStore().Get(\"dataExcutionPolicy\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (*ListDatasetsResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_automl_v1_service_proto_rawDescGZIP(), []int{3}\n}", "func (o *PermissionOptionsPagination) GetData() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Data\n}", "func (*RunnableResourcePolicySet_Metadata) Descriptor() ([]byte, []int) {\n\treturn file_cerbos_runtime_v1_runtime_proto_rawDescGZIP(), []int{1, 0}\n}", "func dataset4release(dasquery dasql.DASQuery) []string {\n\tspec := dasquery.Spec\n\tinst := 
dasquery.Instance\n\tvar out []string\n\tapi := \"datasets\"\n\trelease := spec[\"release\"].(string)\n\tfurl := fmt.Sprintf(\"%s/%s?release_version=%s\", DBSUrl(inst), api, release)\n\tparent := spec[\"parent\"]\n\tif parent != nil {\n\t\tfurl = fmt.Sprintf(\"%s&parent_dataset=%s\", furl, parent.(string))\n\t}\n\tstatus := spec[\"status\"]\n\tif status != nil {\n\t\tfurl = fmt.Sprintf(\"%s&dataset_access_type=%s\", furl, status.(string))\n\t}\n\tclient := utils.HttpClient()\n\tresp := utils.FetchResponse(client, furl, \"\") // \"\" specify optional args\n\trecords := DBSUnmarshal(api, resp.Data)\n\tfor _, rec := range records {\n\t\tif rec[\"name\"] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tdataset := rec[\"name\"].(string)\n\t\tif !utils.InList(dataset, out) {\n\t\t\tout = append(out, dataset)\n\t\t}\n\t}\n\treturn out\n}", "func Analysis(event *api.Event, dmanager *docker.Manager) error {\n\timagename, ok := event.Data[\"image-name\"]\n\ttagname, ok2 := event.Data[\"tag-name\"]\n\n\tif !ok || !ok2 {\n\t\treturn fmt.Errorf(\"Unable to retrieve image name\")\n\t}\n\timageName := imagename.(string) + \":\" + tagname.(string)\n\n\tvulnerabilities, err := AnalysisImage(dmanager, imageName)\n\n\tif err != nil {\n\t\tlog.Errorf(\"clair analysis %s err: %v\", imageName, err)\n\t\treturn err\n\t}\n\tevent.Version.SecurityCheck = true\n\tfor _, vulnerability := range vulnerabilities {\n\t\tif vulnerability.Severity == string(Medium) ||\n\t\t\tvulnerability.Severity == string(High) ||\n\t\t\tvulnerability.Severity == string(Critical) ||\n\t\t\tvulnerability.Severity == string(Defcon1) {\n\t\t\tsecurity := api.Security{}\n\t\t\tsecurity.Name = vulnerability.Name\n\t\t\tsecurity.Description = vulnerability.Description\n\t\t\tsecurity.Severity = vulnerability.Severity\n\t\t\tevent.Version.SecurityInfo = append(event.Version.SecurityInfo, security)\n\t\t}\n\t}\n\n\treturn nil\n}", "func dataToSg(name string, d *schema.ResourceData) go_thunder.ServiceGroup {\n\t//\tlogger := 
util.GetLoggerInstance()\n\tvar s go_thunder.ServiceGroup\n\n\tvar sInstance go_thunder.ServiceGroupInstance\n\n\tsInstance.ConnRate = d.Get(\"conn_rate\").(int)\n\tsInstance.ResetOnServerSelectionFail = d.Get(\"reset_on_server_selection_fail\").(int)\n\tsInstance.HealthCheckDisable = d.Get(\"health_check_disable\").(int)\n\tsInstance.Protocol = d.Get(\"protocol\").(string)\n\tsInstance.TrafficReplicationMirrorIPRepl = d.Get(\"traffic_replication_mirror_ip_repl\").(int)\n\tsInstance.ResetPriorityAffinity = d.Get(\"reset_priority_affinity\").(int)\n\tsInstance.MinActiveMember = d.Get(\"min_active_member\").(int)\n\tsInstance.StatsDataAction = d.Get(\"stats_data_action\").(string)\n\tsInstance.TrafficReplicationMirrorDaRepl = d.Get(\"traffic_replication_mirror_da_repl\").(int)\n\tsInstance.TemplatePolicyShared = d.Get(\"template_policy_shared\").(string)\n\tsInstance.RptExtServer = d.Get(\"rpt_ext_server\").(int)\n\tsInstance.TemplatePort = d.Get(\"template_port\").(string)\n\tsInstance.ConnRateGracePeriod = d.Get(\"conn_rate_grace_period\").(int)\n\tsInstance.L4SessionUsageDuration = d.Get(\"l4_session_usage\").(int)\n\tsInstance.UUID = d.Get(\"uuid\").(string)\n\tsInstance.BackupServerEventLog = d.Get(\"backup_server_event_log\").(int)\n\tsInstance.LcMethod = d.Get(\"lc_method\").(string)\n\tsInstance.PseudoRoundRobin = d.Get(\"pseudo_round_robin\").(int)\n\tsInstance.SharedPartitionPolicyTemplate = d.Get(\"shared_partition_policy_template\").(int)\n\tsInstance.L4SessionUsageRevertRate = d.Get(\"l4_session_usage_revert_rate\").(int)\n\tsInstance.SharedPartitionSvcgrpHealthCheck = d.Get(\"shared_partition_svcgrp_health_check\").(int)\n\tsInstance.TemplateServer = d.Get(\"template_server\").(string)\n\tsInstance.SvcgrpHealthCheckShared = d.Get(\"svcgrp_health_check_shared\").(string)\n\tsInstance.TrafficReplicationMirror = d.Get(\"traffic_replication_mirror\").(int)\n\tsInstance.L4SessionRevertDuration = 
d.Get(\"l4_session_revert_duration\").(int)\n\tsInstance.TrafficReplicationMirrorSaDaRepl = d.Get(\"traffic_replication_mirror_sa_da_repl\").(int)\n\tsInstance.LbMethod = d.Get(\"lb_method\").(string)\n\tsInstance.StatelessAutoSwitch = d.Get(\"stateless_auto_switch\").(int)\n\tsInstance.MinActiveMemberAction = d.Get(\"min_active_member_action\").(string)\n\tsInstance.L4SessionUsage = d.Get(\"l4_session_usage\").(int)\n\tsInstance.ExtendedStats = d.Get(\"extended_stats\").(int)\n\tsInstance.ConnRateRevertDuration = d.Get(\"conn_rate_revert_duration\").(int)\n\tsInstance.StrictSelect = d.Get(\"strict_select\").(int)\n\tsInstance.Name = d.Get(\"name\").(string)\n\tsInstance.TrafficReplicationMirrorSaRepl = d.Get(\"traffic_replication_mirror_sa_repl\").(int)\n\tsInstance.ReportDelay = d.Get(\"report_delay\").(int)\n\tsInstance.ConnRateLog = d.Get(\"conn_rate_log\").(int)\n\tsInstance.L4SessionUsageLog = d.Get(\"l4_session_usage_log\").(int)\n\tsInstance.ConnRateDuration = d.Get(\"conn_rate_duration\").(int)\n\tsInstance.StatelessLbMethod = d.Get(\"stateless_lb_method\").(string)\n\tsInstance.TemplatePolicy = d.Get(\"template_policy\").(string)\n\tsInstance.StatelessLbMethod2 = d.Get(\"stateless_lb_method2\").(string)\n\tsInstance.UserTag = d.Get(\"user_tag\").(string)\n\tsInstance.SampleRspTime = d.Get(\"sample_rsp_time\").(int)\n\tsInstance.TopFastest = d.Get(\"top_fastest\").(int)\n\tsInstance.ConnRevertRate = d.Get(\"conn_revert_rate\").(int)\n\tsInstance.L4SessionUsageGracePeriod = d.Get(\"l4_session_usage_grace_period\").(int)\n\tsInstance.PriorityAffinity = d.Get(\"priority_affinity\").(int)\n\tsInstance.TopSlowest = d.Get(\"top_slowest\").(int)\n\tsInstance.HealthCheck = d.Get(\"health_check\").(string)\n\n\tpriorityCount := d.Get(\"priorities.#\").(int)\n\tsInstance.Priority = make([]go_thunder.Priorities, 0, priorityCount)\n\tfor i := 0; i < priorityCount; i++ {\n\t\tvar pr go_thunder.Priorities\n\t\tprefix := fmt.Sprintf(\"priorities.%d\", i)\n\t\tpr.Priority 
= d.Get(prefix + \".priority\").(int)\n\t\tpr.PriorityAction = d.Get(prefix + \".priority_action\").(string)\n\n\t\tsInstance.Priority = append(sInstance.Priority, pr)\n\t}\n\n\tsamplingCount := d.Get(\"sampling_enable.#\").(int)\n\tsInstance.Counters1 = make([]go_thunder.SamplingEnable, 0, samplingCount)\n\tfor i := 0; i < samplingCount; i++ {\n\t\tvar sm go_thunder.SamplingEnable\n\t\tprefix := fmt.Sprintf(\"sampling_enable.%d\", i)\n\t\tsm.Counters1 = d.Get(prefix + \".counters1\").(string)\n\n\t\tsInstance.Counters1 = append(sInstance.Counters1, sm)\n\t}\n\n\t//NEED TO FIGURE OUT IF VALUE IS PROVIDED IN TF FILE OR DEFAULT IS BEING USED\n\t//\tvar as Reset\n\t//\tas.AutoSwitch = d.Get(\"reset.0.auto_switch\").(int)\n\t//\tlogger.Println(\"[INFO] Auto switch is- \", d.Get(\"reset.0.auto_switch\").(int))\n\t//\tsInstance.AutoSwitch = as\n\n\tmemberCount := d.Get(\"member_list.#\").(int)\n\tsInstance.Host = make([]go_thunder.MemberList, 0, memberCount)\n\tfor i := 0; i < memberCount; i++ {\n\t\tvar ml go_thunder.MemberList\n\t\tprefix := fmt.Sprintf(\"member_list.%d\", i)\n\t\tml.FqdnName = d.Get(prefix + \".fqdn_name\").(string)\n\t\tml.Host = d.Get(prefix + \".host\").(string)\n\t\tml.MemberPriority = d.Get(prefix + \".member_priority\").(int)\n\t\tml.MemberState = d.Get(prefix + \".member_state\").(string)\n\t\tml.MemberStatsDataDisable = d.Get(prefix + \".member_stats_data_disable\").(int)\n\t\tml.MemberTemplate = d.Get(prefix + \".member_template\").(string)\n\t\tml.Name = d.Get(prefix + \".name\").(string)\n\t\tml.Port = d.Get(prefix + \".port\").(int)\n\t\tml.ResolveAs = d.Get(prefix + \".resolve_as\").(string)\n\t\tml.ServerIpv6Addr = d.Get(prefix + \".server_ipv6_addr\").(string)\n\t\tml.UUID = d.Get(prefix + \".uuid\").(string)\n\t\tml.UserTag = d.Get(prefix + \".user_tag\").(string)\n\n\t\tsampleCount := d.Get(prefix + \".sampling_enable.#\").(int)\n\t\tml.Counters1 = make([]go_thunder.SamplingEnable, sampleCount, sampleCount)\n\n\t\tfor x := 0; x < 
sampleCount; x++ {\n\t\t\tvar s go_thunder.SamplingEnable\n\t\t\tmapEntity(d.Get(fmt.Sprintf(\"%s.sampling_enable.%d\", prefix, x)).(map[string]interface{}), &s)\n\t\t\tml.Counters1[x] = s\n\t\t}\n\n\t\tsInstance.Host = append(sInstance.Host, ml)\n\t}\n\n\ts.Name = sInstance\n\n\treturn s\n}", "func (o OutputTypeOutput) Datasource() pulumi.AnyOutput {\n\treturn o.ApplyT(func(v OutputType) interface{} { return v.Datasource }).(pulumi.AnyOutput)\n}", "func (p *ProviderData) Data() *ProviderData { return p }", "func (b TestableBuild) TestData() []settings.TestBundle {\n\tbasic := resp.MiloBuild{\n\t\tSummary: resp.BuildComponent{\n\t\t\tLabel: \"Test swarming build\",\n\t\t\tStatus: resp.Success,\n\t\t\tStarted: time.Date(2016, 1, 2, 15, 4, 5, 999999999, time.UTC),\n\t\t\tFinished: time.Date(2016, 1, 2, 15, 4, 6, 999999999, time.UTC),\n\t\t\tDuration: time.Second,\n\t\t},\n\t}\n\tresults := []settings.TestBundle{\n\t\t{\n\t\t\tDescription: \"Basic successful build\",\n\t\t\tData: templates.Args{\"Build\": basic},\n\t\t},\n\t}\n\tc := context.Background()\n\tc, _ = testclock.UseTime(c, time.Date(2016, time.March, 14, 11, 0, 0, 0, time.UTC))\n\tfor _, tc := range getTestCases() {\n\t\tbuild, err := swarmingBuildImpl(c, \"foo\", \"debug\", tc)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"Error while processing %s: %s\", tc, err))\n\t\t}\n\t\tresults = append(results, settings.TestBundle{\n\t\t\tDescription: tc,\n\t\t\tData: templates.Args{\"Build\": build},\n\t\t})\n\t}\n\treturn results\n}", "func (*FindEnabledHTTPFirewallRuleSetConfigResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_http_firewall_rule_set_proto_rawDescGZIP(), []int{4}\n}", "func (e Executor) Gather() (data []interface{}, failures []error) {\n\tif e.Output == nil {\n\t\te.Output = ioutil.Discard\n\t}\n\tdata = make([]interface{}, 0, len(e.On))\n\tfailures = make([]error, 0)\n\n\tresponses, err := e.run(true)\n\tif err != nil {\n\t\tfailures = append(failures, err)\n\t\treturn\n\t}\n\tfor 
i := range responses {\n\t\tif responses[i].Error != nil {\n\t\t\tfailures = append(failures, responses[i].Error)\n\t\t\tcontinue\n\t\t}\n\t\tdatum := responses[i].Data\n\t\tif datum == nil {\n\t\t\tfailures = append(failures, errors.New(fmt.Sprintf(\"Response %d did not return any data\", i)))\n\t\t\tcontinue\n\t\t}\n\t\tdata = append(data, datum)\n\t}\n\treturn\n}", "func ReturnEachTypeOfVulnerability() []horusec.Vulnerability {\n\treturn []horusec.Vulnerability{\n\t\t{\n\t\t\tLine: \"\",\n\t\t\tColumn: \"\",\n\t\t\tConfidence: \"\",\n\t\t\tFile: \"\",\n\t\t\tCode: \"\",\n\t\t\tDetails: \"\",\n\t\t\tSecurityTool: tools.GoSec,\n\t\t\tLanguage: languages.Go,\n\t\t\tSeverity: severity.Low,\n\t\t},\n\t\t{\n\t\t\tLine: \"\",\n\t\t\tColumn: \"\",\n\t\t\tConfidence: \"\",\n\t\t\tFile: \"\",\n\t\t\tCode: \"\",\n\t\t\tDetails: \"\",\n\t\t\tSecurityTool: tools.SecurityCodeScan,\n\t\t\tLanguage: languages.DotNet,\n\t\t\tSeverity: severity.Low,\n\t\t},\n\t\t{\n\t\t\tLine: \"\",\n\t\t\tColumn: \"\",\n\t\t\tConfidence: \"\",\n\t\t\tFile: \"\",\n\t\t\tCode: \"\",\n\t\t\tDetails: \"\",\n\t\t\tSecurityTool: tools.Brakeman,\n\t\t\tLanguage: languages.Ruby,\n\t\t\tSeverity: severity.Low,\n\t\t},\n\t\t{\n\t\t\tLine: \"\",\n\t\t\tColumn: \"\",\n\t\t\tConfidence: \"\",\n\t\t\tFile: \"\",\n\t\t\tCode: \"\",\n\t\t\tDetails: \"\",\n\t\t\tSecurityTool: tools.NpmAudit,\n\t\t\tLanguage: languages.Javascript,\n\t\t\tSeverity: severity.Low,\n\t\t},\n\t\t{\n\t\t\tLine: \"\",\n\t\t\tColumn: \"\",\n\t\t\tConfidence: \"\",\n\t\t\tFile: \"\",\n\t\t\tCode: \"\",\n\t\t\tDetails: \"\",\n\t\t\tSecurityTool: tools.YarnAudit,\n\t\t\tLanguage: languages.Javascript,\n\t\t\tSeverity: severity.Low,\n\t\t},\n\t\t{\n\t\t\tLine: \"\",\n\t\t\tColumn: \"\",\n\t\t\tConfidence: \"\",\n\t\t\tFile: \"\",\n\t\t\tCode: \"\",\n\t\t\tDetails: \"\",\n\t\t\tSecurityTool: tools.Bandit,\n\t\t\tLanguage: languages.Python,\n\t\t\tSeverity: severity.Low,\n\t\t},\n\t\t{\n\t\t\tLine: \"\",\n\t\t\tColumn: \"\",\n\t\t\tConfidence: 
\"\",\n\t\t\tFile: \"\",\n\t\t\tCode: \"\",\n\t\t\tDetails: \"\",\n\t\t\tSecurityTool: tools.Bandit,\n\t\t\tLanguage: languages.Python,\n\t\t\tSeverity: severity.Low,\n\t\t},\n\t\t{\n\t\t\tLine: \"\",\n\t\t\tColumn: \"\",\n\t\t\tConfidence: \"\",\n\t\t\tFile: \"\",\n\t\t\tCode: \"\",\n\t\t\tDetails: \"\",\n\t\t\tLanguage: languages.Leaks,\n\t\t\tSeverity: severity.High,\n\t\t},\n\t\t{\n\t\t\tLine: \"\",\n\t\t\tColumn: \"\",\n\t\t\tConfidence: \"\",\n\t\t\tFile: \"\",\n\t\t\tCode: \"\",\n\t\t\tDetails: \"\",\n\t\t\tSecurityTool: tools.GitLeaks,\n\t\t\tLanguage: languages.Leaks,\n\t\t\tSeverity: severity.High,\n\t\t},\n\t\t{\n\t\t\tLine: \"\",\n\t\t\tColumn: \"\",\n\t\t\tConfidence: \"\",\n\t\t\tFile: \"\",\n\t\t\tCode: \"\",\n\t\t\tDetails: \"\",\n\t\t\tSecurityTool: tools.SpotBugs,\n\t\t\tLanguage: languages.Java,\n\t\t\tSeverity: severity.Low,\n\t\t},\n\t\t{\n\t\t\tLine: \"\",\n\t\t\tColumn: \"\",\n\t\t\tConfidence: \"\",\n\t\t\tFile: \"\",\n\t\t\tCode: \"\",\n\t\t\tDetails: \"\",\n\t\t\tSecurityTool: tools.SpotBugs,\n\t\t\tLanguage: languages.Kotlin,\n\t\t\tSeverity: severity.Low,\n\t\t},\n\t}\n}", "func getDataInstances(repo Repo, config dvid.Config) ([]DataService, error) {\n\t// Since we can have names separated by commas, split them\n\tnamesString, found, err := config.GetString(\"data\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdatanames := strings.Split(namesString, \",\")\n\n\tvar datalist []DataService\n\tif !found || len(datanames) == 0 {\n\t\t// use all data instances\n\t\tdata, err := repo.GetAllData()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, dataservice := range data {\n\t\t\tdatalist = append(datalist, dataservice)\n\t\t}\n\t\treturn datalist, nil\n\t}\n\t// use only those data instances given\n\tfor _, name := range datanames {\n\t\tdataservice, err := repo.GetDataByName(dvid.DataString(name))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdatalist = append(datalist, dataservice)\n\t}\n\treturn datalist, 
nil\n}", "func FileDescriptorSet() *descriptor.FileDescriptorSet {\n\t// We just need ONE of the service names to look up the FileDescriptorSet.\n\tret, err := discovery.GetDescriptorSet(\"admin.Driver\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ret\n}", "func (o LookupCaPoolIamPolicyResultOutput) PolicyData() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupCaPoolIamPolicyResult) string { return v.PolicyData }).(pulumi.StringOutput)\n}", "func (*ComputedUserset) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_namespace_proto_rawDescGZIP(), []int{7}\n}", "func CollectBillingInfoData(context context.T) (data []model.BillingInfoData) {\n\n\tlog := context.Log()\n\n\tlog.Infof(\"Getting %v data\", GathererName)\n\n\tif isOnPremInstance(context.Identity()) {\n\t\tlog.Infof(\"Do not call Billing info, On-Premises instance\")\n\t\treturn\n\t}\n\n\tidentityDocument, err := queryIdentityDocument()\n\tif err != nil {\n\t\tlog.Errorf(\"GetInstanceIdentityDocument failed with error: %v\", err.Error())\n\t\treturn\n\t}\n\tlog.Infof(\"Instance identity document %v\", identityDocument)\n\n\tdata = parseInstanceIdentityDocumentOutput(context, identityDocument)\n\tlog.Infof(\"Parsed BillingInfo output data %v\", data)\n\treturn data\n\n}", "func (*ReadResponse_Tupleset) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_acl_service_proto_rawDescGZIP(), []int{2, 0}\n}", "func (e *Exporter) gatherData() ([]*Datum, error) {\n\n\tdata := []*Datum{}\n\n\tresponses, err := asyncHTTPGets(e.TargetURLs, e.APIToken)\n\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\tfor _, response := range responses {\n\n\t\t// Github can at times present an array, or an object for the same data set.\n\t\t// This code checks handles this variation.\n\t\tif isArray(response.body) {\n\t\t\tds := []*Datum{}\n\t\t\tjson.Unmarshal(response.body, &ds)\n\t\t\tdata = append(data, ds...)\n\t\t}\n\n\t\tlog.Infof(\"API data fetched for environment: %s\", 
response.targetURL.Environment)\n\t}\n\n\t//return data, rates, err\n\treturn data, nil\n\n}", "func (o DatasourceSetOutput) DatasourceType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DatasourceSet) *string { return v.DatasourceType }).(pulumi.StringPtrOutput)\n}", "func (C *Commander) GetData(writer http.ResponseWriter, request *http.Request) {\n\tvar error model.Error\n\tdb := database.DbConn()\n\tdefer func() {\n\t\terr := db.Close()\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t}()\n\tif strings.Contains(Role, \"program manager\") == true {\n\t\tvar Offset int\n\t\tPages := request.URL.Query()[\"Pages\"]\n\t\tfmt.Println(Pages)\n\t\tif Pages[0] != \"\" {\n\t\t\tlimit, err := strconv.Atoi(request.URL.Query().Get(\"limit\"))\n\t\t\tif limit != 10 && limit != 20 && limit != 50 {\n\t\t\t\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\t\t\terror.Message = \"Incorrect Limit Value\"\n\t\t\t\tjson.NewEncoder(writer).Encode(error)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ti1, _ := strconv.Atoi(Pages[0])\n\t\t\tfmt.Println(i1)\n\t\t\tOffset = 10 * i1\n\t\t\tcount, _ := db.Query(\"SELECT COUNT(Id) FROM sub_project_manager WHERE sub_project_id in (SELECT id FROM sub_project WHERE project_id in (SELECT id FROM project WHERE program_manager_id in (SELECT id FROM program_manager where program_manager_email = ?)))\", UserName)\n\t\t\tdefer func() {\n\t\t\t\terr := count.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err.Error())\n\t\t\t\t}\n\t\t\t}()\n\t\t\tGetManagerDetails, err := db.Query(\"call GetAllManagerDetailsData(?, ?, ?)\", UserName, Offset, limit)\n\t\t\tif err != nil {\n\t\t\t\tWriteLogFile(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\terr := GetManagerDetails.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err.Error())\n\t\t\t\t}\n\t\t\t}()\n\t\t\tvar Total int\n\t\t\tvar ManagerDetailData model.Project\n\t\t\tvar ManagerDetailsData []model.Project\n\t\t\tfor 
GetManagerDetails.Next() {\n\t\t\t\tGetManagerDetails.Scan(&ManagerDetailData.ProjectName, &ManagerDetailData.SubProjectName, &ManagerDetailData.ManagerName, &ManagerDetailData.ManagerEmailID, &ManagerDetailData.Id)\n\t\t\t\tManagerDetailsData = append(ManagerDetailsData, ManagerDetailData)\n\t\t\t}\n\t\t\tif count.Next() != false {\n\t\t\t\tcount.Scan(&Total)\n\t\t\t} else {\n\t\t\t\tTotal = 0\n\t\t\t}\n\t\t\tvar PaginationFormat model.Pagination\n\t\t\tPaginationFormat.TotalData = Total\n\t\t\tPaginationFormat.Limit = limit\n\t\t\tPaginationFormat.Data = ManagerDetailsData\n\t\t\tx1 := Total / limit\n\t\t\tx := Total % limit\n\t\t\tif x == 0 {\n\t\t\t\tPaginationFormat.TotalPages = x1\n\t\t\t} else {\n\t\t\t\tPaginationFormat.TotalPages = x1 + 1\n\t\t\t}\n\t\t\tx, _ = strconv.Atoi(Pages[0])\n\t\t\tif PaginationFormat.TotalPages != 0 {\n\t\t\t\tx1 = x + 1\n\t\t\t}\n\t\t\tPaginationFormat.Page = x1\n\t\t\tsetupResponse(&writer, request)\n\t\t\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\twriter.WriteHeader(http.StatusOK)\n\t\t\tjson.NewEncoder(writer).Encode(PaginationFormat)\n\t\t} else {\n\t\t\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\t\terror.Message = \"Incorrect Page Value\"\n\t\t\tjson.NewEncoder(writer).Encode(error)\n\t\t\treturn\n\n\t\t}\n\t} else {\n\t\twriter.WriteHeader(http.StatusNotFound)\n\t}\n}", "func DatasetHandler(ctor model.MetadataStorageCtor) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t// get dataset name\n\t\tdataset := pat.Param(r, \"dataset\")\n\n\t\t// get metadata client\n\t\tstorage, err := ctor()\n\t\tif err != nil {\n\t\t\thandleError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\t// get dataset summary\n\t\tres, err := storage.FetchDataset(dataset, false, false)\n\t\tif err != nil {\n\t\t\thandleError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\t// marshal data\n\t\terr = handleJSON(w, 
DatasetResult{\n\t\t\tDataset: res,\n\t\t})\n\t\tif err != nil {\n\t\t\thandleError(w, errors.Wrap(err, \"unable marshal dataset result into JSON\"))\n\t\t\treturn\n\t\t}\n\t}\n}", "func (o OutputResponseOutput) Datasource() pulumi.AnyOutput {\n\treturn o.ApplyT(func(v OutputResponse) interface{} { return v.Datasource }).(pulumi.AnyOutput)\n}", "func (o *PrivilegedTextDataAllOf) GetData() string {\n\tif o == nil || o.Data == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Data\n}", "func (o JobIamPolicyOutput) PolicyData() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *JobIamPolicy) pulumi.StringOutput { return v.PolicyData }).(pulumi.StringOutput)\n}", "func (o DatasetAccessAuthorizedDatasetDatasetOutput) DatasetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DatasetAccessAuthorizedDatasetDataset) string { return v.DatasetId }).(pulumi.StringOutput)\n}", "func (m *metadata) GetData() (result map[string]interface{}) {\n\tresult = map[string]interface{}{}\n\n\tif m.name != \"\" {\n\t\tresult[\"name\"] = m.name\n\t}\n\n\tif m.description != \"\" {\n\t\tresult[\"description\"] = m.description\n\t}\n\n\tif len(m.actions) > 0 {\n\t\tresult[\"actions\"] = m.actions\n\t}\n\n\treturn\n}", "func (client *DatasetClient) getDatasetsByWorkspaceHandleResponse(resp *azcore.Response) (DatasetListResponseResponse, error) {\n\tvar val *DatasetListResponse\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DatasetListResponseResponse{}, err\n\t}\n\treturn DatasetListResponseResponse{RawResponse: resp.Response, DatasetListResponse: val}, nil\n}", "func (*RunnableDerivedRolesSet) Descriptor() ([]byte, []int) {\n\treturn file_cerbos_runtime_v1_runtime_proto_rawDescGZIP(), []int{3}\n}", "func (req *Request) DataList() []Data {\n\treturn []Data{req}\n}", "func (o LookupConsentStoreIamPolicyResultOutput) PolicyData() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupConsentStoreIamPolicyResult) string { return v.PolicyData 
}).(pulumi.StringOutput)\n}", "func (o JobExtractSourceTableOutput) DatasetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v JobExtractSourceTable) *string { return v.DatasetId }).(pulumi.StringPtrOutput)\n}", "func FileDescriptorSet() *descriptorpb.FileDescriptorSet {\n\t// We just need ONE of the service names to look up the FileDescriptorSet.\n\tret, err := discovery.GetDescriptorSet(\"luci.resultdb.v1.Deriver\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ret\n}", "func (d *Driver) GetMetadata(id string) (map[string]string, error) {\n\tlogrus.Debugf(\"secureoverlay2: GetMetadata called w. id: %s\", id)\n\tdir := d.dir(id)\n\tif _, err := os.Stat(dir); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmetadata := map[string]string{\n\t\t\"WorkDir\": path.Join(dir, \"work\"),\n\t\t\"MergedDir\": path.Join(dir, \"merged\"),\n\t\t\"UpperDir\": path.Join(dir, \"diff\"),\n\t}\n\n\tlowerDirs, err := d.getLowerDirs(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(lowerDirs) > 0 {\n\t\tmetadata[\"LowerDir\"] = strings.Join(lowerDirs, \":\")\n\t}\n\n\t// additional data\n\ts, err := d.getSecurityMetaDataForID(id, \"\")\n\tswitch {\n\tcase err == nil:\n\t\tif s.RequiresConfidentiality {\n\t\t\t// embedd security meta-data if it is a secured image.\n\t\t\t// Note: only including it for secured images allows non-secured images still\n\t\t\t// to work with Manifest Schema 1 of registry. 
For secured images, in particular with\n\t\t\t// integrity, Schema 2 is essential to get the secure content-addressable nature of the image.\n\n\t\t\t// do some clean-up of unneeded params to declutter config/docker history\n\t\t\tif !s.RequiresConfidentiality {\n\t\t\t\ts.KeyHandle = \"\"\n\t\t\t\ts.KeyType = \"\"\n\t\t\t\ts.KeyTypeOption = \"\"\n\t\t\t\ts.KeyDesc = \"\"\n\t\t\t\ts.KeySize = \"\"\n\t\t\t\ts.KeyFilePath = \"\"\n\t\t\t\ts.CryptCipher = \"\"\n\t\t\t}\n\t\t\tbytes, _ := s.Encode()\n\t\t\tlogrus.Debugf(\"secureoverlay2: GetMetadata, adding (encoded) security meta-data %s\", s)\n\t\t\tmetadata[\"security-meta-data\"] = string(bytes)\n\t\t} else {\n\t\t\tctx = context.WithValue(context.TODO(), \"\", \"\")\n\t\t\tlogrus.Debug(\"secureoverlay2: GetMetadata, security meta-data indicates unsecured layer, skip security meta data addition\")\n\t\t}\n\tcase os.IsNotExist(err):\n\t\tlogrus.Debugf(\"secureoverlay2: GetMetadata, no security meta-data found to be added: %v\", err)\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\tlogrus.Debugf(\"secureoverlay2: GetMetadata return w. 
metadata: %v\", metadata)\n\n\treturn metadata, nil\n}", "func FileDescriptorSet() *descriptor.FileDescriptorSet {\n\t// We just need ONE of the service names to look up the FileDescriptorSet.\n\tret, err := discovery.GetDescriptorSet(\"admin.Config\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ret\n}", "func (o DatasetAccessTypeOutput) Dataset() DatasetAccessDatasetPtrOutput {\n\treturn o.ApplyT(func(v DatasetAccessType) *DatasetAccessDataset { return v.Dataset }).(DatasetAccessDatasetPtrOutput)\n}", "func ExtractAllData() (AllDataStruct, int) {\n\tvar wifiData, lengthWiFiData = ExtractWifiData()\n\tvar browserData, lengthBrowserData = ExtractBrowserCredentials()\n\tvar credmanData, lengthCredmanData = ExtractCredmanData()\n\tvar sysadminData = ExtractSysadminData()\n\n\tvar outDataStruct AllDataStruct\n\n\tif lengthWiFiData > 0 {\n\t\toutDataStruct.WifiData = wifiData\n\t}\n\tif lengthBrowserData > 0 {\n\t\toutDataStruct.BrowserData = browserData\n\t}\n\tif lengthCredmanData > 0 {\n\t\toutDataStruct.CredmanData = credmanData\n\t}\n\toutDataStruct.SysadminData = sysadminData\n\n\treturn outDataStruct, lengthCredmanData + lengthBrowserData + lengthWiFiData\n}", "func GetAllData() string {\n\tlogger.Info(\"Trying to get all data from wechat...\")\n\theaders := GenerateWechatRequestHeaders()\n\tresp, err := req.Get(GET_ALL_DATA_URL, headers)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t}\n\tdefer resp.Response().Body.Close()\n\n\tb, _ := ioutil.ReadAll(resp.Response().Body)\n\ttext := string(b)\n\t//匹配wx.cgiData=开头,seajs.use结尾的字符串,需要再把开头和结尾去掉\n\tregExp := `(wx.cgiData=)([\\w\\W]*)(seajs.use)`\n\treg := regexp.MustCompile(regExp)\n\tmatchedText := string(reg.Find([]byte(text)))\n\tmatchedText = strings.ReplaceAll(matchedText, \"wx.cgiData=\", \"\")\n\tmatchedText = strings.ReplaceAll(matchedText, \"seajs.use\", \"\")\n\tmatchedText = strings.ReplaceAll(matchedText, \";\", \"\")\n\tlogger.Info(\"Find matched text\\n %s\", matchedText)\n\tfilename := 
path.Join(path.Dir(BASE_DIR), \"/assets/users_original.txt\")\n\tlogger.Info(\"Save it into file %s\", filename)\n\tioutil.WriteFile(filename, []byte(matchedText), 0644)\n\treturn filename\n}", "func (d *Driver) getAllData() (map[string][]byte, error) {\n\t// check if the db file exists\n\t_, err := os.Stat(d.secretsDataFilePath)\n\tif err != nil {\n\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\t// the file will be created later on a store()\n\t\t\treturn make(map[string][]byte), nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tfile, err := os.Open(d.secretsDataFilePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tbyteValue, err := io.ReadAll(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsecretData := new(map[string][]byte)\n\terr = json.Unmarshal([]byte(byteValue), secretData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn *secretData, nil\n}", "func (acir *awsContainerInsightReceiver) collectData(ctx context.Context) error {\n\tvar mds []pmetric.Metrics\n\tif acir.cadvisor == nil && acir.k8sapiserver == nil {\n\t\terr := errors.New(\"both cadvisor and k8sapiserver failed to start\")\n\t\tacir.settings.Logger.Error(\"Failed to collect stats\", zap.Error(err))\n\t\treturn err\n\t}\n\n\tif acir.cadvisor != nil {\n\t\tmds = append(mds, acir.cadvisor.GetMetrics()...)\n\t}\n\n\tif acir.k8sapiserver != nil {\n\t\tmds = append(mds, acir.k8sapiserver.GetMetrics()...)\n\t}\n\n\tfor _, md := range mds {\n\t\terr := acir.nextConsumer.ConsumeMetrics(ctx, md)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o SchematizedDataResponseOutput) Data() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SchematizedDataResponse) string { return v.Data }).(pulumi.StringOutput)\n}", "func GetStatefulsetDetails(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) ([]experimentTypes.ApplicationUnderTest, error) {\n\n\tstatefulsetList, err := 
appsv1StatefulsetClient.List(metav1.ListOptions{LabelSelector: experimentsDetails.AppLabel})\n\tif err != nil || len(statefulsetList.Items) == 0 {\n\t\treturn nil, errors.Errorf(\"Unable to find the statefulsets with matching labels, err: %v\", err)\n\t}\n\n\tappsUnderTest := []experimentTypes.ApplicationUnderTest{}\n\tfor _, app := range statefulsetList.Items {\n\t\tlog.Infof(\"[DeploymentDetails]: Found statefulset name %s with replica count %d\", app.Name, int(*app.Spec.Replicas))\n\t\tappsUnderTest = append(appsUnderTest, experimentTypes.ApplicationUnderTest{AppName: app.Name, ReplicaCount: int(*app.Spec.Replicas)})\n\t}\n\t// Applying the APP_AFFECT_PERC variable to determine the total target deployments to scale\n\treturn getSliceOfTotalApplicationsTargeted(appsUnderTest, experimentsDetails)\n}", "func PackageInfo(cmdstr string, name string) ([]string, error) {\n\tvar result []string\n\tswitch cmdstr {\n\tcase \"cfg-files\": // returns a list of the package configuration files.\n\t\toutput, rc, err := ExecCommand(\"/bin/rpm\", \"-q\", \"-c\", name)\n\t\tif rc == 1 { // rpm package for 'name' was not found.\n\t\t\treturn nil, nil\n\t\t} else if rc == 0 { // add the package name the file belongs to.\n\t\t\tlog.Debugf(\"output from cfg-files query: %s\\n\", string(output))\n\t\t\tfiles := strings.Split(string(output), \"\\n\")\n\t\t\tfor ii := range files {\n\t\t\t\tresult = append(result, strings.TrimSpace(files[ii]))\n\t\t\t}\n\t\t\tlog.Debugf(\"result length: %d, result: %s\\n\", len(result), string(output))\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"file-query\": // returns the rpm name that owns the file 'name'\n\t\toutput, rc, err := ExecCommand(\"/bin/rpm\", \"-q\", \"-f\", name)\n\t\tif rc == 1 { // file is not part of any package.\n\t\t\treturn nil, nil\n\t\t} else if rc == 0 { // add the package name the file belongs to.\n\t\t\tlog.Debugf(\"output from file-query: %s\\n\", string(output))\n\t\t\tresult = append(result, 
string(strings.TrimSpace(string(output))))\n\t\t\tlog.Debugf(\"result length: %d, result: %s\\n\", len(result), string(output))\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"pkg-provides\": // returns the package name that provides 'name'\n\t\toutput, rc, err := ExecCommand(\"/bin/rpm\", \"-q\", \"--whatprovides\", name)\n\t\tlog.Debugf(\"pkg-provides - name: %s, output: %s\\n\", name, output)\n\t\tif rc == 1 { // no package provides 'name'\n\t\t\treturn nil, nil\n\t\t} else if rc == 0 {\n\t\t\tpkgs := strings.Split(string(output), \"\\n\")\n\t\t\tfor ii := range pkgs {\n\t\t\t\tresult = append(result, strings.TrimSpace(pkgs[ii]))\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\treturn nil, errors.New(\"rpm -q --whatprovides '\" + name + \"' returned: \" + err.Error())\n\t\t}\n\tcase \"pkg-query\": // returns the package name for 'name'.\n\t\toutput, rc, err := ExecCommand(\"/bin/rpm\", \"-q\", name)\n\t\tif rc == 1 { // the package is not installed.\n\t\t\treturn nil, nil\n\t\t} else if rc == 0 { // add the rpm name\n\t\t\tresult = append(result, string(strings.TrimSpace(string(output))))\n\t\t} else if err != nil {\n\t\t\treturn nil, errors.New(\"rpm -q '\" + name + \"' returned: \" + err.Error())\n\t\t}\n\tcase \"pkg-requires\": // returns a list of packages that requires package 'name'\n\t\toutput, rc, err := ExecCommand(\"/bin/rpm\", \"-q\", \"--whatrequires\", name)\n\t\tif rc == 1 { // no package reuires package 'name'\n\t\t\treturn nil, nil\n\t\t} else if rc == 0 {\n\t\t\tpkgs := strings.Split(string(output), \"\\n\")\n\t\t\tfor ii := range pkgs {\n\t\t\t\tresult = append(result, strings.TrimSpace(pkgs[ii]))\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\treturn nil, errors.New(\"rpm -q --whatrequires '\" + name + \"' returned: \" + err.Error())\n\t\t}\n\t}\n\treturn result, nil\n}" ]
[ "0.51251394", "0.5124929", "0.50155234", "0.49713492", "0.4917132", "0.48301443", "0.47200906", "0.47083902", "0.4706918", "0.46999493", "0.46994445", "0.4674119", "0.46486816", "0.46382493", "0.4610453", "0.4561087", "0.45523733", "0.45350423", "0.45172432", "0.44807452", "0.44346622", "0.4405722", "0.4366104", "0.43389177", "0.43388087", "0.4328111", "0.43244994", "0.43195745", "0.4317386", "0.43124413", "0.43058315", "0.42976853", "0.4291633", "0.42844486", "0.4272665", "0.42681283", "0.4262976", "0.42495823", "0.42416075", "0.42393813", "0.4222299", "0.42208067", "0.42163357", "0.42139167", "0.4212995", "0.4212995", "0.4211687", "0.41967556", "0.41931045", "0.4184299", "0.4180238", "0.41786972", "0.41766417", "0.41740796", "0.41675293", "0.41636977", "0.41580448", "0.41478038", "0.41355324", "0.41309482", "0.4128181", "0.41277668", "0.41234213", "0.41221634", "0.4119295", "0.4117249", "0.41147396", "0.41146922", "0.41117838", "0.41116017", "0.41065902", "0.41054243", "0.4101232", "0.40990207", "0.40989432", "0.4093902", "0.40913635", "0.40853432", "0.4082916", "0.40799725", "0.40787244", "0.40731558", "0.40725693", "0.40721944", "0.40681335", "0.40598905", "0.40565524", "0.4056088", "0.40548047", "0.40532997", "0.4052138", "0.4052124", "0.40503895", "0.4044497", "0.40384072", "0.40364763", "0.40362704", "0.40236783", "0.402084", "0.40204632" ]
0.72126865
0
NewDataGatherer creates a new instance of LightDataGatherer
func NewDataGatherer( dropAccessor drop.Accessor, blobsAccessor blob.CollectionAccessor, recsAccessor object.RecordCollectionAccessor, indexReplicaAccessor object.IndexBucketAccessor, ) *LightDataGatherer { return &LightDataGatherer{ dropAccessor: dropAccessor, blobsAccessor: blobsAccessor, recsAccessor: recsAccessor, indexReplicaAccessor: indexReplicaAccessor, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGatherer(ctx context.Context, opts *options.Options) (*Gatherer, error) {\n\tclient, err := opts.Client()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create notes client: %w\", err)\n\t}\n\treturn &Gatherer{\n\t\tclient: client,\n\t\tcontext: ctx,\n\t\toptions: opts,\n\t}, nil\n}", "func New(\n\tgatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig *rest.Config, anonymizer *anonymization.Anonymizer,\n) *Gatherer {\n\treturn &Gatherer{\n\t\tgatherKubeConfig: gatherKubeConfig,\n\t\tgatherProtoKubeConfig: gatherProtoKubeConfig,\n\t\tmetricsGatherKubeConfig: metricsGatherKubeConfig,\n\t\tanonymizer: anonymizer,\n\t\tstartTime: time.Now(),\n\t}\n}", "func NewNewData() *NewData {\n\tthis := NewData{}\n\treturn &this\n}", "func NewData(ctx *gin.Context) Data {\n\treturn Data{\n\t\tData: webbase.NewData(\"Golinks\", ctx),\n\t}\n}", "func (w *World) NewData() {\n\tw.Data = &ScenarioData{}\n\tw.Client = NewClient(w.serverUrl)\n}", "func newDataStore() *dataStore {\n\tds := dataStore{\n\t\tdata: make(map[string]Info),\n\t}\n\treturn &ds\n}", "func newLightFetcher(h *clientHandler) *lightFetcher {\n\tf := &lightFetcher{\n\t\thandler: h,\n\t\tchain: h.backend.blockchain,\n\t\tpeers: make(map[*peer]*fetcherPeerInfo),\n\t\tdeliverChn: make(chan fetchResponse, 100),\n\t\trequested: make(map[uint64]fetchRequest),\n\t\ttimeoutChn: make(chan uint64),\n\t\trequestTrigger: make(chan struct{}, 1),\n\t\tsyncDone: make(chan *peer),\n\t\tcloseCh: make(chan struct{}),\n\t\tmaxConfirmedTd: big.NewInt(0),\n\t}\n\th.backend.peers.notify(f)\n\n\tf.wg.Add(1)\n\tgo f.syncLoop()\n\treturn f\n}", "func NewData() *Data {\n d := &Data{}\n d.hashMap = make(map[int]string)\n return d\n}", "func createGathering(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar gathering Gathering\n\t_ = json.NewDecoder(r.Body).Decode(&gathering)\n\tgathering.ID = strconv.Itoa(rand.Intn(10000000)) //mock not safe\n\tgatherings = 
append(gatherings, gathering)\n\tjson.NewEncoder(w).Encode(gathering)\n}", "func newDataUpdateTracker() *dataUpdateTracker {\n\td := &dataUpdateTracker{\n\t\tCurrent: dataUpdateFilter{\n\t\t\tidx: 1,\n\t\t},\n\t\tdebug: serverDebugLog,\n\t\tinput: make(chan string, dataUpdateTrackerQueueSize),\n\t\tsave: make(chan struct{}, 1),\n\t\tsaveExited: make(chan struct{}),\n\t}\n\td.Current.bf = d.newBloomFilter()\n\td.dirty = true\n\treturn d\n}", "func NewData(g func(ctx context.Context) (*grpc.ClientConn, error)) *Data {\n\treturn &Data{g}\n}", "func NewDataWare() Dataware {\n\tconf := config.MyConfig()\n\tif conf.Develop.DatawareFake {\n\t\tlogrus.Info(\"in dataware fake mode so use faked dataware\")\n\t\treturn newDwFake()\n\t}\n\n\tinitGorm()\n\tdialect, dsn := conf.Database.Dsn()\n\tdb, err := gorm.Open(dialect, dsn)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\tinitDB(db)\n\tlogrus.Infof(\"connect database(%s) by dsn: %s\", dialect, dsn)\n\n\treturn newDwGorm(db)\n}", "func NewData() Data {\n\treturn Data(\"GO\")\n}", "func NewGathererMock(t minimock.Tester) *GathererMock {\n\tm := &GathererMock{t: t}\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.GatherMock = mGathererMockGather{mock: m}\n\n\treturn m\n}", "func (c *Crawler) newFetcher(height uint64) {\n\t\n\t// Stop previous fetcher\n\tif c.fetcherStop != nil {\n\t\tc.fetcherStop <- true\n\t}\n\t\n\t// Both channels to be closed by fetcher task\n\tc.fetcherStop = make(chan bool)\n\tc.fetcherBlocks = make(chan blockRecord, FetcherBlockBufferSize)\n\n\t//\n\tgo fetcher(c.rpcConfig, height, c.fetcherBlocks, c.fetcherStop)\n}", "func newWithDatastorer(name string, datastorer datastorer) (dsdb *DatastoreDB, err error) {\n\tdsdb = new(DatastoreDB)\n\tdsdb.kind = name\n\tdsdb.datastorer = datastorer\n\n\terr = dsdb.connect()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = dsdb.testDB()\n\tif err = dsdb.testDB(); err != nil 
{\n\t\tdsdb.Close()\n\t\treturn nil, err\n\t}\n\n\treturn dsdb, nil\n}", "func newDatastore(dir string) (ds.Batching, error) {\n\t// Create the datastore directory if it doesn't exist yet.\n\tif err := os.MkdirAll(dir, 0755); err != nil {\n\t\treturn nil, xerrors.Errorf(\"failed to create directory %s for DAG store datastore: %w\", dir, err)\n\t}\n\n\t// Create a new LevelDB datastore\n\tdstore, err := levelds.NewDatastore(dir, &levelds.Options{\n\t\tCompression: ldbopts.NoCompression,\n\t\tNoSync: false,\n\t\tStrict: ldbopts.StrictAll,\n\t\tReadOnly: false,\n\t})\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"failed to open datastore for DAG store: %w\", err)\n\t}\n\t// Keep statistics about the datastore\n\tmds := measure.New(\"measure.\", dstore)\n\treturn mds, nil\n}", "func NewGatherInfo(gather string, rv reflect.Value) *GatherInfo {\n\tgatherFunc := gatherFunctions[gather].function\n\treturn &GatherInfo{\n\t\tname: runtime.FuncForPC(reflect.ValueOf(gatherFunc).Pointer()).Name(),\n\t\tresult: rv.Interface().(gatherResult),\n\t\tfunction: gatherFunc,\n\t\tcanFail: gatherFunctions[gather].canFail,\n\t\trvString: rv.String(),\n\t}\n}", "func NewLightning(cfg *config.SubTaskConfig, cli *clientv3.Client, workerName string) *LightningLoader {\n\tlightningCfg := makeGlobalConfig(cfg)\n\tcore := lightning.New(lightningCfg)\n\tloader := &LightningLoader{\n\t\tcfg: cfg,\n\t\tcli: cli,\n\t\tcore: core,\n\t\tlightningConfig: lightningCfg,\n\t\tlogger: log.With(zap.String(\"task\", cfg.Name), zap.String(\"unit\", \"lightning-load\")),\n\t\tworkerName: workerName,\n\t}\n\treturn loader\n}", "func newDataset(epoch uint64) interface{} {\n\tds := &dataset{\n\t\tepoch: epoch,\n\t\tdateInit: 0,\n\t\tdataset: make([]uint64, TBLSIZE*DATALENGTH*PMTSIZE*32),\n\t}\n\t//truehashTableInit(ds.evenDataset)\n\n\treturn ds\n}", "func newPhilosopherDataPtr(respChannel chan string) *philosopherData {\n\tpd := new(philosopherData)\n\tpd.Init(respChannel)\n\treturn pd\n}", "func 
NewPusher(g prometheus.Gatherer) *Pusher {\n\treturn &Pusher{\n\t\tURL: \"https://telemetry.influxdata.com/metrics/job/influxdb\",\n\t\tGather: &pr.Filter{\n\t\t\tGatherer: g,\n\t\t\tMatcher: telemetryMatcher,\n\t\t},\n\t\tClient: &http.Client{\n\t\t\tTransport: http.DefaultTransport,\n\t\t\tTimeout: 10 * time.Second,\n\t\t},\n\t\tPushFormat: expfmt.FmtText,\n\t}\n}", "func Wrap(g prometheus.Gatherer) apm.MetricsGatherer {\n\treturn gatherer{g}\n}", "func NewData() *Data {\n\treturn &Data{}\n}", "func NewData() *Data {\n\tnmap := make(map[interface{}]int)\n\treturn &Data{\n\t\tm: nmap,\n\t}\n\t// return make(Data)\n}", "func New() *Collector { return &Collector{} }", "func NewTelemetry(i http.Handler, name string) Telemetry {\n\tr := metrics.NewMeter()\n\ts := metrics.NewMeter()\n\tf := metrics.NewMeter()\n\n\ttme := metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))\n\n\tmetrics.Register(name+\"_requests\", r)\n\tmetrics.Register(name+\"_success\", s)\n\tmetrics.Register(name+\"_failure\", f)\n\tmetrics.Register(name+\"_time\", tme)\n\n\tt := Telemetry{inner: i, rCount: r, sCount: s, fCount: f, tmr: tme}\n\treturn t\n}", "func (e *Exporter) newFetcher(hostname string) *Fetcher {\n\treturn NewFetcher(hostname, e.chAccessInfo.Username, e.chAccessInfo.Password, e.chAccessInfo.Port)\n}", "func (d *dataUpdateTracker) newBloomFilter() bloomFilter {\n\treturn bloomFilter{bloom.NewWithEstimates(dataUpdateTrackerEstItems, dataUpdateTrackerFP)}\n}", "func New() *Data {\n\treturn &Data{\n\t\tValues: make(map[string]DataType),\n\t\tData: make(map[string]interface{}),\n\t\tunexportedData: make(map[string]interface{}),\n\t\tcache: make(map[string]interface{}),\n\t}\n}", "func NewData(name, datadir string) (*Data, error) {\n\tdata := &Data{}\n\tdata.name = name\n\tdata.datadir = datadir\n\tdata.mutex = new(sync.Mutex)\n\terr := data.init()\n\tif err != nil {\n\t\tlog.Printf(\"data.init Error : %+v\", err)\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}", "func 
newExporter(cfg component.Config, set exporter.CreateSettings) (*baseExporter, error) {\n\toCfg := cfg.(*Config)\n\n\tif oCfg.Endpoint == \"\" {\n\t\treturn nil, errors.New(\"OTLP exporter config requires an Endpoint\")\n\t}\n\n\tuserAgent := fmt.Sprintf(\"%s/%s (%s/%s)\",\n\t\tset.BuildInfo.Description, set.BuildInfo.Version, runtime.GOOS, runtime.GOARCH)\n\n\treturn &baseExporter{config: oCfg, settings: set.TelemetrySettings, userAgent: userAgent}, nil\n}", "func New() *Datamuse {\n\tu, _ := url.Parse(\"https://api.datamuse.com\")\n\treturn &Datamuse{\n\t\tapiURL: u,\n\t}\n}", "func newChecker(ds DataSource, notifiers Notifiers) *Checker {\n\treturn &Checker{\n\t\tds: ds,\n\t\tnotifiers: notifiers,\n\t}\n}", "func (p *LightningPool) new(ctx context.Context) (*amqp.Channel, error) {\n\treturn p.conn.Channel(ctx)\n}", "func NewCheckGatherer(check checker) *Gatherer {\n\treturn &Gatherer{check: check}\n}", "func NewGenData() *GenData {\n\tgd := &GenData{}\n\treturn gd\n}", "func NewData() *Data {\n\treturn new(Data)\n}", "func newGauge(namespace, subsystem, name string, labelNames []string, client *statsd.Statter, isPrometheusEnabled bool) *Gauge {\n\topts := prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t}\n\tvec := prometheus.NewGaugeVec(opts, labelNames)\n\tif isPrometheusEnabled {\n\t\tprometheus.MustRegister(vec)\n\t}\n\n\treturn &Gauge{\n\t\twatcher: vec,\n\t\tlabels: labelNames,\n\t\tclient: client,\n\t\tprefix: strings.Join([]string{namespace, subsystem, name}, \".\"),\n\t}\n}", "func NewData(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (*Data, error) {\n\t// Initialize the Data for this data type\n\tbasedata, err := datastore.NewDataService(dtype, uuid, id, name, c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata := &Data{\n\t\tData: basedata,\n\t\tProperties: Properties{},\n\t}\n\treturn data, nil\n}", "func newMeasurementData(role uint8) MeasurementData {\n\treturn 
MeasurementData{\n\t\tMeasurementHeaderData{\n\t\t\t0, // latencySpin\n\t\t\ttrue, // latencyValid\n\t\t\tfalse, // blocking\n\t\t\tstatusInvalid, // latencyStatus\n\t\t\tfalse, // loss\n\t\t\ttrue, // latencyValidEdge\n\t\t},\n\t\t0, // maxPacketNumber\n\t\trole, // role\n\t\ttime.Now(), // latencyRxEdgeTime\n\t\t0xff, // lastRxLatencySpin\n\t\tfalse, // generatingEdge\n\t\tstatusInvalid, // incommingLatencyStatus\n }\n}", "func NewEducationFeedback()(*EducationFeedback) {\n m := &EducationFeedback{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}", "func NewFlattener(configuration *DataTransformerConf) *Flattener {\n\n\tconfiguration.isSHAKE = isShakeAlgorithm(configuration.HashingAlgorithm)\n\n\tf := &Flattener{\n\t\tdataProcessorCore: dataProcessorCore{\n\t\t\tconfiguration: configuration,\n\t\t\tpointMap: sync.Map{},\n\t\t\tterminateChan: make(chan struct{}, 1),\n\t\t\tloggers: logh.CreateContextualLogger(\"pkg\", \"timeline/flattener\"),\n\t\t},\n\t}\n\n\tf.parent = f\n\n\treturn f\n}", "func newDataStore(driver, dsn string) (*dataStore, error) {\r\n\r\n\tvar this *dataStore\r\n\r\n\tif db, err := sqlx.Open(driver, dsn); err != nil {\r\n\t\treturn nil, err\r\n\t} else if err := db.Ping(); err != nil {\r\n\t\treturn nil, err\r\n\t} else {\r\n\t\tthis = &dataStore{\r\n\t\t\tDB: db,\r\n\t\t\tconnPool: new(ConnPool),\r\n\t\t\tnamedStmts: make(map[string]map[string]*namedStmt),\r\n\t\t}\r\n\t}\r\n\r\n\treturn this, nil\r\n}", "func NewLightningPool(conn *Connector) *LightningPool {\n\treturn &LightningPool{\n\t\tconn: conn,\n\t}\n}", "func NewCollector(url, token, xSecret string) (*Collector, error) {\n\tc := Collector{}\n\n\tif url == \"\" {\n\t\treturn nil, fmt.Errorf(\"URL should not be empty\")\n\t}\n\tc.dadataAPIURL = url\n\tif token == \"\" {\n\t\treturn nil, fmt.Errorf(\"Token should not be empty. 
Please specify it via DADATA_TOKEN env var\")\n\t}\n\tc.dadataToken = token\n\tif xSecret == \"\" {\n\t\treturn nil, fmt.Errorf(\"X-Secret should not be empty. Please specify it via DADATA_X_SECRET env var\")\n\t}\n\tc.dadataXSecret = xSecret\n\n\terr := c.dadataCheck()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.totalScrapes = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_scrapes_total\",\n\t\tHelp: \"Count of total scrapes\",\n\t})\n\n\tc.failedBalanceScrapes = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_failed_balance_scrapes_total\",\n\t\tHelp: \"Count of failed balance scrapes\",\n\t})\n\n\tc.failedStatsScrapes = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_failed_stats_scrapes_total\",\n\t\tHelp: \"Count of failed stats scrapes\",\n\t})\n\n\tc.CurrentBalance = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tName: \"current_balance\",\n\t\tHelp: \"Current balance on Dadata\",\n\t})\n\n\tc.ServicesMerging = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: \"services\",\n\t\tName: \"merging_total\",\n\t\tHelp: \"Merging count for today\",\n\t})\n\n\tc.ServicesSuggestions = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: \"services\",\n\t\tName: \"suggestions_total\",\n\t\tHelp: \"Suggestions count for today\",\n\t})\n\n\tc.ServicesClean = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: \"services\",\n\t\tName: \"clean_total\",\n\t\tHelp: \"Clean count for today\",\n\t})\n\n\treturn &c, nil\n}", "func newDataSourceData(dInfo *backend.DataSourceInstanceSettings) (*dataSourceData, error) {\n\td := dataSourceData{}\n\terr := jsoniter.Unmarshal(dInfo.JSONData, &d)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif d.QueryTimeoutRaw == \"\" {\n\t\td.QueryTimeout = time.Second * 
30\n\t} else {\n\t\tif d.QueryTimeout, err = time.ParseDuration(d.QueryTimeoutRaw); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif d.ServerTimeoutValue, err = formatTimeout(d.QueryTimeout); err != nil {\n\t\treturn nil, err\n\t}\n\n\td.Secret = dInfo.DecryptedSecureJSONData[\"clientSecret\"]\n\treturn &d, nil\n}", "func NewNewDataWithDefaults() *NewData {\n\tthis := NewData{}\n\treturn &this\n}", "func New(serviceName string, environment string) LoggerWrapper {\n\tlogStore = &loggerWrapper{logrus.New().WithField(\"service\", serviceName).WithField(\"environment\", environment)}\n\tif environment == \"production\" {\n\t\tlogStore.SetFormat(&logrus.JSONFormatter{})\n\t}\n\n\t// fmt.Println(\"Adding hook\")\n\t// hook := logrusly.NewLogglyHook(\"71000042-f956-4c7e-987d-8694a20695a8\", \"https://logs-01.loggly.com/bulk/\", logrus.InfoLevel, serviceName)\n\t// logStore.Logger.Hooks.Add(hook)\n\treturn logStore\n}", "func newmetric(name string, kind metricKind, tags []string, common bool) *metric {\n\treturn &metric{\n\t\tname: name,\n\t\tkind: kind,\n\t\ttags: append([]string{}, tags...),\n\t\tcommon: common,\n\t}\n}", "func (c *Configuration) NewMeter(name string, options ...Option) (metric.Meter, error) {\n\tif !c.Enabled {\n\t\treturn metric.NoopProvider{}.Meter(name), nil\n\t}\n\n\tif c.AgentEndpoint == \"\" {\n\t\treturn metric.Meter{}, fmt.Errorf(\"missing agent address, please set environment variable %s\", envAgentEndpoint)\n\t}\n\n\topts := applyOptions(options...)\n\texporter := sotlp.SingletonExporter()\n\tif exporter == nil {\n\t\texp, err := otlp.NewExporter(otlp.WithInsecure(),\n\t\t\totlp.WithAddress(c.AgentEndpoint),\n\t\t\totlp.WithReconnectionPeriod(time.Minute),\n\t\t\totlp.WithGRPCDialOption(grpc.WithTimeout(5*time.Second)))\n\t\tif err != nil {\n\t\t\treturn metric.Meter{}, fmt.Errorf(\"failed to create the collector exporter: %w\", err)\n\t\t}\n\t\texporter = 
exp\n\t\tsotlp.SetExporter(exporter)\n\t\topts.Logger.With(zap.String(\"agentEndpoint\", c.AgentEndpoint)).Info(\"success to otlp agent\")\n\t}\n\t// exporter.Stop()\n\n\tif meterPusher == nil {\n\t\tmeterPusher = push.New(\n\t\t\tbasic.New(\n\t\t\t\tsimple.NewWithExactDistribution(),\n\t\t\t\texporter,\n\t\t\t),\n\t\t\texporter,\n\t\t\tpush.WithPeriod(30*time.Second),\n\t\t\t//push.WithTimeout(10*time.Second),\n\t\t)\n\t\tmeterProvider = meterPusher.Provider()\n\t\tmeterPusher.Start()\n\t\topts.Logger.With(zap.String(\"agentEndpoint\", c.AgentEndpoint)).Info(\"success to create metric pusher and start to push metric\")\n\t}\n\n\treturn meterProvider.Meter(name), nil\n}", "func newDataSetConstructor(\n\tdb, rp string, onConflictConvertIntToFloat bool,\n\tclient influx.Client,\n\ttagExplorer discovery.TagExplorer,\n\tfieldExplorer discovery.FieldExplorer) dataSetConstructor {\n\treturn &defaultDSConstructor{\n\t\tdatabase: db,\n\t\trp: rp,\n\t\tinfluxClient: client,\n\t\ttagExplorer: tagExplorer,\n\t\tfieldExplorer: fieldExplorer,\n\t\tonConflictConvertIntToFloat: onConflictConvertIntToFloat,\n\t}\n}", "func NewFulfillment() {\n\t\n}", "func New(client *httpclient.Client, baseURL string) Data {\n\td := Data{\n\t\tclient: client,\n\t\tbaseURL: baseURL,\n\t}\n\n\treturn d\n}", "func CreateGofer(id int, goferQueue chan chan JobRequest) Gofer {\n\n gofer := Gofer {\n GoferID: id,\n JobChan: make(chan JobRequest),\n GoferQueue: goferQueue,\n ExitChan: make(chan bool),\n }\n\n return gofer\n}", "func New(serviceName, env, source string) *Datadog {\n\tdatadog, err := statsd.New(source)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\t// Get hostname\n\thost, err := os.Hostname()\n\tif err != nil {\n\t\thost = \"undefined\"\n\t}\n\n\t// Get service name\n\tif len(serviceName) < 1 {\n\t\tlog.Fatal(errors.New(\"Datadog service name should be provided\"))\n\t}\n\n\tdatadog.Namespace = fmt.Sprintf(\"enterprise_%s.\", serviceName)\n\tdatadog.Tags = append(datadog.Tags, 
\"env:\"+env, \"host:\"+host)\n\n\tlog.Println(\"Datadog initialized...\")\n\n\treturn &Datadog{\n\t\tclient: datadog,\n\t}\n}", "func makeDeviceCollectorGroupObject(d *schema.ResourceData) (output lmv1.RestCollectorGroup) {\n\n\toutput = lmv1.RestCollectorGroup{\n\t\tName: d.Get(\"name\").(string),\n\t\tDescription: d.Get(\"description\").(string),\n\t}\n\n\treturn\n}", "func Create(ctx context.Context, name string, opts map[string]interface{}) (Gatherer, error) {\n\tcreatorMu.RLock()\n\tc, ok := creators[name]\n\tcreatorMu.RUnlock()\n\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"%s gatherer creator not registered\", name)\n\t}\n\n\tg, err := c.Create(ctx, opts)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Logger.Debugf(\"%s gatherer created\", name)\n\treturn g, nil\n}", "func prepareData(ctx context.Context, targets int, fetcher func() *point.Points) *data {\n\tdata := &data{\n\t\tData: &Data{Points: point.NewPoints()},\n\t\tb: make(chan io.ReadCloser, 1),\n\t\te: make(chan error, targets),\n\t\tmut: sync.RWMutex{},\n\t\twg: sync.WaitGroup{},\n\t}\n\tdata.wg.Add(1)\n\n\textraPoints := make(chan *point.Points, 1)\n\n\tgo func() {\n\t\t// add extraPoints. 
With NameToID\n\t\tdefer func() {\n\t\t\tdata.wg.Done()\n\t\t\tclose(extraPoints)\n\t\t}()\n\n\t\t// First check is context is already done\n\t\tif err := contextIsValid(ctx); err != nil {\n\t\t\tdata.e <- fmt.Errorf(\"prepareData failed: %w\", err)\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase extraPoints <- fetcher():\n\t\t\tp := <-extraPoints\n\t\t\tif p != nil {\n\t\t\t\tdata.mut.Lock()\n\t\t\t\tdefer data.mut.Unlock()\n\n\t\t\t\textraList := p.List()\n\t\t\t\tfor i := 0; i < len(extraList); i++ {\n\t\t\t\t\tdata.Points.AppendPoint(\n\t\t\t\t\t\tdata.Points.MetricID(p.MetricName(extraList[i].MetricID)),\n\t\t\t\t\t\textraList[i].Value,\n\t\t\t\t\t\textraList[i].Time,\n\t\t\t\t\t\textraList[i].Timestamp,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\tcase <-ctx.Done():\n\t\t\tdata.e <- fmt.Errorf(\"prepareData failed: %w\", ctx.Err())\n\t\t\treturn\n\t\t}\n\t}()\n\treturn data\n}", "func newAverageTracker(factor int, statsdC *statsd.Client) *AverageTracker {\n\ta := &AverageTracker{\n\t\trec: make(map[string]averageTrackerRecord),\n\t\tch: make(chan struct{}, 1),\n\t\tstatsdC: statsdC,\n\t\tfactor: factor,\n\t}\n\n\ta.unlock()\n\treturn a\n}", "func newAverageTracker(factor int, statsdC *statsd.Client) *AverageTracker {\n\ta := &AverageTracker{\n\t\trec: make(map[string]averageTrackerRecord),\n\t\tch: make(chan struct{}, 1),\n\t\tstatsdC: statsdC,\n\t\tfactor: factor,\n\t}\n\n\ta.unlock()\n\treturn a\n}", "func newData(name string, data []byte) *FsData {\n\treturn &FsData{*newFile(name), data}\n}", "func NewDatapool(sleep time.Duration, open ConnFunc) Pool {\n\tdp := &datapool{\n\t\tOpen: open,\n\t\tSleep: sleep,\n\t}\n\n\tdp.cond.L = dp.mu.RLocker()\n\treturn dp\n}", "func New(pools []string) *Exporter {\n\treturn &Exporter{\n\t\tcollectors: []prometheus.Collector{\n\t\t\tNewZpoolCollector(pools),\n\t\t\tNewDatasetCollector(pools),\n\t\t},\n\t}\n}", "func newCrdtBroadcaster(ctx context.Context, api pb.PubSubAPIClient, topic string) (*crdtBroadcaster, 
error) {\n\tclient, err := api.PubSub(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := client.Send(&pb.PubSubRequest{\n\t\tRequestType: pb.PSREQTYPE_PS_SUBSCRIBE,\n\t\tTopics: []string{topic},\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\tnext := make(chan []byte)\n\tb := &crdtBroadcaster{\n\t\ttopic: topic,\n\t\tclient: client,\n\t\tnext: next,\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tresp, err := client.Recv()\n\t\t\tif err != nil {\n\t\t\t\tb.err = err\n\t\t\t\tclose(next)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, m := range resp.GetMessage() {\n\t\t\t\tnext <- m.GetData()\n\t\t\t}\n\t\t}\n\t}()\n\treturn b, nil\n}", "func NewCollector(rcClientId string, kubernetesClusterId string) TelemetryCollector {\n\treturn &telemetryCollector{\n\t\tclient: httputils.NewResetClient(httpClientResetInterval, httpClientFactory(httpClientTimeout)),\n\t\thost: utils.GetMainEndpoint(config.Datadog, mainEndpointPrefix, mainEndpointUrlKey),\n\t\tuserAgent: \"Datadog Cluster Agent\",\n\t\trcClientId: rcClientId,\n\t\tkubernetesClusterId: kubernetesClusterId,\n\t}\n}", "func newDataHandler(prices []DataPoint) *DataHandler {\n\treturn &DataHandler{\n\t\tPrices: prices,\n\t}\n}", "func darpGraphGather(l string) {\n\n\tm := make(map[string]string)\n\tm[\"g0\"] = l\n\tn := 1\n\n\tfor {\n\t\tselect {\n\t\tcase l := <-darpGraphChan:\n\t\t\tm[fmt.Sprintf(\"g%d\", n)] = l\n\t\t\tn++\n\t\tdefault:\n\t\t\tbreak\n\t\t}\n\t\tif n >= graphMaxSend {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tdarp.TellMyMasters(\"graphdata\", m)\n}", "func GetGatherer() prometheus.Gatherer {\n\treturn registry\n}", "func newFlowTestDsInfo(body []byte, statusCode int, requestCallback func(req *http.Request) error) *es.DatasourceInfo {\n\tclient := http.Client{\n\t\tTransport: &queryDataTestRoundTripper{body: body, statusCode: statusCode, requestCallback: requestCallback},\n\t}\n\n\tconfiguredFields := es.ConfiguredFields{\n\t\tTimeField: \"testtime\",\n\t\tLogMessageField: \"line\",\n\t\tLogLevelField: 
\"lvl\",\n\t}\n\n\treturn &es.DatasourceInfo{\n\t\tInterval: \"Daily\",\n\t\tDatabase: \"[testdb-]YYYY.MM.DD\",\n\t\tConfiguredFields: configuredFields,\n\t\tTimeInterval: \"1s\",\n\t\tURL: \"http://localhost:9200\",\n\t\tHTTPClient: &client,\n\t\tMaxConcurrentShardRequests: 42,\n\t\tIncludeFrozen: false,\n\t\tXPack: true,\n\t}\n}", "func Wrap(r metrics.Registry) elasticapm.MetricsGatherer {\n\treturn gatherer{r}\n}", "func NewDeviceHealth()(*DeviceHealth) {\n m := &DeviceHealth{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func newManager(id int, wg *sync.WaitGroup, pr int64, cr float64, ps float64) *Manager {\n\tvar weather Weather\n\tweather.initializeWeather()\n\tweather.generateWeather()\n\tforecast, multiplier := weather.getWeather()\n\tfmt.Printf(\"\\nCURRENT FORECAST: %s\\n\", forecast)\n\n\tproductsRate = pr\n\tcustomerRate = cr * multiplier\n\tprocessSpeed = ps\n\n\tcustomerStatusChan = make(chan int, 256)\n\tcheckoutChangeStatusChan = make(chan int, 256)\n\n\t// Default to 1 Checkout when the store opens\n\tnumberOfCheckoutsOpen = 1\n\n\treturn &Manager{id: id, wg: wg}\n}", "func (ot *openTelemetryWrapper) newResource(\n\twebEngineName,\n\twebEngineVersion string,\n) (*resource.Resource, error) {\n\treturn resource.Merge(resource.Default(), resource.NewSchemaless(\n\t\tsemconv.WebEngineName(webEngineName),\n\t\tsemconv.WebEngineVersion(webEngineVersion),\n\t))\n}", "func newDHT(r *Router) *dht {\n\td := &dht{\n\t\tr: r,\n\t\tfinger: make(map[types.PublicKey]dhtEntry),\n\t}\n\treturn d\n}", "func newDataInstance(repo datastore.Repo, t *testing.T, name dvid.DataString) *Data {\n\tconfig := dvid.NewConfig()\n\tconfig.SetVersioned(true)\n\tdataservice, err := repo.NewData(labelsT, name, config)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to create labels64 instance %q: %s\\n\", name, err.Error())\n\t}\n\tlabels, ok := 
dataservice.(*Data)\n\tif !ok {\n\t\tt.Errorf(\"Can't cast labels data service into Data\\n\")\n\t}\n\treturn labels\n}", "func NewCollector(period time.Duration, collectFunc func() []Measurement) *Collector {\n\tcollector := &Collector{\n\t\tperiod: period,\n\t\tcollectFunc: collectFunc,\n\t\tlastSendingDate: -1,\n\t}\n\n\tif sources == nil {\n\t\tsources = make([]DataSource, 0)\n\t\tgo sendingLoop()\n\t}\n\n\tif UseGlobalEngine {\n\t\tcollector.Engine = Engine\n\t} else {\n\t\tcollector.Engine = &req.Engine{}\n\t}\n\n\tsources = append(sources, collector)\n\n\treturn collector\n}", "func newCMLogger(name string, chainId string, logger *zap.SugaredLogger, logLevel log.LOG_LEVEL) *CMLogger {\n\treturn &CMLogger{name: name, chainId: chainId, SugaredLogger: logger, logLevel: logLevel}\n}", "func newInfluxDBLoggerRunnable() RunnableIFC {\n\treturn newRunnable(newInfluxDBLogger())\n}", "func newSampler(extraRate float64, maxTPS float64) *Sampler {\n\ts := &Sampler{\n\t\tBackend: NewMemoryBackend(defaultDecayPeriod, defaultDecayFactor),\n\t\textraRate: extraRate,\n\t\tmaxTPS: maxTPS,\n\t\trateThresholdTo1: defaultSamplingRateThresholdTo1,\n\t\tsignatureScoreOffset: atomic.NewFloat(0),\n\t\tsignatureScoreSlope: atomic.NewFloat(0),\n\t\tsignatureScoreFactor: atomic.NewFloat(0),\n\n\t\texit: make(chan struct{}),\n\t}\n\n\ts.SetSignatureCoefficients(initialSignatureScoreOffset, defaultSignatureScoreSlope)\n\n\treturn s\n}", "func newStorage(\n\tmachineName,\n\ttablename,\n\tdatabase,\n\tusername,\n\tpassword,\n\tinfluxdbHost string,\n\tisSecure bool,\n\tbufferDuration time.Duration,\n) (*influxdbStorage, error) {\n\tconfig := &influxdb.ClientConfig{\n\t\tHost: influxdbHost,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tDatabase: database,\n\t\tIsSecure: isSecure,\n\t}\n\tclient, err := influxdb.NewClient(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// TODO(monnand): With go 1.3, we cannot compress data now.\n\tclient.DisableCompression()\n\n\tret := 
&influxdbStorage{\n\t\tclient: client,\n\t\tmachineName: machineName,\n\t\ttableName: tablename,\n\t\tbufferDuration: bufferDuration,\n\t\tlastWrite: time.Now(),\n\t\tseries: make([]*influxdb.Series, 0),\n\t}\n\tret.readyToFlush = ret.defaultReadyToFlush\n\treturn ret, nil\n}", "func New() *Gopher {\n\tg := &Gopher{\n\t\tdelay: 1000 * time.Millisecond,\n\t\tactivity: Waiting,\n\t\tcolor: White,\n\t\tw: colorable.NewColorableStdout(),\n\t\tdone: make(chan struct{}, 1),\n\t}\n\treturn g\n}", "func NewData(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (*Data, error) {\n\treturn nil, fmt.Errorf(\"DVID version does not support labelsurf at this time.\")\n\t/*\n\t\t// Make sure we have a valid DataService source\n\t\tsourcename, found, err := c.GetString(\"Source\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !found {\n\t\t\treturn nil, fmt.Errorf(\"Cannot make labelsurf data without valid 'Source' specifying an associated labelvol.\")\n\t\t}\n\t\tvar data datastore.DataService\n\t\tdata, err = datastore.GetDataByUUID(uuid, dvid.InstanceName(sourcename))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdtype := data.GetType()\n\t\tif dtype.GetTypeName() != \"labelvol\" {\n\t\t\treturn nil, fmt.Errorf(\"Source data must be of type 'labelvol', not %s\", dtype.GetTypeName())\n\t\t}\n\n\t\t// Initialize the Data for this data type\n\t\tbasedata, err := datastore.NewDataService(dtype, uuid, id, name, c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &Data{basedata, dvid.DataName(sourcename)}, nil\n\t*/\n}", "func NewDataProvider() DataProvider {\n\treturn &DefaultDataProvider{}\n}", "func newWatcher(loader *Loader, uri string, interval time.Duration, onStop func()) *watcher {\n\treturn &watcher{\n\t\tstate: isCreated,\n\t\tupdatedAt: 0,\n\t\tloader: loader,\n\t\turi: uri,\n\t\tupdates: make(chan Update, 1),\n\t\tinterval: interval,\n\t\tonStop: onStop,\n\t}\n}", "func NewData(title string, ginctx *gin.Context) 
Data {\n\tdata := Data{\n\t\tTitle: title,\n\t\tBinVersion: version.Version(),\n\t}\n\torg, err := ctx.GetOrg(ginctx)\n\tif err == nil {\n\t\tdata.Ctx.Org = org.Name\n\t}\n\tuser, err := ctx.GetUser(ginctx)\n\tif err == nil {\n\t\tdata.Ctx.User = user.Email\n\t\tdata.Ctx.LoggedIn = true\n\t}\n\tdata.Ctx.AuthEnabled = ctx.IsAuthEnabled(ginctx)\n\treturn data\n}", "func (s *Trainer) NewFeaturizer() train.Featurizer {\n\t// The sink featurize method can be called in parallel normally, so\n\t// nothing is created\n\treturn s\n}", "func newSinkMetaHandler(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tvars := mux.Vars(r)\n\tpluginName := vars[\"name\"]\n\n\tlanguage := getLanguage(r)\n\tptrMetadata, err := meta.GetSinkMeta(pluginName, language)\n\tif err != nil {\n\t\thandleError(w, err, \"\", logger)\n\t\treturn\n\t}\n\tjsonResponse(ptrMetadata, w, logger)\n}", "func newDatasource() datasource.ServeOpts {\n\tim := datasource.NewInstanceManager(newDataSourceInstance)\n\n\tds := &redisDatasource{\n\t\tim: im,\n\t}\n\n\t// Returns datasource.ServeOpts\n\treturn datasource.ServeOpts{\n\t\tQueryDataHandler: ds,\n\t\tCheckHealthHandler: ds,\n\t}\n}", "func newDatasource() datasource.ServeOpts {\n\tim := datasource.NewInstanceManager(newDataSourceInstance)\n\n\tds := &redisDatasource{\n\t\tim: im,\n\t}\n\n\t// Returns datasource.ServeOpts\n\treturn datasource.ServeOpts{\n\t\tQueryDataHandler: ds,\n\t\tCheckHealthHandler: ds,\n\t}\n}", "func newLocalService(config fab.EndpointConfig, mspID string, opts ...coptions.Opt) *LocalService {\n\tlogger.Debug(\"Creating new local discovery service\")\n\n\ts := &LocalService{mspID: mspID}\n\ts.service = newService(config, s.queryPeers, opts...)\n\treturn s\n}", "func New() *differ {\n\treturn &differ{}\n}", "func newPoolCollector(config monitoring.MetricsConfig, logger *zap.Logger,\n\tspectrumClient spectrumservice.Client) (Collector, error) {\n\n\tlabelPool := []string{\"pool_name\", 
\"storage_system\"}\n\n\tproperties := make(map[string]*prometheus.Desc)\n\n\tfor _, p := range config.Metrics.Pools.Properties {\n\t\tproperties[p.PropertyName] = prometheus.NewDesc(p.PrometheusName, p.PrometheusHelp, labelPool, nil)\n\t}\n\n\treturn &poolCollector{\n\t\tibmSpectrumClient: spectrumClient,\n\t\tlogger: logger.Sugar(),\n\t\tproperties: properties,\n\t}, nil\n}", "func NewDI() DIer {\n\td := new(DI)\n\td.store = make(map[string]interface{})\n\treturn d\n}", "func NewCollector() Collector {\n\treturn Collector{client: NewClient(time.Second * 5)}\n}", "func NewLedger(endpoint string, authToken string) *Ledger {\n\tif endpoint == \"\" {\n\t\tendpoint = os.Getenv(\"LEDGER_ENDPOINT\")\n\t}\n\tif authToken == \"\" {\n\t\tauthToken = os.Getenv(\"LEDGER_AUTH_TOKEN\")\n\t}\n\treturn &Ledger{endpoint: endpoint, authToken: authToken}\n}", "func dataToLogging(name string, d *schema.ResourceData) go_thunder.Logging {\n\tvar s go_thunder.Logging\n\n\tvar sInstance go_thunder.LoggingInstance\n\n\tsInstance.PoolShared = d.Get(\"pool_shared\").(string)\n\tsInstance.Name = d.Get(\"name\").(string)\n\tsInstance.Format = d.Get(\"format\").(string)\n\tsInstance.Auto = d.Get(\"auto\").(string)\n\tsInstance.KeepEnd = d.Get(\"keep_end\").(int)\n\tsInstance.LocalLogging = d.Get(\"local_logging\").(int)\n\tsInstance.Mask = d.Get(\"mask\").(string)\n\tsInstance.TemplateTCPProxyShared = d.Get(\"template_tcp_proxy_shared\").(string)\n\tsInstance.SharedPartitionTCPProxyTemplate = d.Get(\"shared_partition_tcp_proxy_template\").(int)\n\tsInstance.KeepStart = d.Get(\"keep_start\").(int)\n\tsInstance.ServiceGroup = d.Get(\"service_group\").(string)\n\tsInstance.PcreMask = d.Get(\"pcre_mask\").(string)\n\tsInstance.UserTag = d.Get(\"user_tag\").(string)\n\tsInstance.TCPProxy = d.Get(\"tcp_proxy\").(string)\n\tsInstance.SharedPartitionPool = d.Get(\"shared_partition_pool\").(int)\n\tsInstance.Pool = d.Get(\"pool\").(string)\n\n\ts.Name = sInstance\n\n\treturn s\n}", "func 
NewUnpacker(log logrus.FieldLogger, name string) *Unpacker {\n\treturn &Unpacker{\n\t\tlog: log,\n\t\tname: name,\n\t}\n}", "func newFlatplate(re float64, cp float64, fidelity string, ignoreType string) ransuq.Dataset {\n\tvar basepath, baseconfig string\n\tflatplateBase := filepath.Join(gopath, \"data\", \"ransuq\", \"flatplate\")\n\tif cp == 0 {\n\t\tbasepath = flatplateBase\n\t\tbaseconfig = filepath.Join(basepath, \"base_flatplate_config.cfg\")\n\t} else {\n\t\tbasepath = filepath.Join(flatplateBase, \"pressuregradient\")\n\t\tbaseconfig = filepath.Join(basepath, \"base\", \"base_flatplate_config.cfg\")\n\t}\n\n\trestring := strconv.FormatFloat(re, 'g', -1, 64)\n\tcpstring := strconv.FormatFloat(cp, 'g', -1, 64)\n\n\tname := \"Flatplate_Re_\" + restring\n\tif cp != 0 {\n\t\tname += \"_Cp_\" + cpstring\n\t}\n\n\t// Change the + in the exponent to an underscore\n\tb := []byte(name)\n\tfor i := range b {\n\t\tif b[i] == '+' {\n\t\t\tb[i] = '_'\n\t\t}\n\t}\n\n\tname = string(b)\n\n\twd := filepath.Join(basepath, fidelity, name)\n\n\t// Create the working directory for writing if it does not exist\n\terr := os.MkdirAll(wd, 0700)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Create the driver\n\tdrive := &driver.Driver{\n\t\tName: name,\n\t\tConfig: \"config.cfg\",\n\t\tWd: wd,\n\t\tFancyName: \"Re \" + restring,\n\t\tStdout: name + \"_log.txt\",\n\t}\n\n\tbaseconfigFile, err := os.Open(baseconfig)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Load in the existing\n\terr = drive.LoadFrom(baseconfigFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// set mesh file to be the base mesh file but use relative path\n\n\tfullMeshFilename := filepath.Join(flatplateBase, drive.Options.MeshFilename)\n\n\trelMeshFilename, err := filepath.Rel(wd, fullMeshFilename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdrive.Options.MeshFilename = relMeshFilename\n\n\t// set other things\n\tdrive.Options.ReynoldsNumber = re\n\n\tdrive.Options.RefTemperature = 
drive.Options.FreestreamTemperature\n\n\t//get the freestream pressure and density\n\tgamma := drive.Options.GammaValue\n\tgasConst := drive.Options.GasConstant\n\tpressure, density := nondimensionalize.Values(drive.Options.FreestreamTemperature, drive.Options.ReynoldsNumber, drive.Options.MachNumber, gasConst, drive.Options.ReynoldsLength, gamma)\n\tdrive.Options.RefPressure = pressure\n\tdrive.Options.RefDensity = density\n\ttotalT := nondimensionalize.TotalTemperature(drive.Options.FreestreamTemperature, drive.Options.MachNumber, gamma)\n\ttotalP := nondimensionalize.TotalPressure(pressure, drive.Options.MachNumber, gamma)\n\ttotalTString := strconv.FormatFloat(totalT, 'g', 16, 64)\n\ttotalPString := strconv.FormatFloat(totalP, 'g', 16, 64)\n\n\t//pString := strconv.FormatFloat(pressure, 'g', 16, 64)\n\t//drive.Options.MarkerInlet = &su2types.Inlet{\"( inlet, \" + totalTString + \", \" + totalPString + \", 1.0, 0.0, 0.0 )\"}\n\tdrive.Options.MarkerInlet = &su2types.Inlet{Strings: []string{\"inlet\", totalTString, totalPString, \"1\", \"0\", \"0\"}}\n\tif cp == 0 {\n\t\tdrive.Options.MarkerOutlet = &su2types.StringDoubleList{\n\t\t\tStrings: []string{\"outlet\", \"farfield\"},\n\t\t\tDoubles: []float64{pressure, pressure},\n\t\t}\n\t} else {\n\t\tinletVelocity := drive.Options.MachNumber * nondimensionalize.SpeedOfSound(gamma, gasConst, drive.Options.FreestreamTemperature)\n\t\tplateLength := 2.0\n\t\tdynamicPressure := (1.0 / 2) * density * inletVelocity * inletVelocity * plateLength\n\t\tinletPressure := pressure\n\t\toutletPressure := inletPressure + cp*dynamicPressure\n\t\tdrive.Options.MarkerSym = []string{\"symmetry\", \"farfield\"}\n\t\tdrive.Options.MarkerOutlet = &su2types.StringDoubleList{\n\t\t\tStrings: []string{\"outlet\"},\n\t\t\tDoubles: []float64{outletPressure},\n\t\t}\n\t}\n\t//drive.Options.MarkerOutlet = &su2types.StringDoubleList{\"( outlet, \" + pString + \", farfield, \" + pString + \" )\"}\n\n\tswitch fidelity {\n\tcase 
\"low\":\n\t\tdrive.Options.ResidualReduction = 0.2\n\tcase \"med\":\n\t\tdrive.Options.ResidualReduction = 5\n\t\tif cp != 0 {\n\t\t\tdrive.Options.ResidualReduction = 4\n\t\t}\n\tcase \"high\":\n\t\tdrive.Options.ResidualReduction = 7\n\tdefault:\n\t\tpanic(\"bad fidelity\")\n\t}\n\n\t//\tvar ignoreFunc func(d []float64) bool\n\t//\tvar ignoreNames []string\n\n\tignoreNames, ignoreFunc := GetIgnoreData(ignoreType)\n\n\t// Create an SU2 datawrapper from it\n\treturn &datawrapper.SU2{\n\t\tDriver: drive,\n\t\tSu2Caller: driver.Serial{}, // TODO: Need to figure out how to do this better\n\t\tIgnoreNames: ignoreNames,\n\t\tIgnoreFunc: ignoreFunc,\n\t\tName: name,\n\t\tComparisonPostprocessor: datawrapper.FlatplatePostprocessor{},\n\t}\n}", "func New(config Config) zapcore.WriteSyncer {\n\treturn &gelf{Config: config}\n}" ]
[ "0.6009672", "0.5827135", "0.57258046", "0.56654805", "0.5659624", "0.5626994", "0.5557039", "0.5552011", "0.54799265", "0.5363846", "0.5299342", "0.529503", "0.5236536", "0.5228199", "0.5216133", "0.5192747", "0.514851", "0.5139744", "0.51370186", "0.5133357", "0.51052064", "0.5102685", "0.5094551", "0.5067187", "0.5053971", "0.50469244", "0.5043248", "0.50025254", "0.5000928", "0.49925604", "0.4975274", "0.49505532", "0.49011832", "0.4896037", "0.48835006", "0.48797023", "0.4879572", "0.48728207", "0.48707727", "0.48651886", "0.48633325", "0.48527393", "0.48456037", "0.4837874", "0.48286736", "0.4819773", "0.48121315", "0.4808281", "0.48041502", "0.4794369", "0.47912", "0.47910646", "0.4771669", "0.47530013", "0.47528568", "0.4747382", "0.4739843", "0.47381732", "0.47377273", "0.47323737", "0.47323737", "0.4724927", "0.4719214", "0.4718445", "0.4708577", "0.47069645", "0.4703027", "0.47003165", "0.4694946", "0.46938497", "0.46913633", "0.46893933", "0.46880463", "0.46878737", "0.46873593", "0.4686413", "0.4664184", "0.46637344", "0.46617693", "0.46611854", "0.46519956", "0.46473396", "0.46455872", "0.4640717", "0.46371177", "0.4624034", "0.46225357", "0.46214417", "0.46162206", "0.46162206", "0.4614741", "0.46129113", "0.4609973", "0.46075666", "0.46034554", "0.4601835", "0.46012098", "0.459795", "0.45850426", "0.45799753" ]
0.7516035
0
ForPulseAndJet returns HeavyPayload message for a provided pulse and a jetID
func (d *LightDataGatherer) ForPulseAndJet( ctx context.Context, pn insolar.PulseNumber, jetID insolar.JetID, ) (*message.HeavyPayload, error) { dr, err := d.dropAccessor.ForPulse(ctx, jetID, pn) if err != nil { inslogger.FromContext(ctx).Error("synchronize: can't fetch a drop") return nil, err } bls := d.blobsAccessor.ForPulse(ctx, jetID, pn) records := d.recsAccessor.ForPulse(ctx, jetID, pn) indexes := d.indexReplicaAccessor.ForPNAndJet(ctx, pn, jetID) return &message.HeavyPayload{ JetID: jetID, PulseNum: pn, IndexBuckets: convertIndexBuckets(ctx, indexes), Drop: drop.MustEncode(&dr), Blobs: convertBlobs(bls), Records: convertRecords(ctx, records), }, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (db *DB) GetHeavySyncedPulse(ctx context.Context, jetID core.RecordID) (pn core.PulseNumber, err error) {\n\tvar buf []byte\n\tbuf, err = db.get(ctx, prefixkey(scopeIDSystem, jetID[:], []byte{sysLastSyncedPulseOnHeavy}))\n\tif err == nil {\n\t\tpn = core.NewPulseNumber(buf)\n\t} else if err == ErrNotFound {\n\t\terr = nil\n\t}\n\treturn\n}", "func (m *IndexCollectionAccessorMock) ForPulseAndJet(p context.Context, p1 insolar.PulseNumber, p2 insolar.JetID) (r map[insolar.ID]Lifeline) {\n\tcounter := atomic.AddUint64(&m.ForPulseAndJetPreCounter, 1)\n\tdefer atomic.AddUint64(&m.ForPulseAndJetCounter, 1)\n\n\tif len(m.ForPulseAndJetMock.expectationSeries) > 0 {\n\t\tif counter > uint64(len(m.ForPulseAndJetMock.expectationSeries)) {\n\t\t\tm.t.Fatalf(\"Unexpected call to IndexCollectionAccessorMock.ForPulseAndJet. %v %v %v\", p, p1, p2)\n\t\t\treturn\n\t\t}\n\n\t\tinput := m.ForPulseAndJetMock.expectationSeries[counter-1].input\n\t\ttestify_assert.Equal(m.t, *input, IndexCollectionAccessorMockForPulseAndJetInput{p, p1, p2}, \"IndexCollectionAccessor.ForPulseAndJet got unexpected parameters\")\n\n\t\tresult := m.ForPulseAndJetMock.expectationSeries[counter-1].result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the IndexCollectionAccessorMock.ForPulseAndJet\")\n\t\t\treturn\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.ForPulseAndJetMock.mainExpectation != nil {\n\n\t\tinput := m.ForPulseAndJetMock.mainExpectation.input\n\t\tif input != nil {\n\t\t\ttestify_assert.Equal(m.t, *input, IndexCollectionAccessorMockForPulseAndJetInput{p, p1, p2}, \"IndexCollectionAccessor.ForPulseAndJet got unexpected parameters\")\n\t\t}\n\n\t\tresult := m.ForPulseAndJetMock.mainExpectation.result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the IndexCollectionAccessorMock.ForPulseAndJet\")\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.ForPulseAndJetFunc == nil {\n\t\tm.t.Fatalf(\"Unexpected call to 
IndexCollectionAccessorMock.ForPulseAndJet. %v %v %v\", p, p1, p2)\n\t\treturn\n\t}\n\n\treturn m.ForPulseAndJetFunc(p, p1, p2)\n}", "func (svc *OTXPulseDetailService) Get(pulseID string) (*Pulse, error) {\n\treq, err := svc.client.newRequest(http.MethodGet, PulseDetailURLPath+pulseID, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar p Pulse\n\tif err := svc.client.do(req, &p); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &p, nil\n}", "func (s *Storage) GetPulse(pulseNumber int64) (models.Pulse, error) {\n\ttimer := prometheus.NewTimer(GetPulseDuration)\n\tdefer timer.ObserveDuration()\n\n\tvar pulse models.Pulse\n\terr := s.db.Where(\"pulse_number = ?\", pulseNumber).First(&pulse).Error\n\tif err != nil {\n\t\treturn pulse, err\n\t}\n\n\tpulse = s.updateNextPulse(pulse)\n\tpulse = s.updatePrevPulse(pulse)\n\n\treturn pulse, err\n}", "func (s *Storage) GetPulseByPrev(prevPulse models.Pulse) (models.Pulse, error) {\n\ttimer := prometheus.NewTimer(GetPulseByPrevDuration)\n\tdefer timer.ObserveDuration()\n\n\tvar pulse models.Pulse\n\terr := s.db.Where(\"prev_pulse_number = ?\", prevPulse.PulseNumber).First(&pulse).Error\n\treturn pulse, err\n}", "func (db *DB) GetPulse(ctx context.Context, num core.PulseNumber) (*record.PulseRecord, error) {\n\tbuf, err := db.get(ctx, prefixkey(scopeIDPulse, num.Bytes()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdec := codec.NewDecoder(bytes.NewReader(buf), &codec.CborHandle{})\n\tvar rec record.PulseRecord\n\terr = dec.Decode(&rec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rec, nil\n}", "func (s *Storage) GetJetDrops(pulse models.Pulse) ([]models.JetDrop, error) {\n\ttimer := prometheus.NewTimer(GetJetDropsDuration)\n\tdefer timer.ObserveDuration()\n\n\tvar jetDrops []models.JetDrop\n\terr := s.db.Where(\"pulse_number = ?\", pulse.PulseNumber).Find(&jetDrops).Error\n\treturn jetDrops, err\n}", "func (huo *HistorytakingUpdateOne) AddPulse(i int) *HistorytakingUpdateOne 
{\n\thuo.mutation.AddPulse(i)\n\treturn huo\n}", "func (db *DB) GetPulse(num core.PulseNumber) (*record.PulseRecord, error) {\n\tbuf, err := db.Get(prefixkey(scopeIDPulse, num.Bytes()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdec := codec.NewDecoder(bytes.NewReader(buf), &codec.CborHandle{})\n\tvar rec record.PulseRecord\n\terr = dec.Decode(&rec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rec, nil\n}", "func (hu *HistorytakingUpdate) AddPulse(i int) *HistorytakingUpdate {\n\thu.mutation.AddPulse(i)\n\treturn hu\n}", "func (db *DB) SetHeavySyncedPulse(ctx context.Context, jetID core.RecordID, pulsenum core.PulseNumber) error {\n\treturn db.Update(ctx, func(tx *TransactionManager) error {\n\t\treturn tx.set(ctx, prefixkey(scopeIDSystem, jetID[:], []byte{sysLastSyncedPulseOnHeavy}), pulsenum.Bytes())\n\t})\n}", "func (x *Big) Payload() Payload {\n\tif !x.IsNaN(0) {\n\t\treturn 0\n\t}\n\treturn Payload(x.compact)\n}", "func (m *ParcelMock) Pulse() (r insolar.PulseNumber) {\n\tcounter := atomic.AddUint64(&m.PulsePreCounter, 1)\n\tdefer atomic.AddUint64(&m.PulseCounter, 1)\n\n\tif len(m.PulseMock.expectationSeries) > 0 {\n\t\tif counter > uint64(len(m.PulseMock.expectationSeries)) {\n\t\t\tm.t.Fatalf(\"Unexpected call to ParcelMock.Pulse.\")\n\t\t\treturn\n\t\t}\n\n\t\tresult := m.PulseMock.expectationSeries[counter-1].result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the ParcelMock.Pulse\")\n\t\t\treturn\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.PulseMock.mainExpectation != nil {\n\n\t\tresult := m.PulseMock.mainExpectation.result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the ParcelMock.Pulse\")\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.PulseFunc == nil {\n\t\tm.t.Fatalf(\"Unexpected call to ParcelMock.Pulse.\")\n\t\treturn\n\t}\n\n\treturn m.PulseFunc()\n}", "func (db *DB) GetReplicatedPulse(ctx context.Context, jet core.RecordID) (core.PulseNumber, error) {\n\tk := 
prefixkey(scopeIDSystem, jet[:], []byte{sysReplicatedPulse})\n\tbuf, err := db.get(ctx, k)\n\tif err != nil {\n\t\tif err == ErrNotFound {\n\t\t\terr = nil\n\t\t}\n\t\treturn 0, err\n\t}\n\treturn core.NewPulseNumber(buf), nil\n}", "func (d *Decoder) Pulse() Pulse {\n\tvar p pulse\n\tfor {\n\t\tp = <-d.c\n\t\tif p.sec == 0 {\n\t\t\td.decodeDate(p.l, uint32(p.h))\n\t\t\tbreak\n\t\t}\n\t\tif d.date.Sec >= 0 || p.sec < 0 {\n\t\t\td.date.Sec = p.sec\n\t\t\tbreak\n\t\t}\n\t}\n\treturn Pulse{d.date, p.stamp}\n}", "func (m *IndexCollectionAccessorMock) ForPulseAndJetMinimockCounter() uint64 {\n\treturn atomic.LoadUint64(&m.ForPulseAndJetCounter)\n}", "func (huo *HistorytakingUpdateOne) SetPulse(i int) *HistorytakingUpdateOne {\n\thuo.mutation.ResetPulse()\n\thuo.mutation.SetPulse(i)\n\treturn huo\n}", "func BrightnessPulse() {\n\tfor {\n\t\tfor i := 255; i >= 0; i-- {\n\t\t\ts := strconv.Itoa(i)\n\t\t\tBrightnessFileHandler(s)\n\t\t\ttime.Sleep(25 * time.Millisecond)\n\t\t}\n\t\tfor i := 1; i <= 255; i++ {\n\t\t\ts := strconv.Itoa(i)\n\t\t\tBrightnessFileHandler(s)\n\t\t\ttime.Sleep(25 * time.Millisecond)\n\t\t}\n\t}\n}", "func (r *Reader) dialAndGetResponse(handshakeDur time.Duration, boostPerfFlag bool) ([]Pulse, error) {\n\tvar arr *C.int32_t\n\tvar arrLen C.int32_t\n\tvar list []int32\n\tvar hsDurUsec C.int32_t = C.int32_t(handshakeDur / time.Microsecond)\n\tvar boost C.int32_t = 0\n\tvar err2 *C.Error\n\tif boostPerfFlag {\n\t\tboost = 1\n\t}\n\n\t// return array: [pulse, duration, pulse, duration, ...]\n\tres := C.dial_DHTxx_and_read(C.int32_t(r.pin), hsDurUsec, boost, &arr, &arrLen, &err2)\n\tif res == -1 {\n\t\tvar err error\n\t\tif err2 != nil {\n\t\t\tmsg := C.GoString(err2.message)\n\t\t\terr = errors.New(fmt.Sprintf(\"error during call C.dial_DHTxx_and_read(): %v\", msg))\n\t\t\tC.free_error(err2)\n\t\t} else {\n\t\t\terr = errors.New(\"error during call C.dial_DHTxx_and_read()\")\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer C.free(unsafe.Pointer(arr))\n\n\t// 
convert original C array arr to Go slice list\n\th := (*reflect.SliceHeader)(unsafe.Pointer(&list))\n\th.Data = uintptr(unsafe.Pointer(arr))\n\th.Len = int(arrLen)\n\th.Cap = int(arrLen)\n\tpulses := make([]Pulse, len(list)/2)\n\n\t// convert original int array ([pulse, duration, pulse, duration, ...])\n\t// to Pulse struct array\n\tfor i := 0; i < len(list)/2; i++ {\n\t\tvar value byte = 0\n\t\tif list[i*2] != 0 {\n\t\t\tvalue = 1\n\t\t}\n\n\t\tpulses[i] = Pulse{\n\t\t\tValue: value,\n\t\t\tDuration: time.Duration(list[i*2+1]) * time.Microsecond,\n\t\t}\n\t}\n\n\treturn pulses, nil\n}", "func (s *Storage) GetSequentialPulse() (models.Pulse, error) {\n\ttimer := prometheus.NewTimer(GetSequentialPulseDuration)\n\tdefer timer.ObserveDuration()\n\n\tvar pulses []models.Pulse\n\terr := s.db.Where(\"is_sequential = ?\", true).Order(\"pulse_number desc\").Limit(1).Find(&pulses).Error\n\tif err != nil {\n\t\treturn models.Pulse{}, err\n\t}\n\tif len(pulses) == 0 {\n\t\treturn models.Pulse{}, nil\n\t}\n\treturn pulses[0], err\n}", "func (db *DB) GetLastPulseAsLightMaterial(ctx context.Context) (core.PulseNumber, error) {\n\tbuf, err := db.get(ctx, prefixkey(scopeIDSystem, []byte{sysLastPulseAsLightMaterial}))\n\tif err != nil {\n\t\tif err == ErrNotFound {\n\t\t\terr = nil\n\t\t}\n\t\treturn 0, err\n\t}\n\treturn core.NewPulseNumber(buf), nil\n}", "func (rw *DataRW) payload(msg Msg) []byte {\n\tbuffer := bytes.NewBuffer(make([]byte, 0, 65536))\n\t_, err := io.Copy(buffer, msg.Payload)\n\tif err != nil {\n\t\treturn nil\n\t}\n\ttemp := buffer.Bytes()\n\tlength := len(temp)\n\tvar body []byte\n\t//are we wasting more than 5% space?\n\tif cap(temp) > (length + length/5) {\n\t\tbody = make([]byte, length)\n\t\tcopy(body, temp)\n\t} else {\n\t\tbody = temp\n\t}\n\treturn body\n}", "func NewPulse(signal chan interface{}) *Pulse {\n\tpulse := &Pulse{\n\t\tsignal: signal,\n\t\tlen: 0,\n\t}\n\tpulse.listen()\n\treturn pulse\n}", "func 
(*AlertingCondition_Spec_TimeSeries_CombineThreshold) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_alerting_condition_proto_rawDescGZIP(), []int{0, 0, 0, 2}\n}", "func (LP72 LongPoll72OutBound) BuildAndByte(params ...interface{}) ([]byte, error) {\n\n\tcashableMoneyInCents := params[0].(uint64)\n\trestrictedMoneyInCents := params[1].(uint64)\n\tnonRestrictedMoneyInCents := params[2].(uint64)\n\n\tbinAssetNo := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(binAssetNo, AssetNumber)\n\n\tLP72.Address = 0x01\n\tLP72.Command = 0x72\n\tLP72.TransferCode = 0x00\n\tLP72.TransactionIndex = 0x00\n\tLP72.CashableBCD = bcd.FromUint(cashableMoneyInCents, 5)\n\tLP72.RestrictedBCD = bcd.FromUint(restrictedMoneyInCents, 5)\n\tLP72.NonRestrictedBCD = bcd.FromUint(nonRestrictedMoneyInCents, 5)\n\tLP72.TransferFlags = 0x00\n\tLP72.AssetNumber = binAssetNo\n\tLP72.RegistrationKey = RegistrationKey\n\tLP72.Expiration = []byte{0x01, 0x01, 0x20, 0x25}\n\tLP72.PoolID = []byte{0x00, 0x00}\n\tLP72.ReceiptDataLength = 0x00\n\tLP72.LockTimeout = []byte{0x44, 0xCD}\n\n\tdate := jodaTime.Format(\"YYYYMMddHHmmssSSS\", time.Now())\n\n\tLP72.TransactionID = []byte(date)\n\tLP72.TransactionIDLength = byte(len(LP72.TransactionID))\n\n\tif params[3].(uint64) != 0 {\n\t\tLP72.TransferType = 0x80\n\t} else {\n\t\tLP72.TransferType = 0x00\n\t}\n\n\tmessageLen := (1 + 1 + 1 + 5 + 5 + 5 + 1 + 4 + 20 + 1 + len(LP72.TransactionID) + 4 + 2 + 1 + 2)\n\tLP72.Length = byte(messageLen)\n\n\tmessageData := []byte{\n\t\tLP72.Address,\n\t\tLP72.Command,\n\t\tLP72.Length,\n\t\tLP72.TransferCode,\n\t\tLP72.TransactionIndex,\n\t\tLP72.TransferType,\n\t}\n\tmessageData = append(messageData, LP72.CashableBCD...)\n\tmessageData = append(messageData, LP72.RestrictedBCD...)\n\tmessageData = append(messageData, LP72.NonRestrictedBCD...)\n\tmessageData = append(messageData, LP72.TransferFlags)\n\tmessageData = append(messageData, LP72.AssetNumber...)\n\tmessageData = append(messageData, 
LP72.RegistrationKey...)\n\tmessageData = append(messageData, LP72.TransactionIDLength)\n\tmessageData = append(messageData, LP72.TransactionID...)\n\tmessageData = append(messageData, LP72.Expiration...)\n\tmessageData = append(messageData, LP72.PoolID...)\n\tmessageData = append(messageData, LP72.ReceiptDataLength)\n\tmessageData = append(messageData, LP72.LockTimeout...)\n\n\tcrc := CalculateCRC(0, messageData)\n\n\tmessageData = append(messageData, crc...)\n\treturn messageData, nil\n\n}", "func (p *Character) Heavy(playerChan PlayerChannel) {\n\tinput := actions.Action{}\n\tinput.Attack.StartTime = time.Now()\n\n\tinput.Attack.Damage = p.Moveset.Heavy[\"damage\"]\n\tinput.Attack.Range = p.Moveset.Heavy[\"range\"]\n\n\tplayerChan <- &input\n}", "func (hu *HistorytakingUpdate) SetPulse(i int) *HistorytakingUpdate {\n\thu.mutation.ResetPulse()\n\thu.mutation.SetPulse(i)\n\treturn hu\n}", "func (s *Storage) GetPulses(fromPulse *int64, timestampLte, timestampGte, pulseNumberLte, pulseNumberLt, pulseNumberGte, pulseNumberGt *int64, sortByAsc bool, limit, offset int) ([]models.Pulse, int, error) {\n\ttimer := prometheus.NewTimer(GetPulsesDuration)\n\tdefer timer.ObserveDuration()\n\n\tquery := s.db.Model(&models.Pulse{})\n\tquery = filterByTimestamp(query, timestampLte, timestampGte)\n\tquery = filterByPulseNumber(query, pulseNumberLte, pulseNumberLt, pulseNumberGte, pulseNumberGt)\n\tif sortByAsc {\n\t\tquery = query.Order(\"pulse_number asc\")\n\t} else {\n\t\tquery = query.Order(\"pulse_number desc\")\n\t}\n\n\tvar err error\n\tif fromPulse != nil {\n\t\tquery = query.Where(\"pulse_number <= ?\", &fromPulse)\n\t}\n\n\tpulses, total, err := getPulses(query, limit, offset)\n\tif err != nil {\n\t\treturn nil, 0, errors.Wrap(err, \"error while select pulses from db\")\n\t}\n\n\t// set real NextPulseNumber and PrevPulseNumber to every pulse (if we don't know it, set -1)\n\tpulsesLen := len(pulses)\n\tif sortByAsc {\n\t\tfor i := pulsesLen - 1; i > 0; i-- {\n\t\t\tif 
pulses[i].PrevPulseNumber == pulses[i-1].PulseNumber {\n\t\t\t\tpulses[i-1].NextPulseNumber = pulses[i].PulseNumber\n\t\t\t} else {\n\t\t\t\tpulses[i].PrevPulseNumber = -1\n\t\t\t\tpulses[i-1].NextPulseNumber = -1\n\t\t\t}\n\t\t}\n\n\t\tif pulsesLen > 0 {\n\t\t\tpulses[0] = s.updatePrevPulse(pulses[0])\n\t\t\tpulses[pulsesLen-1] = s.updateNextPulse(pulses[pulsesLen-1])\n\t\t}\n\t} else {\n\t\tfor i := 0; i < pulsesLen-1; i++ {\n\t\t\tif pulses[i].PrevPulseNumber == pulses[i+1].PulseNumber {\n\t\t\t\tpulses[i+1].NextPulseNumber = pulses[i].PulseNumber\n\t\t\t} else {\n\t\t\t\tpulses[i+1].NextPulseNumber = -1\n\t\t\t\tpulses[i].PrevPulseNumber = -1\n\t\t\t}\n\t\t}\n\n\t\tif pulsesLen > 0 {\n\t\t\tpulses[0] = s.updateNextPulse(pulses[0])\n\t\t\tpulses[pulsesLen-1] = s.updatePrevPulse(pulses[pulsesLen-1])\n\t\t}\n\t}\n\treturn pulses, total, err\n}", "func (m *Message) DSP() (*DSP, error) {\n\tps, err := m.Parse(\"DSP\")\n\tpst, ok := ps.(*DSP)\n\tif ok {\n\t\treturn pst, err\n\t}\n\treturn nil, err\n}", "func BuildGetPayload(stationGetID string, stationGetAuth string) (*station.GetPayload, error) {\n\tvar err error\n\tvar id int32\n\t{\n\t\tvar v int64\n\t\tv, err = strconv.ParseInt(stationGetID, 10, 32)\n\t\tid = int32(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid value for id, must be INT32\")\n\t\t}\n\t}\n\tvar auth *string\n\t{\n\t\tif stationGetAuth != \"\" {\n\t\t\tauth = &stationGetAuth\n\t\t}\n\t}\n\tv := &station.GetPayload{}\n\tv.ID = id\n\tv.Auth = auth\n\n\treturn v, nil\n}", "func BuildGetPayload(stationGetID string, stationGetAuth string) (*station.GetPayload, error) {\n\tvar err error\n\tvar id int32\n\t{\n\t\tvar v int64\n\t\tv, err = strconv.ParseInt(stationGetID, 10, 32)\n\t\tid = int32(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid value for id, must be INT32\")\n\t\t}\n\t}\n\tvar auth string\n\t{\n\t\tauth = stationGetAuth\n\t}\n\tv := &station.GetPayload{}\n\tv.ID = id\n\tv.Auth = auth\n\n\treturn v, nil\n}", 
"func (s *Strategy) GetBaseData(d data.Handler) (signal.Signal, error) {\n\tif d == nil {\n\t\treturn signal.Signal{}, common.ErrNilArguments\n\t}\n\tlatest := d.Latest()\n\tif latest == nil {\n\t\treturn signal.Signal{}, common.ErrNilEvent\n\t}\n\treturn signal.Signal{\n\t\tBase: latest.GetBase(),\n\t\tClosePrice: latest.GetClosePrice(),\n\t\tHighPrice: latest.GetHighPrice(),\n\t\tOpenPrice: latest.GetOpenPrice(),\n\t\tLowPrice: latest.GetLowPrice(),\n\t}, nil\n}", "func (s *Storage) CompletePulse(pulseNumber int64) error {\n\ttimer := prometheus.NewTimer(CompletePulseDuration)\n\tdefer timer.ObserveDuration()\n\treturn s.db.Transaction(func(tx *gorm.DB) error {\n\t\tpulse := models.Pulse{PulseNumber: pulseNumber}\n\t\tupdate := tx.Model(&pulse).Update(models.Pulse{IsComplete: true})\n\t\tif update.Error != nil {\n\t\t\treturn errors.Wrap(update.Error, \"error while updating pulse completeness\")\n\t\t}\n\t\trowsAffected := update.RowsAffected\n\t\tif rowsAffected == 0 {\n\t\t\treturn errors.Errorf(\"try to complete not existing pulse with number %d\", pulseNumber)\n\t\t}\n\t\tif rowsAffected != 1 {\n\t\t\treturn errors.Errorf(\"several rows were affected by update for pulse with number %d to complete, it was not expected\", pulseNumber)\n\t\t}\n\t\treturn nil\n\t})\n}", "func (r *Reader) decodePulses(pulses []Pulse) (float32, float32, error) {\n\tif len(pulses) >= 82 && len(pulses) <= 85 {\n\t\tpulses = pulses[len(pulses)-82:]\n\t} else {\n\t\tr.printPulseArray(pulses)\n\n\t\treturn 0, 0, fmt.Errorf(\"cannot decode pulse array received from DHTxx sensor, since incorrect length: %d\", len(pulses))\n\t}\n\n\tpulses = pulses[:80]\n\t// any bit low signal part\n\ttLow := 50 * time.Microsecond\n\t// 0 bit high signal part\n\ttHigh0 := 27 * time.Microsecond\n\t// 1 bit high signal part\n\ttHigh1 := 70 * time.Microsecond\n\n\t// decode 1st byte\n\tb0, err := r.decodeByte(tLow, tHigh0, tHigh1, 0, pulses)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\t// decode 2nd 
byte\n\tb1, err := r.decodeByte(tLow, tHigh0, tHigh1, 16, pulses)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\t// decode 3rd byte\n\tb2, err := r.decodeByte(tLow, tHigh0, tHigh1, 32, pulses)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\t// decode 4th byte\n\tb3, err := r.decodeByte(tLow, tHigh0, tHigh1, 48, pulses)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\t// decode 5th byte: control sum to verify all data received from sensor\n\tsum, err := r.decodeByte(tLow, tHigh0, tHigh1, 64, pulses)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\t// produce data consistency check\n\tcalcSum := byte(b0 + b1 + b2 + b3)\n\tif sum != calcSum {\n\t\terr := errors.New(fmt.Sprintf(\n\t\t\t\"CRCs do not match: checksum from sensor(%v) != calculated checksum(%v=%v+%v+%v+%v)\",\n\t\t\tsum, calcSum, b0, b1, b2, b3))\n\n\t\treturn 0, 0, err\n\t}\n\n\tr.logger.Debugf(\"CRCs verified: checksum from sensor(%v) = calculated checksum(%v=%v+%v+%v+%v)\", sum, calcSum, b0, b1, b2, b3)\n\n\t// debug output for 5 bytes\n\tr.logger.Debugf(\"Decoded from DHTxx sensor: [%d, %d, %d, %d, %d]\", b0, b1, b2, b3, sum)\n\n\t// extract temperature and humidity depending on sensor type\n\tvar temperature, humidity float32\n\n\tif r.sensorType == DHT11 {\n\t\thumidity = float32(b0)\n\t\ttemperature = float32(b2)\n\t} else if r.sensorType == DHT12 {\n\t\thumidity = float32(b0) + float32(b1)/10.0\n\t\ttemperature = float32(b2) + float32(b3)/10.0\n\t\tif b3&0x80 != 0 {\n\t\t\ttemperature *= -1.0\n\t\t}\n\t} else if r.sensorType == DHT22 {\n\t\thumidity = (float32(b0)*256 + float32(b1)) / 10.0\n\t\ttemperature = (float32(b2&0x7F)*256 + float32(b3)) / 10.0\n\t\tif b2&0x80 != 0 {\n\t\t\ttemperature *= -1.0\n\t\t}\n\t}\n\n\t// additional check for data correctness\n\tif humidity > 100.0 {\n\t\treturn 0, 0, fmt.Errorf(\"humidity value exceeds 100%%: %v\", humidity)\n\t} else if humidity == 0 {\n\t\treturn 0, 0, fmt.Errorf(\"humidity value cannot be zero\")\n\t}\n\n\t// success\n\treturn 
temperature, humidity, nil\n}", "func PackTrigger(id *big.Int, trig triggerWrapper) ([]byte, error) {\n\tvar trigger []byte\n\tvar err error\n\n\t// construct utils abi\n\tutilsABI, err := abi.JSON(strings.NewReader(automation_utils_2_1.AutomationUtilsABI))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrABINotParsable, err)\n\t}\n\n\t// pack trigger based on upkeep type\n\tupkeepType, ok := getUpkeepTypeFromBigInt(id)\n\tif !ok {\n\t\treturn nil, ErrInvalidUpkeepID\n\t}\n\tswitch upkeepType {\n\tcase ocr2keepers.ConditionTrigger:\n\t\ttrig := automation_utils_2_1.KeeperRegistryBase21ConditionalTrigger{\n\t\t\tBlockNum: trig.BlockNum,\n\t\t\tBlockHash: trig.BlockHash,\n\t\t}\n\t\ttrigger, err = utilsABI.Pack(\"_conditionalTrigger\", &trig)\n\tcase ocr2keepers.LogTrigger:\n\t\tlogTrig := automation_utils_2_1.KeeperRegistryBase21LogTrigger{\n\t\t\tBlockNum: trig.BlockNum,\n\t\t\tBlockHash: trig.BlockHash,\n\t\t\tLogIndex: trig.LogIndex,\n\t\t\tTxHash: trig.TxHash,\n\t\t}\n\t\ttrigger, err = utilsABI.Pack(\"_logTrigger\", &logTrig)\n\tdefault:\n\t\terr = fmt.Errorf(\"unknown trigger type: %d\", upkeepType)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn trigger[4:], nil\n}", "func (device *BarometerBricklet) GetDebouncePeriod() (debounce uint32, err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Get(uint8(FunctionGetDebouncePeriod), buf.Bytes())\n\tif err != nil {\n\t\treturn debounce, err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 12 {\n\t\t\treturn debounce, fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 12)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn debounce, DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tresultBuf := bytes.NewBuffer(resultBytes[8:])\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &debounce)\n\n\t}\n\n\treturn debounce, nil\n}", "func UnpackTrigger(id *big.Int, raw 
[]byte) (triggerWrapper, error) {\n\t// construct utils abi\n\tutilsABI, err := abi.JSON(strings.NewReader(automation_utils_2_1.AutomationUtilsABI))\n\tif err != nil {\n\t\treturn triggerWrapper{}, fmt.Errorf(\"%w: %s\", ErrABINotParsable, err)\n\t}\n\n\tupkeepType, ok := getUpkeepTypeFromBigInt(id)\n\tif !ok {\n\t\treturn triggerWrapper{}, ErrInvalidUpkeepID\n\t}\n\tswitch upkeepType {\n\tcase ocr2keepers.ConditionTrigger:\n\t\tunpacked, err := utilsABI.Methods[\"_conditionalTrigger\"].Inputs.Unpack(raw)\n\t\tif err != nil {\n\t\t\treturn triggerWrapper{}, fmt.Errorf(\"%w: failed to unpack conditional trigger\", err)\n\t\t}\n\t\tconverted, ok := abi.ConvertType(unpacked[0], new(automation_utils_2_1.KeeperRegistryBase21ConditionalTrigger)).(*automation_utils_2_1.KeeperRegistryBase21ConditionalTrigger)\n\t\tif !ok {\n\t\t\treturn triggerWrapper{}, fmt.Errorf(\"failed to convert type\")\n\t\t}\n\t\ttriggerW := triggerWrapper{\n\t\t\tBlockNum: converted.BlockNum,\n\t\t}\n\t\tcopy(triggerW.BlockHash[:], converted.BlockHash[:])\n\t\treturn triggerW, nil\n\tcase ocr2keepers.LogTrigger:\n\t\tunpacked, err := utilsABI.Methods[\"_logTrigger\"].Inputs.Unpack(raw)\n\t\tif err != nil {\n\t\t\treturn triggerWrapper{}, fmt.Errorf(\"%w: failed to unpack log trigger\", err)\n\t\t}\n\t\tconverted, ok := abi.ConvertType(unpacked[0], new(automation_utils_2_1.KeeperRegistryBase21LogTrigger)).(*automation_utils_2_1.KeeperRegistryBase21LogTrigger)\n\t\tif !ok {\n\t\t\treturn triggerWrapper{}, fmt.Errorf(\"failed to convert type\")\n\t\t}\n\t\ttriggerW := triggerWrapper{\n\t\t\tBlockNum: converted.BlockNum,\n\t\t\tLogIndex: converted.LogIndex,\n\t\t}\n\t\tcopy(triggerW.BlockHash[:], converted.BlockHash[:])\n\t\tcopy(triggerW.TxHash[:], converted.TxHash[:])\n\t\treturn triggerW, nil\n\tdefault:\n\t\treturn triggerWrapper{}, fmt.Errorf(\"unknown trigger type: %d\", upkeepType)\n\t}\n}", "func microsecondsPerPulse(bpm float32) time.Duration {\n\treturn time.Duration((float32(Minute) * 
float32(Microsecond)) / (float32(Ppqn) * bpm))\n}", "func (ps pulseSlave) Pulse(p syncosc.Pulse) error {\n\tfmt.Printf(\"%d\\n\", p.Count)\n\treturn nil\n}", "func (r *Reader) decodeByte(tLow, tHigh0, tHigh1 time.Duration, start int, pulses []Pulse) (byte, error) {\n\tif len(pulses)-start < 16 {\n\t\treturn 0, fmt.Errorf(\"cannot decode byte since range between index (%d) and array length (%d) is less than 16\", start, len(pulses))\n\t}\n\n\tHIGH_DUR_MAX := tLow + tHigh1\n\tHIGH_LOW_DUR_AVG := ((tLow+tHigh1)/2 + (tLow+tHigh0)/2) / 2\n\n\tvar b int = 0\n\tfor i := 0; i < 8; i++ {\n\t\tpulseL := pulses[start+i*2]\n\t\tpulseH := pulses[start+i*2+1]\n\t\tif pulseL.Value != 0 {\n\t\t\treturn 0, fmt.Errorf(\"low edge value expected at index %d\", start+i*2)\n\t\t}\n\t\tif pulseH.Value == 0 {\n\t\t\treturn 0, fmt.Errorf(\"high edge value expected at index %d\", start+i*2+1)\n\t\t}\n\n\t\t// const HIGH_DUR_MAX = (70 + (70 + 54)) / 2 * time.Microsecond\n\t\t// Calc average value between 24us (bit 0) and 70us (bit 1).\n\t\t// Everything that less than this param is bit 0, bigger - bit 1.\n\t\t// const HIGH_LOW_DUR_AVG = (24 + (70-24)/2) * time.Microsecond\n\t\tif pulseH.Duration > HIGH_DUR_MAX {\n\t\t\treturn 0, fmt.Errorf(\"high edge value duration %v exceed maximum expected %v\", pulseH.Duration, HIGH_DUR_MAX)\n\t\t}\n\n\t\tif pulseH.Duration > HIGH_LOW_DUR_AVG {\n\t\t\tb = b | (1 << uint(7-i))\n\t\t}\n\t}\n\n\treturn byte(b), nil\n}", "func getDataWaterQuality(inputData *WaterQualityGraphWaterlevelAnalystInput, q string, wqType string) (*WaterQualityWaterlevelOutput, error) {\n\n\tdb, err := pqx.Open()\n\tif err != nil {\n\t\treturn nil, errors.NewEvent(eventcode.EventNetworkCriticalUnableConDB, err)\n\t}\n\n\tp := []interface{}{inputData.DatetimeStart, inputData.DatetimeEnd}\n\tif wqType != \"\" {\n\t\tp = append(p, inputData.WaterlevelStation)\n\t} else {\n\t\tp = append(p, inputData.WaterQualityStation)\n\t}\n\tfmt.Println(q)\n\trows, err := db.Query(q, p...)\n\tif err 
!= nil {\n\t\treturn nil, pqx.GetRESTError(err)\n\t}\n\tdefer rows.Close()\n\n\tdataRow := &WaterQualityWaterlevelOutput{}\n\tdateOutput := make([]*WaterQualityGraphOutputData, 0)\n\tfor rows.Next() {\n\t\tvar (\n\t\t\tstationName pqx.JSONRaw\n\t\t\tdatetime time.Time\n\t\t\tvalue sql.NullFloat64\n\t\t)\n\t\trows.Scan(&stationName, &datetime, &value)\n\n\t\tdd := &WaterQualityGraphOutputData{}\n\t\tdataRow.SeriesName = stationName.JSON()\n\t\tdd.Name = udt.DatetimeFormat(datetime, \"datetime\")\n\t\tdd.Value = ValidData(value.Valid, value.Float64)\n\t\tdateOutput = append(dateOutput, dd)\n\t}\n\tdataRow.Data = dateOutput\n\treturn dataRow, nil\n}", "func (tangle *Tangle) Payload(payloadID payload.ID) *payload.CachedPayload {\n\treturn &payload.CachedPayload{CachedObject: tangle.payloadStorage.Load(payloadID.Bytes())}\n}", "func PCMPEQW(mx, x operand.Op) { ctx.PCMPEQW(mx, x) }", "func (hjd HeavyJobDetails) AsBasicJobDetails() (BasicJobDetails, bool) {\n\treturn &hjd, true\n}", "func (b *testBroker) Push(pipe *Pipeline, j *Job) (string, error) {\n\tif err := b.isServing(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tq := b.queue(pipe)\n\tif q == nil {\n\t\treturn \"\", fmt.Errorf(\"undefined testQueue `%s`\", pipe.Name())\n\t}\n\n\tid, err := uuid.NewV4()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tq.push(id.String(), j, 0, j.Options.DelayDuration())\n\n\treturn id.String(), nil\n}", "func (m *Pump) Pump(string) (func() (phono.Buffer, error), error) {\n\treturn func() (phono.Buffer, error) {\n\t\tif Limit(m.Messages()) >= m.Limit {\n\t\t\treturn nil, io.EOF\n\t\t}\n\t\ttime.Sleep(m.Interval)\n\n\t\tb := phono.Buffer(make([][]float64, m.NumChannels))\n\t\tfor i := range b {\n\t\t\tb[i] = make([]float64, m.BufferSize)\n\t\t\tfor j := range b[i] {\n\t\t\t\tb[i][j] = m.Value\n\t\t\t}\n\t\t}\n\t\tm.counter.Advance(b)\n\t\treturn b, nil\n\t}, nil\n}", "func (m *Message) BHS() (*BHS, error) {\n\tps, err := m.Parse(\"BHS\")\n\tpst, ok := ps.(*BHS)\n\tif ok 
{\n\t\treturn pst, err\n\t}\n\treturn nil, err\n}", "func PulseHandler(db *postgres.DB) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\terr := db.Ping()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"failed to connect to DB\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintf(w, \"ok\")\n\t})\n}", "func (db *DB) GetReplicatedPulse(ctx context.Context) (core.PulseNumber, error) {\n\tbuf, err := db.get(ctx, prefixkey(scopeIDSystem, []byte{sysReplicatedPulse}))\n\tif err != nil {\n\t\tif err == ErrNotFound {\n\t\t\terr = nil\n\t\t}\n\t\treturn 0, err\n\t}\n\treturn core.NewPulseNumber(buf), nil\n}", "func GetPipelineBuildJob(db gorp.SqlExecutor, store cache.Store, id int64) (*sdk.PipelineBuildJob, error) {\n\tvar pbJobGorp PipelineBuildJob\n\tif err := db.SelectOne(&pbJobGorp, `\n\t\tSELECT *\n\t\tFROM pipeline_build_job\n\t\tWHERE id = $1\n\t`, id); err != nil {\n\t\treturn nil, err\n\t}\n\th := sdk.Hatchery{}\n\tif store.Get(keyBookJob(pbJobGorp.ID), &h) {\n\t\tpbJobGorp.BookedBy = h\n\t}\n\tpbJob := sdk.PipelineBuildJob(pbJobGorp)\n\treturn &pbJob, nil\n}", "func (bcp *BotCommandPayload) Trigger() (*tdlib.Message, *BotError) {\n\t_, err := bcp.bot.TelegramClient.client.GetCallbackQueryAnswer(bcp.bot.ChatID, bcp.MsgID, bcp.payloadData)\n\tif err != nil {\n\t\tswitch err.Error() {\n\t\tcase \"timeout\":\n\t\t\treturn nil, &BotError{\n\t\t\t\tErr: fmt.Errorf(\"GetCallbackQueryAnswer [%v] failed: %s\", bcp.Data, err),\n\t\t\t\tErrType: BotErrWarn,\n\t\t\t\tBot: bcp.bot,\n\t\t\t\tCommandType: bcp,\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, &BotError{\n\t\t\t\tErr: fmt.Errorf(\"GetCallbackQueryAnswer [%v] failed: %s\", bcp.Data, err),\n\t\t\t\tErrType: BotErrError,\n\t\t\t\tBot: bcp.bot,\n\t\t\t\tCommandType: bcp,\n\t\t\t}\n\t\t}\n\t}\n\tm, err := bcp.bot.TelegramClient.client.GetMessage(bcp.bot.ChatID, bcp.MsgID)\n\tif err != nil {\n\t\treturn nil, &BotError{\n\t\t\tErr: fmt.Errorf(\"GetMessage [%d] failed: 
%s\", bcp.MsgID, err),\n\t\t\tErrType: BotErrError,\n\t\t\tBot: bcp.bot,\n\t\t\tCommandType: bcp,\n\t\t}\n\t}\n\treturn m, nil\n}", "func (hjd HeavyJobDetails) AsHeavyJobDetails() (*HeavyJobDetails, bool) {\n\treturn &hjd, true\n}", "func (ch Channel) PolyAftertouch(key, pressure uint8) []byte {\n\treturn channelMessage2(ch.Index(), 10, key, pressure)\n}", "func hear(w http.ResponseWriter, r *http.Request) {\n\n\tmethod := r.Method\n\tif strings.ToLower(method) != \"post\" {\n\t\tcon_m := mess{}\n\t\tcon_m.CloseTime = 3\n\t\tcon_m.Content = \"colose Dat\"\n\t\thj := []mess{con_m}\n\n\t\tmetdat := Dat{}\n\t\tmetdat.OnlineNum = 2\n\t\tmetdat.WinRate = 2\n\t\tmetdat.Broadcast = hj\n\t\tdat := messData{}\n\t\tdat.Code = 1\n\t\tdat.Msg = \"error\"\n\t\tdat.Data = metdat\n\n\t\tcont, err := json.Marshal(dat)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tw.Write(cont)\n\t\tfmt.Println(\"one\")\n\t\treturn\n\t}\n}", "func (e Timing) Payload() interface{} {\n\treturn map[string]float64{\n\t\t\"min\": e.Min,\n\t\t\"max\": e.Max,\n\t\t\"val\": e.Value,\n\t\t\"cnt\": e.Count,\n\t}\n}", "func (m *IndexCollectionAccessorMock) ForPulseAndJetFinished() bool {\n\t// if expectation series were set then invocations count should be equal to expectations count\n\tif len(m.ForPulseAndJetMock.expectationSeries) > 0 {\n\t\treturn atomic.LoadUint64(&m.ForPulseAndJetCounter) == uint64(len(m.ForPulseAndJetMock.expectationSeries))\n\t}\n\n\t// if main expectation was set then invocations count should be greater than zero\n\tif m.ForPulseAndJetMock.mainExpectation != nil {\n\t\treturn atomic.LoadUint64(&m.ForPulseAndJetCounter) > 0\n\t}\n\n\t// if func was set then invocations count should be greater than zero\n\tif m.ForPulseAndJetFunc != nil {\n\t\treturn atomic.LoadUint64(&m.ForPulseAndJetCounter) > 0\n\t}\n\n\treturn true\n}", "func (s *SlcLogger) buildPayload(slackChannel, color, message string, titleParam []string) ([]byte, error) {\n\n\tvar title string\n\tif len(titleParam) == 0 
{\n\t\ttitle = s.DefaultTitle\n\t} else {\n\t\ttitle = titleParam[0]\n\t}\n\n\ta := &attachment{Text: message, Title: title, Color: color}\n\tattachments := []attachment{*a}\n\n\treturn json.Marshal(payload{\n\t\tChannel: slackChannel,\n\t\tUserName: s.UserName,\n\t\tIconURL: s.IconURL,\n\t\tAttachments: attachments,\n\t})\n}", "func (ch Channel) Aftertouch(pressure uint8) []byte {\n\treturn channelMessage1(ch.Index(), 13, pressure)\n}", "func (s *Storage) SavePulse(pulse models.Pulse) error {\n\ttimer := prometheus.NewTimer(SavePulseDuration)\n\tdefer timer.ObserveDuration()\n\n\terr := s.db.Set(\"gorm:insert_option\", \"\"+\n\t\t\"ON CONFLICT (pulse_number) DO UPDATE SET prev_pulse_number=EXCLUDED.prev_pulse_number, \"+\n\t\t\"next_pulse_number=EXCLUDED.next_pulse_number, timestamp=EXCLUDED.timestamp\",\n\t).Create(&pulse).Error\n\treturn errors.Wrap(err, \"error while saving pulse\")\n}", "func (c *Controller) SetJetDropData(pulse types.Pulse, jetID string) {\n\tc.jetDropRegisterLock.Lock()\n\tdefer c.jetDropRegisterLock.Unlock()\n\tif c.jetDropRegister[pulse] == nil {\n\t\tc.jetDropRegister[pulse] = map[string]struct{}{}\n\t}\n\tc.jetDropRegister[pulse][jetID] = struct{}{}\n\tIncompletePulsesQueue.Set(float64(len(c.jetDropRegister)))\n}", "func New(uid string, ipcon *ipconnection.IPConnection) (BarometerBricklet, error) {\n\tinternalIPCon := ipcon.GetInternalHandle().(IPConnection)\n\tdev, err := NewDevice([3]uint8{2, 0, 2}, uid, &internalIPCon, 0, DeviceIdentifier, DeviceDisplayName)\n\tif err != nil {\n\t\treturn BarometerBricklet{}, err\n\t}\n\tdev.ResponseExpected[FunctionGetAirPressure] = ResponseExpectedFlagAlwaysTrue\n\tdev.ResponseExpected[FunctionGetAltitude] = ResponseExpectedFlagAlwaysTrue\n\tdev.ResponseExpected[FunctionSetAirPressureCallbackPeriod] = ResponseExpectedFlagTrue\n\tdev.ResponseExpected[FunctionGetAirPressureCallbackPeriod] = ResponseExpectedFlagAlwaysTrue\n\tdev.ResponseExpected[FunctionSetAltitudeCallbackPeriod] = 
ResponseExpectedFlagTrue\n\tdev.ResponseExpected[FunctionGetAltitudeCallbackPeriod] = ResponseExpectedFlagAlwaysTrue\n\tdev.ResponseExpected[FunctionSetAirPressureCallbackThreshold] = ResponseExpectedFlagTrue\n\tdev.ResponseExpected[FunctionGetAirPressureCallbackThreshold] = ResponseExpectedFlagAlwaysTrue\n\tdev.ResponseExpected[FunctionSetAltitudeCallbackThreshold] = ResponseExpectedFlagTrue\n\tdev.ResponseExpected[FunctionGetAltitudeCallbackThreshold] = ResponseExpectedFlagAlwaysTrue\n\tdev.ResponseExpected[FunctionSetDebouncePeriod] = ResponseExpectedFlagTrue\n\tdev.ResponseExpected[FunctionGetDebouncePeriod] = ResponseExpectedFlagAlwaysTrue\n\tdev.ResponseExpected[FunctionSetReferenceAirPressure] = ResponseExpectedFlagFalse\n\tdev.ResponseExpected[FunctionGetChipTemperature] = ResponseExpectedFlagAlwaysTrue\n\tdev.ResponseExpected[FunctionGetReferenceAirPressure] = ResponseExpectedFlagAlwaysTrue\n\tdev.ResponseExpected[FunctionSetAveraging] = ResponseExpectedFlagFalse\n\tdev.ResponseExpected[FunctionGetAveraging] = ResponseExpectedFlagAlwaysTrue\n\tdev.ResponseExpected[FunctionSetI2CMode] = ResponseExpectedFlagFalse\n\tdev.ResponseExpected[FunctionGetI2CMode] = ResponseExpectedFlagAlwaysTrue\n\tdev.ResponseExpected[FunctionGetIdentity] = ResponseExpectedFlagAlwaysTrue\n\treturn BarometerBricklet{dev}, nil\n}", "func BuildGetPayload(groupGetID string, groupGetToken string) (*group.GetPayload, error) {\n\tvar id string\n\t{\n\t\tid = groupGetID\n\t}\n\tvar token string\n\t{\n\t\ttoken = groupGetToken\n\t}\n\tv := &group.GetPayload{}\n\tv.ID = id\n\tv.Token = token\n\n\treturn v, nil\n}", "func (device *LaserRangeFinderBricklet) GetDebouncePeriod() (debounce uint32, err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Get(uint8(FunctionGetDebouncePeriod), buf.Bytes())\n\tif err != nil {\n\t\treturn debounce, err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 
12 {\n\t\t\treturn debounce, fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 12)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn debounce, DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tresultBuf := bytes.NewBuffer(resultBytes[8:])\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &debounce)\n\n\t}\n\n\treturn debounce, nil\n}", "func (device *ServoBrick) GetPulseWidth(servoNum uint8) (min uint16, max uint16, err error) {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, servoNum)\n\n\tresultBytes, err := device.device.Get(uint8(FunctionGetPulseWidth), buf.Bytes())\n\tif err != nil {\n\t\treturn min, max, err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 12 {\n\t\t\treturn min, max, fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 12)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn min, max, DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tresultBuf := bytes.NewBuffer(resultBytes[8:])\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &min)\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &max)\n\n\t}\n\n\treturn min, max, nil\n}", "func FWJRconsumer(msg *stomp.Message) ([]Lfnsite, int64, string, string, error) {\n\t//first to check to make sure there is something in msg,\n\t//otherwise we will get error:\n\t//Failed to continue - runtime error: invalid memory address or nil pointer dereference\n\t//[signal SIGSEGV: segmentation violation]\n\t//\n\tvar lfnsite []Lfnsite\n\tvar ls Lfnsite\n\tatomic.AddUint64(&msgreceived, 1)\n\tif msg == nil || msg.Body == nil {\n\t\treturn lfnsite, 0, \"\", \"\", errors.New(\"Empty message\")\n\t}\n\t//\n\tif Config.Verbose > 2 {\n\t\tlog.Println(\"*****************Source AMQ message of wmarchive*********************\")\n\t\tlog.Println(\"Source AMQ message of wmarchive: \", string(msg.Body))\n\t\tlog.Println(\"*******************End AMQ message of 
wmarchive**********************\")\n\t}\n\t// Define FWJR Recod\n\ttype MetaData struct {\n\t\tTs int64 `json:\"ts\"`\n\t\tJobType string `json:\"jobtype\"`\n\t\tWnName string `json:\"wn_name\"`\n\t}\n\n\ttype InputLst struct {\n\t\tLfn int `json:\"lfn\"`\n\t\tEvents int64 `json:\"events\"`\n\t\tGUID string `json:\"guid\"`\n\t}\n\ttype Step struct {\n\t\tInput []InputLst `json:\"input\"`\n\t\tSite string `json:\"site\"`\n\t}\n\ttype FWJRRecord struct {\n\t\tLFNArray []string\n\t\tLFNArrayRef []string\n\t\tFallbackFiles []int `json:\"fallbackFiles\"`\n\t\tMetadata MetaData `json:\"meta_data\"`\n\t\tSteps []Step `json:\"steps\"`\n\t}\n\tvar rec FWJRRecord\n\terr := json.Unmarshal(msg.Body, &rec)\n\tif err != nil {\n\t\tlog.Printf(\"Enable to Unmarchal input message. Error: %v\", err)\n\t\treturn lfnsite, 0, \"\", \"\", err\n\t}\n\tif Config.Verbose > 2 {\n\t\tlog.Printf(\"******PARSED FWJR record******: %+v\", rec)\n\t}\n\t// process received message, e.g. extract some fields\n\tvar ts int64\n\tvar jobtype string\n\tvar wnname string\n\t// Check the data\n\tif rec.Metadata.Ts == 0 {\n\t\tts = time.Now().Unix()\n\t} else {\n\t\tts = rec.Metadata.Ts\n\t}\n\n\tif len(rec.Metadata.JobType) > 0 {\n\t\tjobtype = rec.Metadata.JobType\n\t} else {\n\t\tjobtype = \"unknown\"\n\t}\n\n\tif len(rec.Metadata.WnName) > 0 {\n\t\twnname = rec.Metadata.WnName\n\t} else {\n\t\twnname = \"unknown\"\n\t}\n\t//\n\tfor _, v := range rec.Steps {\n\t\tls.site = v.Site\n\t\tvar goodlfn []string\n\t\tfor _, i := range v.Input {\n\t\t\tif len(i.GUID) > 0 && i.Events != 0 {\n\t\t\t\tlfn := i.Lfn\n\t\t\t\tif !insliceint(rec.FallbackFiles, lfn) {\n\t\t\t\t\tif inslicestr(rec.LFNArrayRef, \"lfn\") {\n\t\t\t\t\t\tif lfn < len(rec.LFNArray) {\n\t\t\t\t\t\t\tgoodlfn = append(goodlfn, rec.LFNArray[lfn])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\tif len(goodlfn) > 0 {\n\t\t\tls.lfn = goodlfn\n\t\t\tlfnsite = append(lfnsite, ls)\n\t\t}\n\t}\n\treturn lfnsite, ts, jobtype, wnname, 
nil\n}", "func onMessageRecv(client mqtt.Client, message mqtt.Message){\n\n\tvar temp models.TempReading\n\tif err := json.Unmarshal(message.Payload(), &temp); err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Received message on topic: %v Message: %+v \\n \", message.Topic(), string(message.Payload()))\n\n\tvalvePercent := RegulateTemp(temp.Value)\n\treqNum := fmt.Sprintf(\"%.2f\", valvePercent)\n\tout, err := json.Marshal(reqNum)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tPubValveValue(out,client )\n\n}", "func BuildGetPayload(productGetID string, productGetToken string) (*product.GetPayload, error) {\n\tvar id string\n\t{\n\t\tid = productGetID\n\t}\n\tvar token string\n\t{\n\t\ttoken = productGetToken\n\t}\n\tv := &product.GetPayload{}\n\tv.ID = id\n\tv.Token = token\n\n\treturn v, nil\n}", "func (GuitarBass) DiezelVH4Amplifier(){}", "func (db *DB) AddPulse(pulse core.Pulse) error {\n\treturn db.Update(func(tx *TransactionManager) error {\n\t\tvar latest core.PulseNumber\n\t\tlatest, err := tx.GetLatestPulseNumber()\n\t\tif err != nil && err != ErrNotFound {\n\t\t\treturn err\n\t\t}\n\t\tpulseRec := record.PulseRecord{\n\t\t\tPrevPulse: latest,\n\t\t\tEntropy: pulse.Entropy,\n\t\t\tPredictedNextPulse: pulse.NextPulseNumber,\n\t\t}\n\t\tvar buf bytes.Buffer\n\t\tenc := codec.NewEncoder(&buf, &codec.CborHandle{})\n\t\terr = enc.Encode(pulseRec)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = tx.Set(prefixkey(scopeIDPulse, pulse.PulseNumber.Bytes()), buf.Bytes())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn tx.Set(prefixkey(scopeIDSystem, []byte{sysLatestPulse}), pulse.PulseNumber.Bytes())\n\t})\n}", "func (device *BarometerBricklet) GetAirPressureCallbackThreshold() (option ThresholdOption, min int32, max int32, err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Get(uint8(FunctionGetAirPressureCallbackThreshold), buf.Bytes())\n\tif err != nil {\n\t\treturn option, min, max, 
err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 17 {\n\t\t\treturn option, min, max, fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 17)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn option, min, max, DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tresultBuf := bytes.NewBuffer(resultBytes[8:])\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &option)\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &min)\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &max)\n\n\t}\n\n\treturn option, min, max, nil\n}", "func BuildGetPayload(salesGetID string, salesGetToken string) (*sales.GetPayload, error) {\n\tvar id string\n\t{\n\t\tid = salesGetID\n\t}\n\tvar token string\n\t{\n\t\ttoken = salesGetToken\n\t}\n\tv := &sales.GetPayload{}\n\tv.ID = id\n\tv.Token = token\n\n\treturn v, nil\n}", "func getStand(handler *handler) *fcm.Message {\n\tvar msg fcm.Message\n\t//remove type first\n\tvar b *bytes.Buffer\n\tif handler.types == FromTypeJson {\n\t\tb, _ = interfaceToByteArray(handler.raw)\n\t} else {\n\t\tb, _ = interfaceToByteArray(handler.from)\n\t}\n\tjson.Unmarshal(b.Bytes(), msg)\n\treturn &msg\n}", "func (p *MyPipeline) Heartbeat() client.Result {\n\treturn p.Pipeline.Do(\"ping\", \"ok\")\n}", "func recentMsgHandler(req *air.Request, res *air.Response) error {\n\tuid := req.Params[\"uid\"]\n\tgid := req.Params[\"gid\"]\n\tcount, _ := utils.ParseInt(req, \"count\")\n\tminMID := req.Params[\"min_mid\"]\n\tif count == 0 {\n\t\tcount = 10\n\t}\n\tif minMID == \"\" {\n\t\tif hasUnreadMsg(uid, gid) {\n\t\t\tunread := models.GetUnread(uid, gid)\n\t\t\tif unread != nil {\n\t\t\t\tminMID = unread.LastMID\n\t\t\t} else {\n\t\t\t\tminMID = \"0\"\n\t\t\t}\n\t\t} else {\n\t\t\tminMID = \"0\"\n\t\t}\n\t}\n\treturn utils.Success(res, models.GetMessagesSlice(gid, minMID, count))\n}", "func (p *PoolHeightFetcher) BuildNotifyRes(res []interface{}) 
(models.NotifyRes, error) {\n\tvar nres = models.NotifyRes{}\n\tvar ok bool\n\t// beam\n\tif p.Param.CoinType == \"beam\" {\n\t\treturn nres, nil\n\t}\n\t// ckb\n\tif p.Param.CoinType == \"ckb\" {\n\t\t// https://wk588.com/forum.php?mod=viewthread&tid=19665\n\t\t// \"jobId\", \"header hash\", height, \"parent hash\", cleanJob\n\t\tif nres.JobID, ok = res[0].(string); !ok {\n\t\t\treturn nres, errJsonType\n\t\t}\n\t\tif nres.Hash, ok = res[1].(string); !ok {\n\t\t\treturn nres, errJsonType\n\t\t}\n\t\tif nres.Height, ok = res[2].(float64); !ok {\n\t\t\treturn nres, errJsonType\n\t\t}\n\t\tif nres.ParentHash, ok = res[3].(string); !ok {\n\t\t\treturn nres, errJsonType\n\t\t}\n\t\tif nres.CleanJobs, ok = res[4].(bool); !ok {\n\t\t\treturn nres, errJsonType\n\t\t}\n\t\treturn nres, nil\n\t}\n\t// eth, etc\n\tif p.Param.CoinType == \"eth\" || p.Param.CoinType == \"etc\" {\n\t\tif nres.Header, ok = res[0].(string); !ok {\n\t\t\treturn nres, errJsonType\n\t\t}\n\t\tif nres.Header, ok = res[1].(string); !ok {\n\t\t\treturn nres, errJsonType\n\t\t}\n\t\tif nres.Seed, ok = res[2].(string); !ok {\n\t\t\treturn nres, errJsonType\n\t\t}\n\t\tif nres.ShareTarget, ok = res[3].(string); !ok {\n\t\t\treturn nres, errJsonType\n\t\t}\n\t\tif nres.CleanJobs, ok = res[4].(bool); !ok {\n\t\t\treturn nres, errJsonType\n\t\t}\n\t\treturn nres, nil\n\t}\n\t// default: btc, ltc, dcr\n\tif nres.JobID, ok = res[0].(string); !ok {\n\t\treturn nres, errJsonType\n\t}\n\tif nres.Hash, ok = res[1].(string); !ok {\n\t\treturn nres, errJsonType\n\t}\n\tif nres.CoinbaseTX1, ok = res[2].(string); !ok {\n\t\treturn nres, errJsonType\n\t}\n\tif nres.CoinbaseTX2, ok = res[3].(string); !ok {\n\t\treturn nres, errJsonType\n\t}\n\tif nres.BlockVersion, ok = res[5].(string); !ok {\n\t\treturn nres, errJsonType\n\t}\n\tif nres.Nbits, ok = res[6].(string); !ok {\n\t\treturn nres, errJsonType\n\t}\n\tif nres.Ntime, ok = res[7].(string); !ok {\n\t\treturn nres, errJsonType\n\t}\n\tif nres.CleanJobs, ok = 
res[8].(bool); !ok {\n\t\treturn nres, errJsonType\n\t}\n\treturn nres, nil\n}", "func GetPayload(ctx context.Context, hostnameData hostname.Data) *Payload {\n\tmeta := hostMetadataUtils.GetMeta(ctx, config.Datadog)\n\tmeta.Hostname = hostnameData.Hostname\n\n\tp := &Payload{\n\t\tOs: osName,\n\t\tAgentFlavor: flavor.GetFlavor(),\n\t\tPythonVersion: python.GetPythonInfo(),\n\t\tSystemStats: getSystemStats(),\n\t\tMeta: meta,\n\t\tHostTags: hostMetadataUtils.GetHostTags(ctx, false, config.Datadog),\n\t\tContainerMeta: containerMetadata.Get(1 * time.Second),\n\t\tNetworkMeta: getNetworkMeta(ctx),\n\t\tLogsMeta: getLogsMeta(),\n\t\tInstallMethod: getInstallMethod(getInstallInfoPath()),\n\t\tProxyMeta: getProxyMeta(),\n\t\tOtlpMeta: getOtlpMeta(),\n\t}\n\n\t// Cache the metadata for use in other payloads\n\tkey := buildKey(\"payload\")\n\tcache.Cache.Set(key, p, cache.NoExpiration)\n\n\treturn p\n}", "func (m *PulseManager) Set(ctx context.Context, newPulse insolar.Pulse) error {\n\tm.setLock.Lock()\n\tdefer m.setLock.Unlock()\n\tif m.stopped {\n\t\treturn errors.New(\"can't call Set method on PulseManager after stop\")\n\t}\n\n\tctx, logger := inslogger.WithField(ctx, \"new_pulse\", newPulse.PulseNumber.String())\n\tlogger.Debug(\"received pulse\")\n\n\tctx, span := instracer.StartSpan(\n\t\tctx, \"PulseManager.Set\", trace.WithSampler(trace.AlwaysSample()),\n\t)\n\tspan.AddAttributes(\n\t\ttrace.Int64Attribute(\"pulse.PulseNumber\", int64(newPulse.PulseNumber)),\n\t)\n\tdefer span.End()\n\n\t// Dealing with node lists.\n\tlogger.Debug(\"dealing with node lists.\")\n\t{\n\t\tfromNetwork := m.NodeNet.GetAccessor(newPulse.PulseNumber).GetWorkingNodes()\n\t\tif len(fromNetwork) == 0 {\n\t\t\tlogger.Errorf(\"received zero nodes for pulse %d\", newPulse.PulseNumber)\n\t\t\treturn nil\n\t\t}\n\t\ttoSet := make([]insolar.Node, 0, len(fromNetwork))\n\t\tfor _, n := range fromNetwork {\n\t\t\ttoSet = append(toSet, insolar.Node{ID: n.ID(), Role: n.Role()})\n\t\t}\n\t\terr := 
m.NodeSetter.Set(newPulse.PulseNumber, toSet)\n\t\tif err != nil {\n\t\t\tpanic(errors.Wrap(err, \"call of SetActiveNodes failed\"))\n\t\t}\n\t}\n\n\tstoragePulse, err := m.PulseAccessor.Latest(ctx)\n\tif err == pulse.ErrNotFound {\n\t\tstoragePulse = *insolar.GenesisPulse\n\t} else if err != nil {\n\t\treturn errors.Wrap(err, \"call of GetLatestPulseNumber failed\")\n\t}\n\n\tfor _, d := range m.dispatchers {\n\t\td.ClosePulse(ctx, storagePulse)\n\t}\n\n\terr = m.JetModifier.Clone(ctx, storagePulse.PulseNumber, newPulse.PulseNumber, false)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to clone jet.Tree fromPulse=%v toPulse=%v\", storagePulse.PulseNumber, newPulse.PulseNumber)\n\t}\n\n\tif err := m.PulseAppender.Append(ctx, newPulse); err != nil {\n\t\treturn errors.Wrap(err, \"call of AddPulse failed\")\n\t}\n\n\terr = m.LogicRunner.OnPulse(ctx, storagePulse, newPulse)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, d := range m.dispatchers {\n\t\td.BeginPulse(ctx, newPulse)\n\t}\n\n\treturn nil\n}", "func (p *AuroraSchedulerManagerClient) PulseJobUpdate(ctx context.Context, key *JobUpdateKey) (r *Response, err error) {\n var _args208 AuroraSchedulerManagerPulseJobUpdateArgs\n _args208.Key = key\n var _result209 AuroraSchedulerManagerPulseJobUpdateResult\n var meta thrift.ResponseMeta\n meta, err = p.Client_().Call(ctx, \"pulseJobUpdate\", &_args208, &_result209)\n p.SetLastResponseMeta_(meta)\n if err != nil {\n return\n }\n return _result209.GetSuccess(), nil\n}", "func (pl *Payload) push(x interface{}) {\n\tmetadata, err := getHostMetadata()\n\tif err != nil {\n\t\ttelemetryLogger.Printf(\"Error getting metadata %v\", err)\n\t} else {\n\t\terr = saveHostMetadata(metadata)\n\t\tif err != nil {\n\t\t\ttelemetryLogger.Printf(\"saving host metadata failed with :%v\", err)\n\t\t}\n\t}\n\n\tif pl.len() < MaxPayloadSize {\n\t\tswitch x.(type) {\n\t\tcase DNCReport:\n\t\t\tdncReport := x.(DNCReport)\n\t\t\tdncReport.Metadata = 
metadata\n\t\t\tpl.DNCReports = append(pl.DNCReports, dncReport)\n\t\tcase CNIReport:\n\t\t\tcniReport := x.(CNIReport)\n\t\t\tcniReport.Metadata = metadata\n\t\t\tpl.CNIReports = append(pl.CNIReports, cniReport)\n\t\tcase NPMReport:\n\t\t\tnpmReport := x.(NPMReport)\n\t\t\tnpmReport.Metadata = metadata\n\t\t\tpl.NPMReports = append(pl.NPMReports, npmReport)\n\t\tcase CNSReport:\n\t\t\tcnsReport := x.(CNSReport)\n\t\t\tcnsReport.Metadata = metadata\n\t\t\tpl.CNSReports = append(pl.CNSReports, cnsReport)\n\t\t}\n\t}\n}", "func BuildDividePayload(calcDivideMessage string) (*calc.DividePayload, error) {\n\tvar err error\n\tvar message calcpb.DivideRequest\n\t{\n\t\tif calcDivideMessage != \"\" {\n\t\t\terr = json.Unmarshal([]byte(calcDivideMessage), &message)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid JSON for message, \\nerror: %s, \\nexample of valid JSON:\\n%s\", err, \"'{\\n \\\"dividend\\\": 6001261670528037577,\\n \\\"divisor\\\": 5850431520333673251\\n }'\")\n\t\t\t}\n\t\t}\n\t}\n\tv := &calc.DividePayload{\n\t\tDividend: int(message.Dividend),\n\t\tDivisor: int(message.Divisor),\n\t}\n\n\treturn v, nil\n}", "func (b BfPublisher) Publish(mts []plugin.Metric, cfg plugin.Config) error {\n\tserver, err := cfg.GetString(\"server\")\n\tif err != nil {\n\t\tlog.Errorf(\"unable to parse blueflood server from configs\")\n\t\treturn err\n\t}\n\trollUpNum, err := cfg.GetInt(\"rollupNum\")\n\tif err != nil {\n\t\tlog.Errorf(\"unable to parse blueflood rollUpNum from configs\")\n\t\treturn err\n\t}\n\tttlInSeconds, err := cfg.GetInt(\"ttlInSeconds\")\n\tif err != nil {\n\t\tlog.Errorf(\"unable to parse blueflood ttlInSeconds from configs\")\n\t\treturn err\n\t}\n\ttimeout, err := cfg.GetInt(\"timeout\")\n\tif err != nil {\n\t\tlog.Errorf(\"unable to parse blueflood timeout from configs\")\n\t\treturn err\n\t}\n\n\tdata := []ingestMetric{}\n\tfor _, m := range mts {\n\t\t//Ensure empty namespaces are not sent to blueflood\n\t\tif 
len(m.Namespace.Strings()) < 1 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch v := m.Data.(type) {\n\t\tcase float64:\n\t\t\tif math.IsNaN(m.Data.(float64)) {\n\t\t\t\tlog.Warningf(\"Data NaN and not serializable '%v': Type %T\", m.Namespace, v)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdata = append(data, ingestMetric{MetricName: Key(m.Namespace.Strings()), MetricValue: m.Data, TTLInSeconds: ttlInSeconds, CollectionTime: time.Now().Unix() * 1000})\n\t\tcase float32, int, int32, int64, uint32, uint64:\n\t\t\tdata = append(data, ingestMetric{MetricName: Key(m.Namespace.Strings()), MetricValue: m.Data, TTLInSeconds: ttlInSeconds, CollectionTime: time.Now().Unix() * 1000})\n\t\tcase string:\n\t\t\td, ok := strconv.ParseFloat(m.Data.(string), 64)\n\t\t\tif ok == nil {\n\t\t\t\tdata = append(data, ingestMetric{MetricName: Key(m.Namespace.Strings()), MetricValue: d, TTLInSeconds: ttlInSeconds, CollectionTime: time.Now().Unix() * 1000})\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Warningf(\"Unknown data received for metric '%v': Type %T\", m.Namespace, v)\n\t\t}\n\n\t\tif int64(len(data)) == rollUpNum {\n\t\t\tgo publishMetrics(data, server, timeout)\n\t\t\tdata = []ingestMetric{}\n\t\t}\n\t}\n\n\tif len(data) > 0 {\n\t\tgo publishMetrics(data, server, timeout)\n\t}\n\n\treturn nil\n}", "func HealthHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"--- RECEIVED HEALTH DATA FROM HEALTH PULSE ---\")\n\n\tauth, httpErr := auth.CheckAuth(w, r)\n\tif !auth {\n\t\thttp.Error(w, httpErr.Status, httpErr.StatusCode)\n\t\treturn\n\t}\n\n\tvar healthSample health.Data\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlog.Println(string(data))\n\n\terr = json.Unmarshal(data, &healthSample)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while unmarshaling incoming health data: %s\", err)\n\t\tlog.Println(string(data))\n\t\thttp.Error(w, 
http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tc := utils.MONGOSESSION.DB(\"healthDB\").C(\"healthData\")\n\tvar results []dailyData\n\terr = c.Find(bson.M{\"date\": healthSample.Date}).All(&results)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Error while finding health data entries: %s\", err)\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Calculate Min and Max heart rate\n\tminMaxHR := calcMinMaxDailyHeartRate(healthSample)\n\thealthSample.MinMaxHeartRate = minMaxHR\n\n\t// If there is no entry for the current day create one with the current sample\n\tif len(results) == 0 {\n\t\terr = c.Insert(&dailyData{Date: healthSample.Date, Data: healthSample})\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error while inserting health data entries: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"LOGGED ENTRY %s\", healthSample.Date)\n\t\treturn\n\t}\n\n\t// If there is an entry for the current day, update the entry with the current sample\n\tcolQuerier := bson.M{\"date\": healthSample.Date}\n\tchange := bson.M{\"$set\": bson.M{\"date\": healthSample.Date, \"data\": healthSample}}\n\terr = c.Update(colQuerier, change)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Infof(\"UPDATING ENTRY %s\", healthSample.Date)\n}", "func instrumentGet(inner func()) {\n\tTotalRequests.Add(1)\n\tPendingRequests.Add(1)\n\tdefer PendingRequests.Add(-1)\n\n\tstart := time.Now()\n\n\tinner()\n\n\t// Capture the histogram over 18 geometric buckets \n\tdelta := time.Since(start)\n\tswitch {\n\tcase delta < time.Millisecond:\n\t\tLatencies.Add(\"0ms\", 1)\n\tcase delta > 32768*time.Millisecond:\n\t\tLatencies.Add(\">32s\", 1)\n\tdefault:\n\t\tfor i := time.Millisecond; i < 32768*time.Millisecond; i *= 2 {\n\t\t\tif delta >= i && delta < i*2 {\n\t\t\t\tLatencies.Add(i.String(), 
1)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func buildData(p *dmr.Packet, repeaterID uint32) []byte {\n\tvar data = make([]byte, 55)\n\tcopy(data[:4], DMRData)\n\tdata[4] = p.Sequence\n\tdata[5] = uint8(p.SrcID >> 16)\n\tdata[6] = uint8(p.SrcID >> 8)\n\tdata[7] = uint8(p.SrcID)\n\tdata[8] = uint8(p.DstID >> 16)\n\tdata[9] = uint8(p.DstID >> 8)\n\tdata[10] = uint8(p.DstID)\n\tdata[11] = uint8(repeaterID >> 24)\n\tdata[12] = uint8(repeaterID >> 16)\n\tdata[13] = uint8(repeaterID >> 8)\n\tdata[14] = uint8(repeaterID)\n\tdata[15] = ((p.Timeslot & 0x01) << 7) | ((p.CallType & 0x01) << 6)\n\tdata[16] = uint8(p.StreamID >> 24)\n\tdata[17] = uint8(p.StreamID >> 16)\n\tdata[18] = uint8(p.StreamID >> 8)\n\tdata[19] = uint8(p.StreamID)\n\tcopy(data[20:53], p.Data)\n\n\tdata[53] = uint8(p.BER)\n\tdata[54] = uint8(p.RSSI)\n\n\tswitch p.DataType {\n\tcase dmr.VoiceBurstB, dmr.VoiceBurstC, dmr.VoiceBurstD, dmr.VoiceBurstE, dmr.VoiceBurstF:\n\t\tdata[15] |= (0x00 << 4)\n\t\tdata[15] |= (p.DataType - dmr.VoiceBurstA)\n\t\tbreak\n\tcase dmr.VoiceBurstA:\n\t\tdata[15] |= (0x01 << 4)\n\t\tbreak\n\tdefault:\n\t\tdata[15] |= (0x02 << 4)\n\t\tdata[15] |= (p.DataType)\n\t}\n\n\treturn data\n}", "func GetPulseNumber() (insolar.PulseNumber, error) {\n\treq := GetLogicalContext().Request\n\tif req == nil {\n\t\treturn insolar.PulseNumber(0), errors.New(\"request from LogicCallContext is nil, get pulse is failed\")\n\t}\n\treturn req.GetLocal().Pulse(), nil\n}", "func MeasureWorker(channel chan Measurement, adc adcpi.Interface, flexingChannel byte, extendingChannel byte,\n speedChannel byte, speed int, interval float64) {\n\n // Measure data and write it to the channel\n // Passing true to the for loop creates an infinite loop that never stops, unless\n // The program is terminated.\n for true {\n\n // Read from the flexing muscle\n flexing := adc.ReadRaw(flexingChannel)\n\n // Read from the extending muscle\n extending := adc.ReadRaw(extendingChannel)\n\n // Read the speed value from a 
potentiometer, if dynamic speed is enabled\n if speed < 0 {\n speed = adc.ReadRaw(speedChannel)\n }\n\n // Send the values to the motor thread\n channel <- Measurement{ Flexing:flexing, Extending:extending, Speed:speed }\n\n // Wait for a certain amount of time\n time.Sleep(time.Duration(interval * 1000 * 1000 * 1000))\n }\n}", "func (a *Alerts) Firing() prometheus.Counter { return a.firing }", "func (e *HeavyPayload) Type() core.MessageType {\n\treturn core.TypeHeavyPayload\n}", "func (db *DB) GetLatestPulseNumber() (core.PulseNumber, error) {\n\ttx := db.BeginTransaction(false)\n\tdefer tx.Discard()\n\n\treturn tx.GetLatestPulseNumber()\n}", "func constructMQTTPayload(name string, version string, FSTime time.Time) (string, error) {\n\n\tpsd := helpers.PubSubData{\n\t\tName: name,\n\t\tVersion: version,\n\t\tFSTime: FSTime,\n\t\t// Size: size,\n\t\t// Fruit: []string{\"Apple\", \"Banana\", \"Orange\"},\n\t\t// Id: 999,\n\t\t// private: \"Unexported field\",\n\t\t// Created: time.Now(),\n\t}\n\n\tvar jsonData []byte\n\tjsonData, err := json.Marshal(psd)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// Print it in a nice readable form, unlike the one that actually gets returned\n\tvar jsonDataReadable []byte\n\tjsonDataReadable, err = json.MarshalIndent(psd, \"\", \" \")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfmt.Println(string(jsonDataReadable))\n\n\treturn string(jsonData), nil\n}", "func (r *pumpRunner) run(pipeID, componentID string, cancel <-chan struct{}, provide chan<- struct{}, consume <-chan message, meter *meter) (<-chan message, <-chan error) {\n\tout := make(chan message)\n\terrc := make(chan error, 1)\n\n\tgo func() {\n\t\tdefer close(out)\n\t\tdefer close(errc)\n\t\tcall(r.reset, pipeID, errc) // reset hook\n\t\tvar err error\n\t\tvar m message\n\t\tvar done bool // done flag\n\t\tfor {\n\t\t\t// request new message\n\t\t\tselect {\n\t\t\tcase provide <- do:\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt 
hook\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// receive new message\n\t\t\tselect {\n\t\t\tcase m = <-consume:\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tm.applyTo(componentID) // apply params\n\t\t\tm.Buffer, err = r.fn() // pump new buffer\n\t\t\tif err != nil {\n\t\t\t\tswitch err {\n\t\t\t\tcase io.EOF:\n\t\t\t\t\tcall(r.flush, pipeID, errc) // flush hook\n\t\t\t\t\treturn\n\t\t\t\tcase io.ErrUnexpectedEOF:\n\t\t\t\t\tcall(r.flush, pipeID, errc) // flush hook\n\t\t\t\t\tdone = true\n\t\t\t\tdefault:\n\t\t\t\t\terrc <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tmeter = meter.sample(int64(m.Buffer.Size())).message()\n\t\t\tm.feedback.applyTo(componentID) // apply feedback\n\n\t\t\t// push message further\n\t\t\tselect {\n\t\t\tcase out <- m:\n\t\t\t\tif done {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn out, errc\n}", "func (db *DB) AddPulse(ctx context.Context, pulse core.Pulse) error {\n\treturn db.Update(ctx, func(tx *TransactionManager) error {\n\t\tvar latest core.PulseNumber\n\t\tlatest, err := tx.GetLatestPulseNumber(ctx)\n\t\tif err != nil && err != ErrNotFound {\n\t\t\treturn err\n\t\t}\n\t\tpulseRec := record.PulseRecord{\n\t\t\tPrevPulse: latest,\n\t\t\tEntropy: pulse.Entropy,\n\t\t\tPredictedNextPulse: pulse.NextPulseNumber,\n\t\t}\n\t\tvar buf bytes.Buffer\n\t\tenc := codec.NewEncoder(&buf, &codec.CborHandle{})\n\t\terr = enc.Encode(pulseRec)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = tx.set(ctx, prefixkey(scopeIDPulse, pulse.PulseNumber.Bytes()), buf.Bytes())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn tx.set(ctx, prefixkey(scopeIDSystem, []byte{sysLatestPulse}), pulse.PulseNumber.Bytes())\n\t})\n}", "func hostMetrics(host *Host, time int32) string {\n \n cpu := random(BigDogMinCPU,BigDogMaxCPU)\n disk := 
random(BigDogMinDisk,BigDogMaxDisk)\n mem := random(BigDogMinMem,BigDogMaxMem)\n json := fmt.Sprintf(`{\"series\" : \n [\n {\n \"metric\":\"system.cpu.stolen\",\n \"points\":[[%d,0]],\n \"type\":\"gauge\",\n \"host\":\"%s\",\n \"tags\":[\"%s:%s\",\"%s:%s\"]\n },\n {\n \"metric\":\"system.cpu.user\",\n \"points\":[[%d,%d]],\n \"type\":\"gauge\",\n \"host\":\"%s\",\n \"tags\":[\"%s:%s\",\"%s:%s\"]\n },\n {\n \"metric\":\"system.disk.used\",\n \"points\":[[%d,%d]],\n \"type\":\"gauge\",\n \"host\":\"%s\",\n \"tags\":[\"%s:%s\",\"%s:%s\"]\n },\n {\n \"metric\":\"system.mem.used\",\n \"points\":[[%d,%d]],\n \"type\":\"gauge\",\n \"host\":\"%s\",\n \"tags\":[\"%s:%s\",\"%s:%s\"]\n }\n ]\n }`,time,host.name,host.tags[0].name,host.tags[0].value,host.tags[1].name,host.tags[1].value,time,cpu,host.name,host.tags[0].name,host.tags[0].value,host.tags[1].name,host.tags[1].value,time,disk,host.name,host.tags[0].name,host.tags[0].value,host.tags[1].name,host.tags[1].value,time,mem,host.name,host.tags[0].name,host.tags[0].value,host.tags[1].name,host.tags[1].value)\n fmt.Println(json)\n\n return json\n}", "func BuildGetPayload(projectGetProjectID string, projectGetAuth string) (*project.GetPayload, error) {\n\tvar err error\n\tvar projectID int32\n\t{\n\t\tvar v int64\n\t\tv, err = strconv.ParseInt(projectGetProjectID, 10, 32)\n\t\tprojectID = int32(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid value for projectID, must be INT32\")\n\t\t}\n\t}\n\tvar auth *string\n\t{\n\t\tif projectGetAuth != \"\" {\n\t\t\tauth = &projectGetAuth\n\t\t}\n\t}\n\tv := &project.GetPayload{}\n\tv.ProjectID = projectID\n\tv.Auth = auth\n\n\treturn v, nil\n}", "func BuildDividePayload(calcDivideA string, calcDivideB string) (*calc.DividePayload, error) {\n\tvar err error\n\tvar a int\n\t{\n\t\tvar v int64\n\t\tv, err = strconv.ParseInt(calcDivideA, 10, strconv.IntSize)\n\t\ta = int(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid value for a, must be 
INT\")\n\t\t}\n\t}\n\tvar b int\n\t{\n\t\tvar v int64\n\t\tv, err = strconv.ParseInt(calcDivideB, 10, strconv.IntSize)\n\t\tb = int(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid value for b, must be INT\")\n\t\t}\n\t}\n\tv := &calc.DividePayload{}\n\tv.A = a\n\tv.B = b\n\n\treturn v, nil\n}", "func BuildMultiplyPayload(calcMultiplyA string, calcMultiplyB string) (*calc.MultiplyPayload, error) {\n\tvar err error\n\tvar a int\n\t{\n\t\tvar v int64\n\t\tv, err = strconv.ParseInt(calcMultiplyA, 10, strconv.IntSize)\n\t\ta = int(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid value for a, must be INT\")\n\t\t}\n\t}\n\tvar b int\n\t{\n\t\tvar v int64\n\t\tv, err = strconv.ParseInt(calcMultiplyB, 10, strconv.IntSize)\n\t\tb = int(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid value for b, must be INT\")\n\t\t}\n\t}\n\tv := &calc.MultiplyPayload{}\n\tv.A = a\n\tv.B = b\n\n\treturn v, nil\n}", "func (rf *Raft) pulse() {\n\tfor rf.isLeader() {\n\t\t// Start send AppendEntries RPC to the rest of cluster\n\t\tfor ii := range rf.peers {\n\t\t\tif ii == rf.me {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo func(i int) {\n\t\t\t\targs := rf.makeAppendEntriesArgs(i)\n\t\t\t\treply := AppendEntriesReply{}\n\t\t\t\trf.sendAppendEntries(i, &args, &reply)\n\t\t\t\trf.appendEntriesReplyHandler <- reply\n\t\t\t}(ii)\n\t\t}\n\n\t\ttime.Sleep(time.Duration(HeartBeatsInterval) * time.Millisecond)\n\t}\n}", "func (s *SCIONBoxController) HeartBeatFunction(w http.ResponseWriter, r *http.Request) {\n\t// get the account tied to the box\n\t// Parse the received info\n\tvar req HeartBeatQuery\n\tlog.Printf(\"new HB Query\")\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&req); err != nil {\n\t\tlog.Printf(\"Error decoding JSON: %v, %v\", r.Body, err)\n\t\ts.Error500(w, err, \"Error decoding JSON\")\n\t\treturn\n\t}\n\tvars := mux.Vars(r)\n\taccountID := vars[\"account_id\"]\n\tsecret := vars[\"secret\"]\n\tip, err := 
s.getSourceIP(r)\n\tif err != nil {\n\t\tlog.Printf(\"error retrieving source IP: %v\", accountID)\n\t\ts.Error500(w, err, \"Error retrieving source IP\")\n\t\treturn\n\t}\n\tvar needGen = false\n\tvar slasList []*models.SCIONLabAS\n\tfor _, ia := range req.IAList {\n\t\tslas, err := models.FindSCIONLabASByIAInt(ia.I, ia.A)\n\t\tif err != nil {\n\t\t\tif err == orm.ErrNoRows {\n\t\t\t\t// no row found AS is not a SCIONLabAS\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"no SCIONLabAS found in HB, %v %v\", req, err)\n\t\t\t\ts.Error500(w, err, \"no SCIONLabAS found in HB\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t// check if IA belongs to credentials\n\t\tu, err := models.FindUserByEmail(slas.UserEmail)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error looking for user: %v\", err)\n\t\t\ts.Error500(w, err, \"Error looking for user\")\n\t\t\treturn\n\t\t}\n\t\taccount := u.Account\n\t\tif accountID != account.AccountID || secret != account.Secret {\n\t\t\tlog.Printf(\"HB requested for user with not associated IA, %v, %v\", req, slas.UserEmail)\n\t\t\ts.BadRequest(w, err, \"HB requested for user with not associated IA\")\n\t\t\treturn\n\t\t}\n\t\t// check if box needs an update\n\t\tif slas.Status == models.Update {\n\t\t\tslas.Status = models.Inactive\n\t\t\tslas.Update()\n\t\t\t// TODO Update the box !\n\t\t\treturn\n\t\t}\n\t\tneedGen, err = s.HBCheckIP(slas, ip, ia, r)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error running IP checks in HB: %v,\", err)\n\t\t\ts.Error500(w, err, \"Error running IP check in HB\")\n\t\t\treturn\n\t\t}\n\t\t// Send connections in the Database to the Box\n\t\tslasList = append(slasList, slas)\n\t}\n\tif needGen {\n\t\t// TODO generate updated gen folder for all ASes\n\t\t// Remove old gen folders/ packages\n\t\tos.RemoveAll(userPackagePath(slasList[0].UserEmail))\n\t\tos.Remove(filepath.Join(BoxPackagePath, slasList[0].UserEmail+\".tar.gz\"))\n\t\tfor _, slas := range slasList {\n\t\t\t// Generate necessary files and send them to the 
Bo\n\t\t\tif err := s.generateGen(slas); err != nil {\n\t\t\t\ts.Error500(w, err, \"Error generating gen folder\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\ts.serveGen(slasList[0].UserEmail, w, r)\n\t} else {\n\t\tvar iaList []ResponseIA\n\t\tfor _, slas := range slasList {\n\t\t\tcns, err := slas.GetConnectionInfo()\n\t\t\tlog.Printf(\"Got Connection Info\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error retrieving connections: %v\", err)\n\t\t\t\ts.Error500(w, err, \"Error retrieving connections\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tslas.Status = models.Active\n\t\t\tif err := slas.Update(); err != nil {\n\t\t\t\tlog.Printf(\"Error updating slas %v\", err)\n\t\t\t\ts.Error500(w, err, \"Error updating slas\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tia := ResponseIA{\n\t\t\t\tIA: addr.IA{\n\t\t\t\t\tI: slas.ISD,\n\t\t\t\t\tA: slas.ASID},\n\t\t\t\tConnections: models.OnlyCurrentConnections(cns),\n\t\t\t}\n\t\t\tiaList = append(iaList, ia)\n\t\t}\n\t\thbResponse := HBResponse{\n\t\t\tIAList: iaList,\n\t\t}\n\t\ts.JSON(hbResponse, w, r)\n\t}\n}", "func (db *DB) GetLatestPulseNumber(ctx context.Context) (core.PulseNumber, error) {\n\ttx := db.BeginTransaction(false)\n\tdefer tx.Discard()\n\n\treturn tx.GetLatestPulseNumber(ctx)\n}", "func (mmForPulse *mRecordCollectionAccessorMockForPulse) When(ctx context.Context, jetID insolar.JetID, pn insolar.PulseNumber) *RecordCollectionAccessorMockForPulseExpectation {\n\tif mmForPulse.mock.funcForPulse != nil {\n\t\tmmForPulse.mock.t.Fatalf(\"RecordCollectionAccessorMock.ForPulse mock is already set by Set\")\n\t}\n\n\texpectation := &RecordCollectionAccessorMockForPulseExpectation{\n\t\tmock: mmForPulse.mock,\n\t\tparams: &RecordCollectionAccessorMockForPulseParams{ctx, jetID, pn},\n\t}\n\tmmForPulse.expectations = append(mmForPulse.expectations, expectation)\n\treturn expectation\n}" ]
[ "0.57889396", "0.52682924", "0.52608174", "0.52144593", "0.49673986", "0.48198617", "0.48043132", "0.47157827", "0.46907073", "0.4605603", "0.4562578", "0.44162816", "0.43567464", "0.43243948", "0.43150502", "0.4286372", "0.42293996", "0.42173892", "0.42120856", "0.42044252", "0.41890675", "0.41457692", "0.4140486", "0.41348153", "0.41328955", "0.41265294", "0.41176808", "0.40837657", "0.40836254", "0.40734833", "0.40605447", "0.4059769", "0.40511912", "0.4050597", "0.40344435", "0.4022029", "0.40127817", "0.40028873", "0.40026912", "0.39966372", "0.39949805", "0.39863935", "0.39782476", "0.39715293", "0.39671198", "0.39453554", "0.3943407", "0.39368302", "0.3926924", "0.39176545", "0.39170316", "0.39159793", "0.39140734", "0.3909542", "0.39060473", "0.3893847", "0.38903773", "0.3886982", "0.38805553", "0.38739192", "0.3862757", "0.38490796", "0.3846982", "0.3842992", "0.3837366", "0.38362098", "0.38329142", "0.38278705", "0.38190758", "0.38173363", "0.38128006", "0.38110226", "0.3795549", "0.37923726", "0.37891486", "0.3781551", "0.37779146", "0.37737435", "0.3772442", "0.3768497", "0.37679496", "0.37671432", "0.37616113", "0.37601075", "0.3753791", "0.3753734", "0.37498227", "0.37424508", "0.37412065", "0.37393472", "0.3739317", "0.3739051", "0.3737977", "0.37366736", "0.37293294", "0.37276474", "0.37252814", "0.3713165", "0.37131354", "0.3709921" ]
0.7158476
0
NewOrganizations will return Organizations
func NewOrganizations(c client.ConfigProvider, config *aws.Config) *Organizations { return &Organizations{Svc: organizations.New(c, config)} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *cfService) Organizations() Organizations {\n\treturn newOrganizationAPI(c.Client)\n}", "func (a *OrganizationsApiService) OrganizationsOrganizationsCreate(ctx _context.Context, localVarOptionals *OrganizationsOrganizationsCreateOpts) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/v2/organizations/\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.Data.IsSet() {\n\t\tlocalVarPostBody = localVarOptionals.Data.Value()\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tlocalVarBody, err := 
_ioutil.ReadAll(localVarHTTPResponse.Body)\n\t\tlocalVarHTTPResponse.Body.Close()\n\t\tif err != nil {\n\t\t\treturn localVarHTTPResponse, err\n\t\t}\n\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (c *Client) Organizations() gitprovider.OrganizationsClient {\n\treturn c.orgs\n}", "func NewOrganizations(dir string, logger chronograf.Logger) chronograf.OrganizationsStore {\n\treturn &Organizations{\n\t\tDir: dir,\n\t\tLoad: load,\n\t\tReadDir: ioutil.ReadDir,\n\t\tLogger: logger,\n\t}\n}", "func (a *OrganizationsApiService) GetOrganizations(ctx _context.Context) ApiGetOrganizationsRequest {\n\treturn ApiGetOrganizationsRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (a *api) h_POST_orgs(c *gin.Context) {\n\ta.logger.Debug(\"POST /orgs\")\n\n\taCtx := a.getAuthContext(c)\n\tif a.errorResponse(c, aCtx.AuthZSuperadmin()) {\n\t\treturn\n\t}\n\n\tvar org Organization\n\tif a.errorResponse(c, bindAppJson(c, &org)) {\n\t\treturn\n\t}\n\n\ta.logger.Info(\"New organization \", org)\n\tid, err := a.Dc.InsertOrg(a.org2morg(&org))\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\n\tw := c.Writer\n\turi := composeURI(c.Request, strconv.FormatInt(id, 10))\n\ta.logger.Info(\"New organization with location \", uri, \" has been just created\")\n\tw.Header().Set(\"Location\", uri)\n\tc.Status(http.StatusCreated)\n}", "func (db *PSQL) GetOrganizations() ([]*models.Organization, error) {\n\treturn nil, nil\n}", "func (a *Client) GetOrganizations(params *GetOrganizationsParams) (*GetOrganizationsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetOrganizationsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getOrganizations\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/organizations\",\n\t\tProducesMediaTypes: 
[]string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetOrganizationsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetOrganizationsOK), nil\n\n}", "func (a *OrganizationsApiService) OrganizationsOrganizationsAdminsCreate(ctx _context.Context, id string, localVarOptionals *OrganizationsOrganizationsAdminsCreateOpts) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/v2/organizations/{id}/admins/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.Data.IsSet() {\n\t\tlocalVarPostBody = localVarOptionals.Data.Value()\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, 
localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\t\tlocalVarHTTPResponse.Body.Close()\n\t\tif err != nil {\n\t\t\treturn localVarHTTPResponse, err\n\t\t}\n\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (s *OrganizationsService) ListOrganizations(queryParams *ListOrganizationsQueryParams) (*Organizations, *resty.Response, error) {\n\n\tpath := \"/organizations/\"\n\n\tqueryParamsString, _ := query.Values(queryParams)\n\n\tresponse, err := s.client.R().\n\t\tSetQueryString(queryParamsString.Encode()).\n\t\tSetResult(&Organizations{}).\n\t\tSetError(&Error{}).\n\t\tGet(path)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresult := response.Result().(*Organizations)\n\tif queryParams.Paginate {\n\t\titems := s.organizationsPagination(response.Header().Get(\"Link\"), 0, 0)\n\t\tfor _, organization := range items.Items {\n\t\t\tresult.AddOrganization(organization)\n\t\t}\n\t} else {\n\t\tif len(result.Items) < queryParams.Max {\n\t\t\titems := s.organizationsPagination(response.Header().Get(\"Link\"), len(result.Items), queryParams.Max)\n\t\t\tfor _, organization := range items.Items {\n\t\t\t\tresult.AddOrganization(organization)\n\t\t\t}\n\t\t}\n\t}\n\treturn result, response, err\n\n}", "func NewGetOrganizationsOK() *GetOrganizationsOK {\n\treturn &GetOrganizationsOK{}\n}", "func (m *MockDatabase) GetOrganizations() (*[]model.Organization, error) {\n\tm.ctrl.T.Helper()\n\tret := 
m.ctrl.Call(m, \"GetOrganizations\")\n\tret0, _ := ret[0].(*[]model.Organization)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func listOrganizations(client *chef.Client) map[string]string {\n\torgList, err := client.Organizations.List()\n\tif err != nil {\n\t\tfmt.Println(\"Issue listing orgs:\", err)\n\t}\n\treturn orgList\n}", "func (oc *OrdererConfig) Organizations() map[string]Org {\n\treturn oc.orgs\n}", "func (r *Resolver) Organizations() []*OrganizationResolver {\n\tvar result []*OrganizationResolver\n\tvar os service.OrganizationService\n\torganizations := os.All()\n\tfor _, o := range organizations {\n\t\tresult = append(result, &OrganizationResolver{&organization{o}})\n\t}\n\treturn result\n}", "func (g *GitHub) Organizations() (orgs []Organization, err error) {\n\terr = g.callGithubApi(\"GET\", \"/user/orgs\", &orgs)\n\treturn\n}", "func (m *MockOrganizationLister) Organizations(arg0 *mongodbatlas.ListOptions) (*mongodbatlas.Organizations, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Organizations\", arg0)\n\tret0, _ := ret[0].(*mongodbatlas.Organizations)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (os *OrganizationsService) List(opt *OrganizationListOptions) ([]Organization, *Response, error) {\n\tvar u string\n\n\tu = fmt.Sprintf(\"v2/organizations\")\n\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := os.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\torgs := new([]Organization)\n\tresp, err := os.client.Do(req, orgs)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn *orgs, resp, err\n}", "func (s *OrganizationsStore) Add(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) {\n\treturn nil, fmt.Errorf(\"cannot create organization\")\n}", "func createOrganizer() {\n\tfmt.Println(\"You would have created a new Organizer if this happened\")\n}", "func (s *AccountService) 
ListMyOrganizations() ([]*schema.Organization, error) {\n\t// TODO: fix this\n\t// members, err := s.ListMyMemberships()\n\t// if err != nil {\n\t// \treturn nil, err\n\t// }\n\t//\n\t// orgIds := []bson.ObjectId{}\n\t// for _, member := range members {\n\t// \torgIds = append(orgIds, member.OrganizationID)\n\t// }\n\t//\n\t// orgs, err := models.Organization.FindAllIds(orgIds)\n\t// if err != nil {\n\t// \treturn nil, err\n\t// }\n\t//\n\t// return orgs, nil\n\treturn nil, nil\n}", "func NewGetOrganizationsDefault(code int) *GetOrganizationsDefault {\n\treturn &GetOrganizationsDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (m *GraphBaseServiceClient) Organization()(*ic949a0bb5066d68760e8502a7f9db83f571d9e01e38fad4aadf7268188e52df0.OrganizationRequestBuilder) {\n return ic949a0bb5066d68760e8502a7f9db83f571d9e01e38fad4aadf7268188e52df0.NewOrganizationRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (m *GraphBaseServiceClient) Organization()(*ic949a0bb5066d68760e8502a7f9db83f571d9e01e38fad4aadf7268188e52df0.OrganizationRequestBuilder) {\n return ic949a0bb5066d68760e8502a7f9db83f571d9e01e38fad4aadf7268188e52df0.NewOrganizationRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (os *OrganizationsService) Get(ctx context.Context, id string) (res *Response, o *Organization, err error) {\n\treturn os.get(ctx, fmt.Sprintf(\"v2/organizations/%s\", id))\n}", "func orgIndex(w http.ResponseWriter, r *http.Request) {\n w.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tdb := co.DbConnection(dbc)\n\t// DB query to get all the orgs\n\tresults, err := db.Query(\"SELECT name FROM organizations\")\n\tif err != nil {\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\tvar organizations []string\n\tfor results.Next() {\n\t\tvar name string\n\t\terr = results.Scan(&name)\n\t\tif err != nil {\n\t\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t\t}\n\t\torganizations = 
append(organizations, name)\n\t}\n\tresults.Close()\n\tdb.Close()\n\tjsonPrint(w, organizations)\n return\n}", "func (a *OrganizationsApiService) GetOrganizationsExecute(r ApiGetOrganizationsRequest) (Organizations, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Organizations\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"OrganizationsApiService.GetOrganizations\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/organizations\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := 
_ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v NotFound\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Forbidden\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v InternalError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 503 {\n\t\t\tvar v TemporarilyUnavailable\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, 
localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (a *OrganizationsApiService) OrganizationsOrganizationsRead(ctx _context.Context, id string, localVarOptionals *OrganizationsOrganizationsReadOpts) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/v2/organizations/{id}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Search.IsSet() {\n\t\tlocalVarQueryParams.Add(\"search\", parameterToString(localVarOptionals.Search.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, 
localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\t\tlocalVarHTTPResponse.Body.Close()\n\t\tif err != nil {\n\t\t\treturn localVarHTTPResponse, err\n\t\t}\n\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (a *OrganizationsApiService) OrganizationsOrganizationsApplicationsCreate(ctx _context.Context, id string, localVarOptionals *OrganizationsOrganizationsApplicationsCreateOpts) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/v2/organizations/{id}/applications/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set Accept 
header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.Data.IsSet() {\n\t\tlocalVarOptionalData, localVarOptionalDataok := localVarOptionals.Data.Value().(InlineObject44)\n\t\tif !localVarOptionalDataok {\n\t\t\treturn nil, reportError(\"data should be InlineObject44\")\n\t\t}\n\t\tlocalVarPostBody = &localVarOptionalData\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\t\tlocalVarHTTPResponse.Body.Close()\n\t\tif err != nil {\n\t\t\treturn localVarHTTPResponse, err\n\t\t}\n\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (m *Client) GetOrganizations(arg0 context.Context, arg1 *zendesk.OrganizationListOptions) ([]zendesk.Organization, zendesk.Page, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizations\", arg0, arg1)\n\tret0, _ := ret[0].([]zendesk.Organization)\n\tret1, _ := ret[1].(zendesk.Page)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (u *User) Organizations() (orgs []Organization, err error) {\n\turi := fmt.Sprintf(\"/users/%s/orgs\", u.Login)\n\terr = u.g.callGithubApi(\"GET\", uri, &orgs)\n\treturn\n}", "func CreateOrganization(clients *common.ClientContainer, handler 
common.HandlerInterface) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar org struct {\n\t\t\tName string `json:\"name\"`\n\t\t}\n\n\t\terr := json.NewDecoder(r.Body).Decode(&org)\n\t\tif err != nil {\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusBadRequest,\n\t\t\t\thttp.StatusText(http.StatusBadRequest),\n\t\t\t\terr.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif len(org.Name) == 0 {\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"provide Name in parameters\")\n\t\t\treturn\n\t\t}\n\n\t\tres, err := json.Marshal(org)\n\t\tif err != nil {\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\terr.Error())\n\t\t\treturn\n\n\t\t}\n\n\t\t// Create Organization if no error\n\t\terr = handler.CreateOrganization(clients, res)\n\t\tif err != nil {\n\t\t\tswitch err.(type) {\n\t\t\t// grafanaclient.Exists means, that user provided\n\t\t\t// organization already exists. 
We return 409\n\t\t\tcase grafanaclient.Exists:\n\t\t\t\terrMsg := fmt.Sprintf(\"Organization Exists\")\n\t\t\t\tcommon.WriteErrorToResponse(w, http.StatusConflict,\n\t\t\t\t\terrMsg, err.Error())\n\t\t\t\treturn\n\t\t\t// If any other error happened -> return 500 error\n\t\t\tdefault:\n\t\t\t\tlog.Logger.Error(err)\n\t\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\t\"Internal server error occured\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tw.WriteHeader(http.StatusOK)\n\t}\n}", "func (i *IdentityProvider) GetOrganizations(uuid string, date time.Time) ([]string, error) {\n\tquery := fmt.Sprintf(`select distinct o.name \n\t\tfrom enrollments e, organizations o\n\t\twhere e.organization_id = o.id and\n\t\te.uuid = '%s' and\n '%s' between e.start and e.end order by e.id desc`,\n\t\tuuid, date.Format(time.RFC3339))\n\n\tvar multiOrg []string\n\terr := i.db.Select(&multiOrg, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn multiOrg, nil\n}", "func NewOrganizationsService(db *sqlx.DB) OrganizationsService {\n\treturn &organizationsService{db: db}\n}", "func (a *OrganizationsApiService) OrganizationsOrganizationsUsersCreate(ctx _context.Context, id string, localVarOptionals *OrganizationsOrganizationsUsersCreateOpts) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/v2/organizations/{id}/users/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type 
header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.Data.IsSet() {\n\t\tlocalVarPostBody = localVarOptionals.Data.Value()\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\t\tlocalVarHTTPResponse.Body.Close()\n\t\tif err != nil {\n\t\t\treturn localVarHTTPResponse, err\n\t\t}\n\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func GetAllOrgEndpoint(w http.ResponseWriter, r *http.Request) {\n\n\tvar orgs []models.Organization\n\torgs = db.GetAllOrg()\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(orgs)\n\n}", "func (r *mysqlOrganizationsRepository) Create(o Organization) (Organization, error) {\n\t// TODO: validate & sanitize\n\n\tsql, args, err := 
sq.Insert(\"organizations\").SetMap(sq.Eq{\n\t\t\"admin_id\": o.AdminID,\n\t\t\"name\": o.Name,\n\t}).ToSql()\n\n\tif err != nil {\n\t\tlog.Printf(\"error in organization repo: %s\", err.Error())\n\t\treturn Organization{}, err\n\t}\n\n\tres, err := r.DB.Exec(sql, args...)\n\tif err != nil {\n\t\tlog.Printf(\"error in organization repo: %s\", err.Error())\n\t\treturn Organization{}, err\n\t}\n\n\tid, err := res.LastInsertId()\n\tif err != nil {\n\t\tlog.Printf(\"error in organization repo: %s\", err.Error())\n\t\treturn Organization{}, err\n\t}\n\n\treturn Organization{ID: uint(id)}, nil\n}", "func (a *OrganizationsApiService) OrganizationsOrganizationsDelete(ctx _context.Context, id string, localVarOptionals *OrganizationsOrganizationsDeleteOpts) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/v2/organizations/{id}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Search.IsSet() {\n\t\tlocalVarQueryParams.Add(\"search\", parameterToString(localVarOptionals.Search.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := 
selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\t\tlocalVarHTTPResponse.Body.Close()\n\t\tif err != nil {\n\t\t\treturn localVarHTTPResponse, err\n\t\t}\n\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (a *OrganizationsApiService) OrganizationsOrganizationsProjectsCreate(ctx _context.Context, id string, localVarOptionals *OrganizationsOrganizationsProjectsCreateOpts) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/v2/organizations/{id}/projects/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif 
localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.Data.IsSet() {\n\t\tlocalVarOptionalData, localVarOptionalDataok := localVarOptionals.Data.Value().(InlineObject50)\n\t\tif !localVarOptionalDataok {\n\t\t\treturn nil, reportError(\"data should be InlineObject50\")\n\t\t}\n\t\tlocalVarPostBody = &localVarOptionalData\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\t\tlocalVarHTTPResponse.Body.Close()\n\t\tif err != nil {\n\t\t\treturn localVarHTTPResponse, err\n\t\t}\n\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func getOrganizationsFromResponse(list []*OrganizationResponse) []*Organization {\n\tvar results []*Organization\n\tfor _, val := range list {\n\t\tif strings.ToLower(val.SubRequestStatus) == \"success\" {\n\t\t\tresults = append(results, &val.Organization)\n\t\t}\n\t}\n\treturn results\n}", "func ListOrganizations() error {\n\tclient, err := NewPacketClient()\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\torgs, _, err := client.Organizations.List()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te := MarshallAndPrint(orgs)\n\treturn e\n}", "func newAwsOrganizationsPolicyAttachments(c *TrussleV1Client, namespace string) *awsOrganizationsPolicyAttachments {\n\treturn &awsOrganizationsPolicyAttachments{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func GetOrganization(clients *common.ClientContainer, handler common.HandlerInterface) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\torglist, err := handler.GetOrganizations(clients)\n\t\tif err != nil {\n\t\t\tlog.Logger.Error(err)\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"Internal server error occured\")\n\t\t\treturn\n\t\t}\n\t\tw.Write(orglist)\n\t}\n}", "func (a *api) h_GET_orgs(c *gin.Context) {\n\ta.logger.Debug(\"GET /orgs\")\n\n\taCtx := a.getAuthContext(c)\n\tif aCtx.UserLogin() == \"\" {\n\t\tc.Status(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tods, err := a.Dc.GetOrgDescs(aCtx)\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, a.morgs2orgs(ods))\n}", "func (s *adminOrganizations) List(ctx context.Context, options *AdminOrganizationListOptions) (*AdminOrganizationList, error) {\n\tif err := options.valid(); err != nil {\n\t\treturn nil, err\n\t}\n\tu := \"admin/organizations\"\n\treq, err := s.client.NewRequest(\"GET\", u, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\torgl := &AdminOrganizationList{}\n\terr = req.Do(ctx, orgl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn orgl, nil\n}", "func (m *OrganizationManager) List(opts ...RequestOption) (o *OrganizationList, err error) {\n\terr = m.Request(\"GET\", m.URI(\"organizations\"), &o, applyListDefaults(opts))\n\treturn\n}", "func CreateOrgEndpoint(w http.ResponseWriter, r *http.Request) {\n\tvar org models.Organization\n\tdecoder := 
json.NewDecoder(r.Body)\n\terr := decoder.Decode(&org)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\n\torgExist := db.GetOrgByName(org.Name)\n\tif orgExist.Name != \"\" {\n\t\tmsg := \"Org already exist in the database\"\n\t\thttp.Error(w, msg, 400)\n\t\treturn\n\t}\n\n\torg.ID = primitive.NewObjectID()\n\n\tdb.CreateOrg(org)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(org)\n\n}", "func NewListOrganizationsOK() *ListOrganizationsOK {\n\treturn &ListOrganizationsOK{}\n}", "func (m *OrganizationManager) Create(o *Organization, opts ...RequestOption) (err error) {\n\terr = m.Request(\"POST\", m.URI(\"organizations\"), &o, opts...)\n\treturn\n}", "func (o *Organizations) Add(ctx context.Context, org *chronograf.Organization) (*chronograf.Organization, error) {\n\treturn nil, fmt.Errorf(\"unable to add organizations to the filesystem\")\n}", "func (org *OrganizationService) List(ctx context.Context) ([]*Organization, error) {\n\tpath := \"me/organizations\"\n\treq, err := org.client.createRequest(\"GET\", path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := new(GetOrganizationsResponse)\n\terr = org.client.do(ctx, req, c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif strings.ToLower(c.RequestStatus) == \"success\" {\n\t\tif len(c.Organizations) > 0 {\n\t\t\treturn getOrganizationsFromResponse(c.Organizations), nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"no organizations found\")\n\t}\n\treturn nil, fmt.Errorf(`non-success status returned from snapchat api (list organizations): %s`, c.RequestStatus)\n}", "func (a *OrganizationsApiService) OrganizationsOrganizationsUpdate(ctx _context.Context, id string, localVarOptionals *OrganizationsOrganizationsUpdateOpts) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// 
create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/v2/organizations/{id}/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Search.IsSet() {\n\t\tlocalVarQueryParams.Add(\"search\", parameterToString(localVarOptionals.Search.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.Data.IsSet() {\n\t\tlocalVarPostBody = localVarOptionals.Data.Value()\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\t\tlocalVarHTTPResponse.Body.Close()\n\t\tif err != nil {\n\t\t\treturn localVarHTTPResponse, err\n\t\t}\n\n\t\tnewErr := 
GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (o *Organizations) Get(ctx context.Context, query chronograf.OrganizationQuery) (*chronograf.Organization, error) {\n\torg, _, err := o.findOrg(query)\n\treturn org, err\n}", "func orgGroups(w http.ResponseWriter, r *http.Request) {\n w.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tvars := mux.Vars(r)\n\torg := cleanInput(vars[\"org\"])\n\tdb := co.DbConnection(dbc)\n\tstmtQryOrgGrp, err := db.Prepare(\"SELECT group_name FROM org_groups where organization_name = ? ;\")\n\tif err != nil {\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\tresults, err := stmtQryOrgGrp.Query(org)\n\tif err != nil {\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\tvar groups []string\n\tfor results.Next() {\n\t\tvar name string\n\t\terr = results.Scan(&name)\n\t\tif err != nil {\n\t\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t\t}\n\t\tgroups = append(groups, name)\n\t}\n\tresults.Close()\n stmtQryOrgGrp.Close()\n\tdb.Close()\n\tgroups = co.Unique(groups)\n\tjsonPrint(w, groups)\n return\n}", "func NewGetOrganizationsUnauthorized() *GetOrganizationsUnauthorized {\n\treturn &GetOrganizationsUnauthorized{}\n}", "func (s *OrganizationsStore) Add(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) {\n\tif !s.nameIsUnique(ctx, o.Name) {\n\t\treturn nil, chronograf.ErrOrganizationAlreadyExists\n\t}\n\terr := s.client.db.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket(OrganizationsBucket)\n\t\tseq, err := b.NextSequence()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.ID = fmt.Sprintf(\"%d\", seq)\n\n\t\tv, err := internal.MarshalOrganization(o)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn b.Put([]byte(o.ID), v)\n\t})\n\n\treturn o, 
err\n}", "func orgShow(w http.ResponseWriter, r *http.Request) {\n w.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tvars := mux.Vars(r)\n\torg := cleanInput(vars[\"org\"])\n\tdb := co.DbConnection(dbc)\n stmtQryOrg, err := db.Prepare(\"SELECT name FROM organizations where name = ? ;\")\n\tif err != nil {\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\tresults, err := stmtQryOrg.Query(org)\n\tif err != nil {\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\tvar orgs []string\n\tfor results.Next() {\n\t\tvar name string\n\t\terr = results.Scan(&name)\n\t\tif err != nil {\n\t\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t\t}\n\t\torgs = append(orgs, name)\n\t}\n\tresults.Close()\n stmtQryOrg.Close()\n\tdb.Close()\n\torgs = co.Unique(orgs)\n\tjsonPrint(w, orgs)\n return\n}", "func (r *OrganizationsService) Create(googlecloudapigeev1organization *GoogleCloudApigeeV1Organization) *OrganizationsCreateCall {\n\tc := &OrganizationsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.googlecloudapigeev1organization = googlecloudapigeev1organization\n\treturn c\n}", "func Organisms(exec boil.Executor, mods ...qm.QueryMod) organismQuery {\n\tmods = append(mods, qm.From(\"\\\"organism\\\"\"))\n\treturn organismQuery{NewQuery(exec, mods...)}\n}", "func GetAllOrganizations(iq IQ) ([]Organization, error) {\n\tdoError := func(err error) error {\n\t\treturn fmt.Errorf(\"organizations not found: %v\", err)\n\t}\n\n\tbody, _, err := iq.Get(restOrganization)\n\tif err != nil {\n\t\treturn nil, doError(err)\n\t}\n\n\tvar resp allOrgsResponse\n\tif err = json.Unmarshal(body, &resp); err != nil {\n\t\treturn nil, doError(err)\n\t}\n\n\treturn resp.Organizations, nil\n}", "func (os *OrganizationsService) Get(slug string) (*Organization, *Response, error) {\n\n\tu := fmt.Sprintf(\"v2/organizations/%s\", slug)\n\n\treq, err := os.client.NewRequest(\"GET\", u, 
nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\torganization := new(Organization)\n\tresp, err := os.client.Do(req, organization)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn organization, resp, err\n}", "func NewGETPartnerOrganizationsOK() *GETPartnerOrganizationsOK {\n\treturn &GETPartnerOrganizationsOK{}\n}", "func getOrganization(orgGroup *cb.ConfigGroup, orgName string) (Organization, error) {\n\tpolicies, err := getPolicies(orgGroup.Policies)\n\tif err != nil {\n\t\treturn Organization{}, err\n\t}\n\n\tmsp, err := getMSPConfig(orgGroup)\n\tif err != nil {\n\t\treturn Organization{}, err\n\t}\n\n\tvar anchorPeers []Address\n\t_, ok := orgGroup.Values[AnchorPeersKey]\n\tif ok {\n\t\tanchorProtos := &pb.AnchorPeers{}\n\t\terr = unmarshalConfigValueAtKey(orgGroup, AnchorPeersKey, anchorProtos)\n\t\tif err != nil {\n\t\t\treturn Organization{}, err\n\t\t}\n\n\t\tfor _, anchorProto := range anchorProtos.AnchorPeers {\n\t\t\tanchorPeers = append(anchorPeers, Address{\n\t\t\t\tHost: anchorProto.Host,\n\t\t\t\tPort: int(anchorProto.Port),\n\t\t\t})\n\t\t}\n\t}\n\n\treturn Organization{\n\t\tName: orgName,\n\t\tPolicies: policies,\n\t\tMSP: msp,\n\t\tAnchorPeers: anchorPeers,\n\t}, nil\n}", "func (s *OrganizationsStore) All(ctx context.Context) ([]chronograf.Organization, error) {\n\tvar orgs []chronograf.Organization\n\terr := s.each(ctx, func(o *chronograf.Organization) {\n\t\torgs = append(orgs, *o)\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn orgs, nil\n}", "func (u *UserLDAPMapping) GetOrganizationsURL() string {\n\tif u == nil || u.OrganizationsURL == nil {\n\t\treturn \"\"\n\t}\n\treturn *u.OrganizationsURL\n}", "func (s *OrganizationsServiceOp) Get(ctx context.Context, orgID string) (*Organization, *Response, error) {\n\tif orgID == \"\" {\n\t\treturn nil, nil, NewArgError(\"orgID\", \"must be set\")\n\t}\n\n\tpath := fmt.Sprintf(\"orgs/%s\", orgID)\n\n\treq, err := s.client.NewRequest(ctx, http.MethodGet, path, 
nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\troot := new(Organization)\n\tresp, err := s.client.Do(ctx, req, root)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn root, resp, err\n}", "func (r *OrganizationsService) List() *OrganizationsListCall {\n\tc := &OrganizationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\treturn c\n}", "func (api *API) ListOrganizations() ([]Organization, ResultInfo, error) {\n\tvar r organizationResponse\n\tres, err := api.makeRequest(\"GET\", \"/user/organizations\", nil)\n\tif err != nil {\n\t\treturn []Organization{}, ResultInfo{}, errors.Wrap(err, errMakeRequestError)\n\t}\n\n\terr = json.Unmarshal(res, &r)\n\tif err != nil {\n\t\treturn []Organization{}, ResultInfo{}, errors.Wrap(err, errUnmarshalError)\n\t}\n\n\treturn r.Result, r.ResultInfo, nil\n}", "func (r *OrganizationsService) List(parent string) *OrganizationsListCall {\n\tc := &OrganizationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\treturn c\n}", "func (osc *Client) CreateOrg(ctx context.Context, companyName, signingEntityName, companyWebsite string) (*models.Organization, error) {\n\tf := logrus.Fields{\n\t\t\"functionName\": \"organization_service.CreateOrg\",\n\t\tutils.XREQUESTID: ctx.Value(utils.XREQUESTID),\n\t\t\"companyName\": companyName,\n\t\t\"signingEntityName\": signingEntityName,\n\t\t\"companyWebsite\": companyWebsite,\n\t}\n\n\tvar org *models.Organization\n\n\ttok, tokenErr := token.GetToken()\n\tif tokenErr != nil {\n\t\tlog.WithFields(f).WithError(tokenErr).Warn(\"unable to fetch token\")\n\t\treturn nil, tokenErr\n\t}\n\n\t// If not specified, use the company name as the signing entity name\n\tif signingEntityName == \"\" {\n\t\tsigningEntityName = companyName\n\t}\n\n\t//Lookup Org based on domain\n\tlookupOrg, lookupErr := osc.SearchOrgLookup(ctx, nil, &companyWebsite)\n\tif lookupErr != nil {\n\t\tlog.WithFields(f).WithError(lookupErr).Warn(\"unable to search for existing 
company using company website value\")\n\t\tif _, ok := lookupErr.(*organizations.LookupNotFound); !ok {\n\t\t\treturn nil, lookupErr\n\t\t}\n\t}\n\n\tif lookupOrg != nil && lookupOrg.Payload.ID != \"\" {\n\t\t// Get org based on ID\n\t\tvar updateErr error\n\t\texistingOrg, existingOrgErr := osc.GetOrganization(ctx, lookupOrg.Payload.ID)\n\t\tif existingOrgErr != nil {\n\t\t\tlog.WithFields(f).WithError(existingOrgErr).Warnf(\"unable to get organization : %s \", lookupOrg.Payload.ID)\n\t\t\treturn nil, existingOrgErr\n\t\t}\n\t\torg, updateErr = osc.UpdateOrg(ctx, existingOrg, signingEntityName)\n\t\tif updateErr != nil {\n\t\t\tlog.WithFields(f).WithError(updateErr).Warn(\"unable to update for existing company\")\n\t\t\treturn nil, updateErr\n\t\t}\n\n\t} else {\n\t\t// use linux foundation logo as default\n\t\tlinuxFoundation, err := osc.SearchOrganization(ctx, utils.TheLinuxFoundation, \"\", \"\")\n\t\tif err != nil || len(linuxFoundation) == 0 {\n\t\t\tlog.WithFields(f).WithError(err).Warn(\"unable to search Linux Foundation organization\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\tclientAuth := runtimeClient.BearerToken(tok)\n\t\tlogoURL := linuxFoundation[0].LogoURL\n\t\tf[\"logoURL\"] = logoURL\n\n\t\tparams := &organizations.CreateOrgParams{\n\t\t\tOrg: &models.CreateOrg{\n\t\t\t\tName: &companyName,\n\t\t\t\tWebsite: &companyWebsite,\n\t\t\t\tLogoURL: logoURL,\n\t\t\t\tSigningEntityName: []string{signingEntityName},\n\t\t\t},\n\t\t\tContext: ctx,\n\t\t}\n\n\t\tlog.WithFields(f).Debugf(\"Creating organization with params: %+v\", models.CreateOrg{\n\t\t\tName: &companyName,\n\t\t\tWebsite: &companyWebsite,\n\t\t\tLogoURL: logoURL,\n\t\t\tSigningEntityName: []string{signingEntityName},\n\t\t})\n\t\tresult, err := osc.cl.Organizations.CreateOrg(params, clientAuth)\n\t\tif err != nil {\n\t\t\tlog.WithFields(f).WithError(err).Warnf(\"Failed to create salesforce Company: %s , err: %+v \", companyName, err)\n\t\t\treturn nil, 
err\n\t\t}\n\t\tlog.WithFields(f).Infof(\"Company: %s successfuly created \", companyName)\n\n\t\torg = result.Payload\n\t}\n\treturn org, nil\n}", "func (s *GormOrganizationModelService) ListOrganizations(ctx context.Context, identityID uuid.UUID) ([]organization.IdentityOrganization, error) {\n\n\tdb := s.db.Model(&account.Identity{})\n\n\tfindOrganization := func(orgs []organization.IdentityOrganization, id uuid.UUID) int {\n\t\tfor i, org := range orgs {\n\t\t\tif org.OrganizationID == id {\n\t\t\t\treturn i\n\t\t\t}\n\t\t}\n\t\treturn -1\n\t}\n\n\tresults := []organization.IdentityOrganization{}\n\n\t// query for organizations in which the user is a member\n\trows, err := db.Unscoped().Raw(`SELECT \n oi.ID,\n r.name\n FROM \n resource r, \n identities oi, \n resource_type rt\n\t\tWHERE \n oi.identity_resource_id = r.resource_id \n and r.resource_type_id = rt.resource_type_id\n\t\t and rt.name = ? \n and oi.deleted_at IS NULL\n and r.deleted_at IS NULL\n and rt.deleted_at IS NULL\n and (oi.ID = ? \n OR oi.ID in (\n WITH RECURSIVE m AS (\n\t\t SELECT \n member_of \n FROM \n membership \n WHERE \n member_id = ? 
\n UNION SELECT \n p.member_of \n FROM \n membership p INNER JOIN m ON m.member_of = p.member_id\n ) \n select member_of from m\n ))`,\n\t\tauthorization.IdentityResourceTypeOrganization, identityID, identityID).Rows()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar id string\n\t\tvar name string\n\t\trows.Scan(&id, &name)\n\t\torganizationId, err := uuid.FromString(id)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tidx := findOrganization(results, organizationId)\n\t\tif idx == -1 {\n\t\t\tresults = append(results, organization.IdentityOrganization{\n\t\t\t\tOrganizationID: organizationId,\n\t\t\t\tName: name,\n\t\t\t\tMember: true,\n\t\t\t\tRoles: []string{},\n\t\t\t})\n\t\t} else {\n\t\t\tresults[idx].Member = true\n\t\t}\n\t}\n\n\t// query for organizations for which the user has a role, or the user is a member of a team or group that has a role\n\trows, err = db.Unscoped().Raw(`SELECT \n i.id, \n r.name,\n role.name\n FROM \n identity_role ir, \n resource r, \n identities i,\n resource_type rt, role\n\t\tWHERE \n ir.resource_id = r.resource_id\n and ir.resource_id = i.identity_resource_id\n and ir.role_id = role.role_id \n and r.resource_type_id = rt.resource_type_id \n\t\t and rt.name = ? \n and ir.deleted_at IS NULL\n and r.deleted_at IS NULL\n and rt.deleted_at IS NULL\n and role.deleted_at IS NULL\n and (ir.identity_id = ? \n OR ir.identity_id in (\n WITH RECURSIVE m AS ( \n\t\t SELECT \n member_id, \n member_of \n FROM \n membership \n WHERE \n member_id = ? 
\n\t\t UNION SELECT \n p.member_id, \n p.member_of \n FROM \n membership p INNER JOIN m ON m.member_of = p.member_id\n )\n\t\t select member_id from m\n ))`,\n\t\tauthorization.IdentityResourceTypeOrganization, identityID, identityID).Rows()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar id string\n\t\tvar name string\n\t\tvar roleName string\n\t\trows.Scan(&id, &name, &roleName)\n\t\torganizationId, err := uuid.FromString(id)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tidx := findOrganization(results, organizationId)\n\t\tif idx == -1 {\n\t\t\tresults = append(results, organization.IdentityOrganization{\n\t\t\t\tOrganizationID: organizationId,\n\t\t\t\tName: name,\n\t\t\t\tMember: false,\n\t\t\t\tRoles: []string{roleName},\n\t\t\t})\n\t\t} else {\n\t\t\tfound := false\n\t\t\t// Check if the role is already in the entry\n\t\t\tfor _, r := range results[idx].Roles {\n\t\t\t\tif r == roleName {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !found {\n\t\t\t\tresults[idx].Roles = append(results[idx].Roles, roleName)\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn results, nil\n}", "func (c *Client) OrganizationList(lr *ListRange) ([]Organization, error) {\n\treq, err := c.NewRequest(\"GET\", \"/organizations\", nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif lr != nil {\n\t\tlr.SetHeader(req)\n\t}\n\n\tvar organizationsRes []Organization\n\treturn organizationsRes, c.DoReq(req, &organizationsRes)\n}", "func buildResponse(items []*dynamodb.AttributeValue) []models.GithubOrg {\n\t// Convert to a response model\n\tvar orgs []models.GithubOrg\n\tfor _, org := range items {\n\t\tselected := true\n\t\torgs = append(orgs, models.GithubOrg{\n\t\t\tID: org.S,\n\t\t\tSelected: &selected,\n\t\t})\n\t}\n\n\treturn orgs\n}", "func NewConnectedOrganizationMembers()(*ConnectedOrganizationMembers) {\n m := &ConnectedOrganizationMembers{\n SubjectSet: *NewSubjectSet(),\n }\n odataTypeValue 
:= \"#microsoft.graph.connectedOrganizationMembers\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func (c *Client) OrganizationList(owner bool, cursor string) ([]Organization, error) {\n\tresponse, err := c.APIGet(\"organizations/\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\torgs := make([]Organization, 1)\n\terr = json.Unmarshal(response, &orgs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn orgs, nil\n}", "func NewOrganizationsStore(s chronograf.OrganizationsStore, org string) *OrganizationsStore {\n\treturn &OrganizationsStore{\n\t\tstore: s,\n\t\torganization: org,\n\t}\n}", "func (c *Campaigner) OrganizationList(limit int, offset int) (response ResponseOrganizationList, err error) {\n\t// Setup.\n\tqs := url.Values{}\n\tqs.Set(\"limit\", strconv.Itoa(limit))\n\tqs.Set(\"offset\", strconv.Itoa(offset))\n\tu := url.URL{Path: \"/api/3/organizations\", RawQuery: qs.Encode()}\n\n\t// GET request.\n\tr, body, err := c.get(u.String())\n\tif err != nil {\n\t\treturn response, fmt.Errorf(\"organization list failed, HTTP failure: %s\", err)\n\t}\n\n\t// Success.\n\t// TODO(doc-mismatch): 200 != 201\n\tif r.StatusCode == http.StatusOK {\n\t\terr = json.Unmarshal(body, &response)\n\t\tif err != nil {\n\t\t\treturn response, fmt.Errorf(\"organization list failed, JSON failure: %s\", err)\n\t\t}\n\n\t\treturn response, nil\n\t}\n\n\t// Failure (API docs are not clear about errors here).\n\treturn response, fmt.Errorf(\"organization list failed, unspecified error (%d): %s\", r.StatusCode, string(body))\n}", "func (s *OrganizationsStore) All(ctx context.Context) ([]chronograf.Organization, error) {\n\terr := validOrganization(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tds, err := s.store.All(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefaultOrg, err := s.store.DefaultOrganization(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefaultOrgID := defaultOrg.ID\n\n\t// This filters organizations without allocating\n\t// 
https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating\n\torganizations := ds[:0]\n\tfor _, d := range ds {\n\t\tid := d.ID\n\t\tswitch id {\n\t\tcase s.organization, defaultOrgID:\n\t\t\torganizations = append(organizations, d)\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn organizations, nil\n}", "func (o *Organizations) Run(params *OrgParams) error {\n\treturn run(params)\n}", "func NewListOrganizations(ctx *middleware.Context, handler ListOrganizationsHandler) *ListOrganizations {\n\treturn &ListOrganizations{Context: ctx, Handler: handler}\n}", "func (u *User) GetOrganizationsURL() string {\n\tif u == nil || u.OrganizationsURL == nil {\n\t\treturn \"\"\n\t}\n\treturn *u.OrganizationsURL\n}", "func (moa *MultiOrgAlertmanager) GetOrCreateOrgRegistry(id int64) prometheus.Registerer {\n\treturn moa.registries.GetOrCreateOrgRegistry(id)\n}", "func (t *HackidfChaincode) CreateOrg(stub shim.ChaincodeStubInterface, args []string)pb.Response{\n\tvar OrgID = args[0]\n\tvar OrgName = args[1]\n\tvar IsVerified = \"No\"\n\t// checking for an error or if the user already exists\n\tOrgAsBytes, err := stub.GetState(OrgID)\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to get Organisation:\" + err.Error())\n\t}else if OrgAsBytes != nil{\n\t\treturn shim.Error(\"Organisation is already registered\")\n\t}\n\tvar Organisation = &Organisation{OrgName:OrgName, IsVerified:IsVerified}\n\tOrgJsonAsBytes, err :=json.Marshal(Organisation)\n\tif err != nil {\n\t\tshim.Error(\"Error encountered while Marshalling\")\n\t}\n\terr = stub.PutState(OrgID, OrgJsonAsBytes)\n\tif err != nil {\n\t\tshim.Error(\"Error encountered while Creating Organisation\")\n\t}\n\tfmt.Println(\"Ledger Updated Successfully\")\n\treturn shim.Success(nil)\n}", "func (c *Campaigner) OrganizationCreate(org Organization) (result ResponseOrganizationCreate, err error) {\n\tvar (\n\t\turi = \"/api/3/organizations\"\n\t\tdata = map[string]interface{}{\n\t\t\t\"organization\": 
org,\n\t\t}\n\t)\n\n\tr, body, err := c.post(uri, data)\n\tif err != nil {\n\t\treturn result, fmt.Errorf(\"organization creation failed, HTTP error: %s\", err)\n\t}\n\n\t// Response check.\n\tswitch r.StatusCode {\n\tcase http.StatusCreated:\n\t\terr = json.Unmarshal(body, &result)\n\t\tif err != nil {\n\t\t\treturn result, fmt.Errorf(\"organization creation failed, JSON error: %s\", err)\n\t\t}\n\n\t\treturn result, nil\n\tcase http.StatusUnprocessableEntity:\n\t\tvar apiError ActiveCampaignError\n\t\terr = json.Unmarshal(body, &apiError)\n\t\tif err != nil {\n\t\t\treturn result, fmt.Errorf(\"organization creation failed, API error unmarshall error: %s\", err)\n\t\t}\n\n\t\treturn result, apiError\n\tdefault:\n\t\treturn result, fmt.Errorf(\"organization creation failed, unspecified error (%d): %s\", r.StatusCode, string(body))\n\t}\n}", "func (r *OrganizationsService) Get(name string) *OrganizationsGetCall {\n\tc := &OrganizationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (org *OrganizationService) Get(ctx context.Context, organizationId string) (*Organization, error) {\n\tpath := fmt.Sprintf(`organizations/%s`, organizationId)\n\treq, err := org.client.createRequest(\"GET\", path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta := new(GetOrganizationsResponse)\n\terr = org.client.do(ctx, req, a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif strings.ToLower(a.RequestStatus) == \"success\" {\n\t\tif len(a.Organizations) >= 1 {\n\t\t\tif strings.ToLower(a.Organizations[0].SubRequestStatus) == \"success\" {\n\t\t\t\treturn &a.Organizations[0].Organization, nil\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(`non-success status returned from snapchat api (get organization): %s`, a.RequestStatus)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"no organizations found with organization id: %s\", organizationId)\n\t}\n\treturn nil, fmt.Errorf(`non-success status returned from snapchat api (get organization): %s`, 
a.RequestStatus)\n}", "func FetchOrganizations() ([]*mono_models.Organization, error) {\n\tparams := clientOrgs.NewListOrganizationsParams()\n\tmemberOnly := true\n\tparams.SetMemberOnly(&memberOnly)\n\tres, err := authentication.Client().Organizations.ListOrganizations(params, authentication.ClientAuth())\n\n\tif err != nil {\n\t\treturn nil, processOrgErrorResponse(err)\n\t}\n\n\treturn res.Payload, nil\n}", "func NewOrganizationsClient(ctx context.Context, opts ...option.ClientOption) (*OrganizationsClient, error) {\n\tclientOpts := defaultOrganizationsGRPCClientOptions()\n\tif newOrganizationsClientHook != nil {\n\t\thookOpts, err := newOrganizationsClientHook(ctx, clientHookParams{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclientOpts = append(clientOpts, hookOpts...)\n\t}\n\n\tconnPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := OrganizationsClient{CallOptions: defaultOrganizationsCallOptions()}\n\n\tc := &organizationsGRPCClient{\n\t\tconnPool: connPool,\n\t\torganizationsClient: resourcemanagerpb.NewOrganizationsClient(connPool),\n\t\tCallOptions: &client.CallOptions,\n\t\toperationsClient: longrunningpb.NewOperationsClient(connPool),\n\t}\n\tc.setGoogleClientInfo()\n\n\tclient.internalClient = c\n\n\treturn &client, nil\n}", "func NewOrganismFactory() *OrganismFactory {\n\treturn &OrganismFactory{}\n}", "func New() *cobra.Command {\n\tconst (\n\t\tlong = `Commands for managing Fly organizations. 
list, create, show and\ndestroy organizations.\nOrganization admins can also invite or remove users from Organizations.\n`\n\t\tshort = \"Commands for managing Fly organizations\"\n\t)\n\n\t// TODO: list should also accept the --org param\n\n\torgs := command.New(\"orgs\", short, long, nil)\n\n\torgs.AddCommand(\n\t\tnewList(),\n\t\tnewShow(),\n\t\tnewInvite(),\n\t\tnewRemove(),\n\t\tnewCreate(),\n\t\tnewDelete(),\n\t)\n\n\treturn orgs\n}", "func (c *Contributor) GetOrganizationsURL() string {\n\tif c == nil || c.OrganizationsURL == nil {\n\t\treturn \"\"\n\t}\n\treturn *c.OrganizationsURL\n}", "func createFixturesForUserGetOrganizations(ms *ModelSuite) ([]Organization, Users) {\n\tunique := domain.GetUUID()\n\torgs := []Organization{\n\t\t{Name: fmt.Sprintf(\"Starfleet Academy-%s\", unique)},\n\t\t{Name: fmt.Sprintf(\"ACME-%s\", unique)},\n\t}\n\n\tfor i := range orgs {\n\t\torgs[i].AuthType = AuthTypeSaml\n\t\torgs[i].AuthConfig = \"{}\"\n\t\torgs[i].UUID = domain.GetUUID()\n\t\tcreateFixture(ms, &orgs[i])\n\t}\n\n\tusers := createUserFixtures(ms.DB, 1).Users\n\n\t// user is already in org 0, but need user to also be in org 1\n\tcreateFixture(ms, &UserOrganization{\n\t\tOrganizationID: orgs[1].ID,\n\t\tUserID: users[0].ID,\n\t\tAuthID: users[0].Email,\n\t\tAuthEmail: users[0].Email,\n\t})\n\n\treturn orgs, users\n}", "func (s *OrganizationsServiceOp) List(ctx context.Context, listOptions *ListOptions) ([]Organization, *Response, error) {\n\t//Add query params from listOptions\n\tpath, err := setListOptions(organizationsBasePath, listOptions)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\troot := new(OrganizationResponse)\n\tresp, err := s.client.Do(ctx, req, root)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tif l := root.Links; l != nil {\n\t\tresp.Links = l\n\t}\n\n\treturn root.Results, resp, nil\n}", "func (c 
*organizationsRESTClient) SearchOrganizations(ctx context.Context, req *resourcemanagerpb.SearchOrganizationsRequest, opts ...gax.CallOption) *OrganizationIterator {\n\tit := &OrganizationIterator{}\n\treq = proto.Clone(req).(*resourcemanagerpb.SearchOrganizationsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*resourcemanagerpb.Organization, string, error) {\n\t\tresp := &resourcemanagerpb.SearchOrganizationsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v3/organizations:search\")\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tif req.GetQuery() != \"\" {\n\t\t\tparams.Add(\"query\", fmt.Sprintf(\"%v\", req.GetQuery()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer 
httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetOrganizations(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func newOrganizationMutation(c config, op Op, opts ...organizationOption) *OrganizationMutation {\n\tm := &OrganizationMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeOrganization,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}", "func (mr *MockDatabaseMockRecorder) GetOrganizations() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetOrganizations\", reflect.TypeOf((*MockDatabase)(nil).GetOrganizations))\n}", "func (database *Database) ListOrganizations(organizations *[]Organization) error {\n\terr := database.DB.Preload(\"Users\").\n\t\tPreload(\"Activities\").\n\t\tFind(organizations).Error\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to select all organizations: %s\", err)\n\t}\n\n\treturn nil\n}", "func (c *Context) Organization() string { return c.orgName }", "func NewOrganizationsRESTClient(ctx context.Context, opts ...option.ClientOption) (*OrganizationsClient, error) {\n\tclientOpts := 
append(defaultOrganizationsRESTClientOptions(), opts...)\n\thttpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcallOpts := defaultOrganizationsRESTCallOptions()\n\tc := &organizationsRESTClient{\n\t\tendpoint: endpoint,\n\t\thttpClient: httpClient,\n\t\tCallOptions: &callOpts,\n\t}\n\tc.setGoogleClientInfo()\n\n\treturn &OrganizationsClient{internalClient: c, CallOptions: callOpts}, nil\n}", "func (m *MockDatabase) GetOrganizationsByGitSource(gitSource string) (*[]model.Organization, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizationsByGitSource\", gitSource)\n\tret0, _ := ret[0].(*[]model.Organization)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}" ]
[ "0.6828111", "0.6554849", "0.6530076", "0.63020647", "0.6148606", "0.606935", "0.5953892", "0.59367347", "0.5910168", "0.59094334", "0.5902483", "0.58443344", "0.5835829", "0.58291733", "0.57650656", "0.57641447", "0.5744629", "0.5714246", "0.5628556", "0.56178296", "0.5598386", "0.5577042", "0.5549881", "0.5549881", "0.5543148", "0.55228144", "0.5517429", "0.5507727", "0.54926884", "0.5485471", "0.5478495", "0.5472668", "0.54708064", "0.5464628", "0.54610187", "0.5458399", "0.54557914", "0.5455295", "0.5437118", "0.54315865", "0.5431167", "0.5427684", "0.5427173", "0.5412272", "0.5409651", "0.53942513", "0.53863704", "0.53724825", "0.53714114", "0.5361704", "0.53550535", "0.53467005", "0.53406423", "0.53352255", "0.53255063", "0.5318682", "0.5288245", "0.52853554", "0.5281796", "0.5275493", "0.52640104", "0.52558655", "0.52100956", "0.52083963", "0.5175965", "0.5170637", "0.51691705", "0.5167311", "0.51633376", "0.51505136", "0.5149228", "0.5132702", "0.511692", "0.51138663", "0.5091148", "0.50867313", "0.50812155", "0.5079766", "0.50750005", "0.50651056", "0.5065082", "0.5062904", "0.5043338", "0.50419074", "0.50392866", "0.50355196", "0.5024494", "0.5021962", "0.5016389", "0.5013484", "0.50121576", "0.5006952", "0.49908462", "0.49811122", "0.4971362", "0.4935844", "0.48842308", "0.48744163", "0.48636264", "0.48376897" ]
0.67298853
1
NewOpenIDConnectWellKnownConfiguration generates a new OpenIDConnectWellKnownConfiguration.
func NewOpenIDConnectWellKnownConfiguration(c *schema.IdentityProvidersOpenIDConnect) (config OpenIDConnectWellKnownConfiguration) { config = OpenIDConnectWellKnownConfiguration{ OAuth2WellKnownConfiguration: OAuth2WellKnownConfiguration{ CommonDiscoveryOptions: CommonDiscoveryOptions{ SubjectTypesSupported: []string{ SubjectTypePublic, SubjectTypePairwise, }, ResponseTypesSupported: []string{ ResponseTypeAuthorizationCodeFlow, ResponseTypeImplicitFlowIDToken, ResponseTypeImplicitFlowToken, ResponseTypeImplicitFlowBoth, ResponseTypeHybridFlowIDToken, ResponseTypeHybridFlowToken, ResponseTypeHybridFlowBoth, }, GrantTypesSupported: []string{ GrantTypeAuthorizationCode, GrantTypeImplicit, GrantTypeClientCredentials, GrantTypeRefreshToken, }, ResponseModesSupported: []string{ ResponseModeFormPost, ResponseModeQuery, ResponseModeFragment, ResponseModeJWT, ResponseModeFormPostJWT, ResponseModeQueryJWT, ResponseModeFragmentJWT, }, ScopesSupported: []string{ ScopeOfflineAccess, ScopeOpenID, ScopeProfile, ScopeGroups, ScopeEmail, }, ClaimsSupported: []string{ ClaimAuthenticationMethodsReference, ClaimAudience, ClaimAuthorizedParty, ClaimClientIdentifier, ClaimExpirationTime, ClaimIssuedAt, ClaimIssuer, ClaimJWTID, ClaimRequestedAt, ClaimSubject, ClaimAuthenticationTime, ClaimNonce, ClaimPreferredEmail, ClaimEmailVerified, ClaimEmailAlts, ClaimGroups, ClaimPreferredUsername, ClaimFullName, }, TokenEndpointAuthMethodsSupported: []string{ ClientAuthMethodClientSecretBasic, ClientAuthMethodClientSecretPost, ClientAuthMethodClientSecretJWT, ClientAuthMethodPrivateKeyJWT, ClientAuthMethodNone, }, TokenEndpointAuthSigningAlgValuesSupported: []string{ SigningAlgHMACUsingSHA256, SigningAlgHMACUsingSHA384, SigningAlgHMACUsingSHA512, SigningAlgRSAUsingSHA256, SigningAlgRSAUsingSHA384, SigningAlgRSAUsingSHA512, SigningAlgECDSAUsingP256AndSHA256, SigningAlgECDSAUsingP384AndSHA384, SigningAlgECDSAUsingP521AndSHA512, SigningAlgRSAPSSUsingSHA256, SigningAlgRSAPSSUsingSHA384, 
SigningAlgRSAPSSUsingSHA512, }, }, OAuth2DiscoveryOptions: OAuth2DiscoveryOptions{ CodeChallengeMethodsSupported: []string{ PKCEChallengeMethodSHA256, }, RevocationEndpointAuthMethodsSupported: []string{ ClientAuthMethodClientSecretBasic, ClientAuthMethodClientSecretPost, ClientAuthMethodClientSecretJWT, ClientAuthMethodPrivateKeyJWT, ClientAuthMethodNone, }, RevocationEndpointAuthSigningAlgValuesSupported: []string{ SigningAlgHMACUsingSHA256, SigningAlgHMACUsingSHA384, SigningAlgHMACUsingSHA512, SigningAlgRSAUsingSHA256, SigningAlgRSAUsingSHA384, SigningAlgRSAUsingSHA512, SigningAlgECDSAUsingP256AndSHA256, SigningAlgECDSAUsingP384AndSHA384, SigningAlgECDSAUsingP521AndSHA512, SigningAlgRSAPSSUsingSHA256, SigningAlgRSAPSSUsingSHA384, SigningAlgRSAPSSUsingSHA512, }, IntrospectionEndpointAuthMethodsSupported: []string{ ClientAuthMethodClientSecretBasic, ClientAuthMethodNone, }, }, OAuth2JWTIntrospectionResponseDiscoveryOptions: &OAuth2JWTIntrospectionResponseDiscoveryOptions{ IntrospectionSigningAlgValuesSupported: []string{ SigningAlgRSAUsingSHA256, SigningAlgNone, }, }, OAuth2PushedAuthorizationDiscoveryOptions: &OAuth2PushedAuthorizationDiscoveryOptions{ RequirePushedAuthorizationRequests: c.PAR.Enforce, }, OAuth2IssuerIdentificationDiscoveryOptions: &OAuth2IssuerIdentificationDiscoveryOptions{ AuthorizationResponseIssuerParameterSupported: true, }, }, OpenIDConnectDiscoveryOptions: OpenIDConnectDiscoveryOptions{ IDTokenSigningAlgValuesSupported: []string{ SigningAlgRSAUsingSHA256, SigningAlgNone, }, UserinfoSigningAlgValuesSupported: []string{ SigningAlgRSAUsingSHA256, SigningAlgNone, }, RequestObjectSigningAlgValuesSupported: []string{ SigningAlgRSAUsingSHA256, SigningAlgRSAUsingSHA384, SigningAlgRSAUsingSHA512, SigningAlgECDSAUsingP256AndSHA256, SigningAlgECDSAUsingP384AndSHA384, SigningAlgECDSAUsingP521AndSHA512, SigningAlgRSAPSSUsingSHA256, SigningAlgRSAPSSUsingSHA384, SigningAlgRSAPSSUsingSHA512, SigningAlgNone, }, }, 
OpenIDConnectPromptCreateDiscoveryOptions: &OpenIDConnectPromptCreateDiscoveryOptions{ PromptValuesSupported: []string{ PromptNone, PromptConsent, }, }, OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions: &OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions{ AuthorizationSigningAlgValuesSupported: []string{ SigningAlgRSAUsingSHA256, }, }, } for _, alg := range c.Discovery.ResponseObjectSigningAlgs { if !utils.IsStringInSlice(alg, config.IDTokenSigningAlgValuesSupported) { config.IDTokenSigningAlgValuesSupported = append(config.IDTokenSigningAlgValuesSupported, alg) } if !utils.IsStringInSlice(alg, config.UserinfoSigningAlgValuesSupported) { config.UserinfoSigningAlgValuesSupported = append(config.UserinfoSigningAlgValuesSupported, alg) } if !utils.IsStringInSlice(alg, config.IntrospectionSigningAlgValuesSupported) { config.IntrospectionSigningAlgValuesSupported = append(config.IntrospectionSigningAlgValuesSupported, alg) } if !utils.IsStringInSlice(alg, config.AuthorizationSigningAlgValuesSupported) { config.AuthorizationSigningAlgValuesSupported = append(config.AuthorizationSigningAlgValuesSupported, alg) } } sort.Sort(SortedSigningAlgs(config.IDTokenSigningAlgValuesSupported)) sort.Sort(SortedSigningAlgs(config.UserinfoSigningAlgValuesSupported)) sort.Sort(SortedSigningAlgs(config.IntrospectionSigningAlgValuesSupported)) sort.Sort(SortedSigningAlgs(config.AuthorizationSigningAlgValuesSupported)) if c.EnablePKCEPlainChallenge { config.CodeChallengeMethodsSupported = append(config.CodeChallengeMethodsSupported, PKCEChallengeMethodPlain) } return config }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (opts OpenIDConnectWellKnownConfiguration) Copy() (optsCopy OpenIDConnectWellKnownConfiguration) {\n\toptsCopy = OpenIDConnectWellKnownConfiguration{\n\t\tOAuth2WellKnownConfiguration: opts.OAuth2WellKnownConfiguration.Copy(),\n\t\tOpenIDConnectDiscoveryOptions: opts.OpenIDConnectDiscoveryOptions,\n\t}\n\n\tif opts.OpenIDConnectFrontChannelLogoutDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectFrontChannelLogoutDiscoveryOptions = &OpenIDConnectFrontChannelLogoutDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectFrontChannelLogoutDiscoveryOptions = *opts.OpenIDConnectFrontChannelLogoutDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectBackChannelLogoutDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectBackChannelLogoutDiscoveryOptions = &OpenIDConnectBackChannelLogoutDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectBackChannelLogoutDiscoveryOptions = *opts.OpenIDConnectBackChannelLogoutDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectSessionManagementDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectSessionManagementDiscoveryOptions = &OpenIDConnectSessionManagementDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectSessionManagementDiscoveryOptions = *opts.OpenIDConnectSessionManagementDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectRPInitiatedLogoutDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectRPInitiatedLogoutDiscoveryOptions = &OpenIDConnectRPInitiatedLogoutDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectRPInitiatedLogoutDiscoveryOptions = *opts.OpenIDConnectRPInitiatedLogoutDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectPromptCreateDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectPromptCreateDiscoveryOptions = &OpenIDConnectPromptCreateDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectPromptCreateDiscoveryOptions = *opts.OpenIDConnectPromptCreateDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions = 
&OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions = *opts.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions = &OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions = *opts.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions\n\t}\n\n\tif opts.OpenIDFederationDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDFederationDiscoveryOptions = &OpenIDFederationDiscoveryOptions{}\n\t\t*optsCopy.OpenIDFederationDiscoveryOptions = *opts.OpenIDFederationDiscoveryOptions\n\t}\n\n\treturn optsCopy\n}", "func NewDeviceEnrollmentWindowsHelloForBusinessConfiguration()(*DeviceEnrollmentWindowsHelloForBusinessConfiguration) {\n m := &DeviceEnrollmentWindowsHelloForBusinessConfiguration{\n DeviceEnrollmentConfiguration: *NewDeviceEnrollmentConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.deviceEnrollmentWindowsHelloForBusinessConfiguration\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func (opts OAuth2WellKnownConfiguration) Copy() (optsCopy OAuth2WellKnownConfiguration) {\n\toptsCopy = OAuth2WellKnownConfiguration{\n\t\tCommonDiscoveryOptions: opts.CommonDiscoveryOptions,\n\t\tOAuth2DiscoveryOptions: opts.OAuth2DiscoveryOptions,\n\t}\n\n\tif opts.OAuth2DeviceAuthorizationGrantDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2DeviceAuthorizationGrantDiscoveryOptions = &OAuth2DeviceAuthorizationGrantDiscoveryOptions{}\n\t\t*optsCopy.OAuth2DeviceAuthorizationGrantDiscoveryOptions = *opts.OAuth2DeviceAuthorizationGrantDiscoveryOptions\n\t}\n\n\tif opts.OAuth2MutualTLSClientAuthenticationDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2MutualTLSClientAuthenticationDiscoveryOptions = 
&OAuth2MutualTLSClientAuthenticationDiscoveryOptions{}\n\t\t*optsCopy.OAuth2MutualTLSClientAuthenticationDiscoveryOptions = *opts.OAuth2MutualTLSClientAuthenticationDiscoveryOptions\n\t}\n\n\tif opts.OAuth2IssuerIdentificationDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2IssuerIdentificationDiscoveryOptions = &OAuth2IssuerIdentificationDiscoveryOptions{}\n\t\t*optsCopy.OAuth2IssuerIdentificationDiscoveryOptions = *opts.OAuth2IssuerIdentificationDiscoveryOptions\n\t}\n\n\tif opts.OAuth2JWTIntrospectionResponseDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2JWTIntrospectionResponseDiscoveryOptions = &OAuth2JWTIntrospectionResponseDiscoveryOptions{}\n\t\t*optsCopy.OAuth2JWTIntrospectionResponseDiscoveryOptions = *opts.OAuth2JWTIntrospectionResponseDiscoveryOptions\n\t}\n\n\tif opts.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions = &OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions{}\n\t\t*optsCopy.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions = *opts.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions\n\t}\n\n\tif opts.OAuth2PushedAuthorizationDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2PushedAuthorizationDiscoveryOptions = &OAuth2PushedAuthorizationDiscoveryOptions{}\n\t\t*optsCopy.OAuth2PushedAuthorizationDiscoveryOptions = *opts.OAuth2PushedAuthorizationDiscoveryOptions\n\t}\n\n\treturn optsCopy\n}", "func (r *Routes) configureWellKnown(healthFunc func() bool) {\n\twellKnown := r.Group(\"/.well-known\")\n\t{\n\t\twellKnown.GET(\"/schema-discovery\", func(ctx *gin.Context) {\n\t\t\tdiscovery := struct {\n\t\t\t\tSchemaURL string `json:\"schema_url\"`\n\t\t\t\tSchemaType string `json:\"schema_type\"`\n\t\t\t\tUIURL string `json:\"ui_url\"`\n\t\t\t}{\n\t\t\t\tSchemaURL: \"/swagger.json\",\n\t\t\t\tSchemaType: \"swagger-2.0\",\n\t\t\t}\n\t\t\tctx.JSON(http.StatusOK, &discovery)\n\t\t})\n\t\twellKnown.GET(\"/health\", healthHandler(healthFunc))\n\t}\n\n\tr.GET(\"/swagger.json\", 
func(ctx *gin.Context) {\n\t\tctx.String(http.StatusOK, string(SwaggerJSON))\n\t})\n}", "func (r *Routes) configureWellKnown(healthFunc func() bool) {\n\twellKnown := r.Group(\"/.well-known\")\n\t{\n\t\twellKnown.GET(\"/schema-discovery\", func(ctx *gin.Context) {\n\t\t\tdiscovery := struct {\n\t\t\t\tSchemaURL string `json:\"schema_url\"`\n\t\t\t\tSchemaType string `json:\"schema_type\"`\n\t\t\t\tUIURL string `json:\"ui_url\"`\n\t\t\t}{\n\t\t\t\tSchemaURL: \"/swagger.json\",\n\t\t\t\tSchemaType: \"swagger-2.0\",\n\t\t\t}\n\t\t\tctx.JSON(http.StatusOK, &discovery)\n\t\t})\n\t\twellKnown.GET(\"/health\", healthHandler(healthFunc))\n\t}\n\n\tr.GET(\"/swagger.json\", func(ctx *gin.Context) {\n\t\tctx.String(http.StatusOK, string(SwaggerJSON))\n\t})\n}", "func CreateDeviceEnrollmentWindowsHelloForBusinessConfigurationFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {\n return NewDeviceEnrollmentWindowsHelloForBusinessConfiguration(), nil\n}", "func NewLogicAppTriggerEndpointConfiguration()(*LogicAppTriggerEndpointConfiguration) {\n m := &LogicAppTriggerEndpointConfiguration{\n CustomExtensionEndpointConfiguration: *NewCustomExtensionEndpointConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.logicAppTriggerEndpointConfiguration\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func (c *config) newConfig(redirect string) *oauth2.Config {\n\treturn &oauth2.Config{\n\t\tClientID: c.Client,\n\t\tClientSecret: c.Secret,\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: fmt.Sprintf(\"%s/site/oauth2/authorize\", c.URL),\n\t\t\tTokenURL: fmt.Sprintf(\"%s/site/oauth2/access_token\", c.URL),\n\t\t},\n\t\tRedirectURL: fmt.Sprintf(\"%s/authorize\", redirect),\n\t}\n}", "func createOpenAPIBuilderConfig() *common.Config {\n\treturn &common.Config{\n\t\tProtocolList: []string{\"https\"},\n\t\tIgnorePrefixes: 
[]string{\"/swaggerapi\"},\n\t\tInfo: &spec.Info{\n\t\t\tInfoProps: spec.InfoProps{\n\t\t\t\tTitle: \"Argo-Events\",\n\t\t\t\tVersion: \"v0.6\",\n\t\t\t},\n\t\t},\n\t}\n}", "func newCanaryConfig(provider config.Provider) (*Config, error) {\n\traw := provider.Get(ConfigurationKey)\n\tvar cfg Config\n\tif err := raw.Populate(&cfg); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load canary configuration with error: %v\", err)\n\t}\n\tif err := cfg.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &cfg, nil\n}", "func NewOidcConfigOK() *OidcConfigOK {\n\treturn &OidcConfigOK{}\n}", "func NewClientConfiguration(pfmServicesEnabled bool, isAutomaticBatchUpdateEnabled bool, isDevelopmentModeEnabled bool, isNonEuroAccountsSupported bool, isAutoCategorizationEnabled bool, mandatorLicense MandatorLicense, preferredConsentType PreferredConsentType, userNotificationCallbackUrl NullableString, userSynchronizationCallbackUrl NullableString, refreshTokensValidityPeriod int32, userAccessTokensValidityPeriod int32, clientAccessTokensValidityPeriod int32, maxUserLoginAttempts int32, transactionImportLimitation int32, isUserAutoVerificationEnabled bool, isMandatorAdmin bool, isWebScrapingEnabled bool, isXs2aEnabled bool, pinStorageAvailableInWebForm bool, paymentsEnabled bool, isStandalonePaymentsEnabled bool, availableBankGroups []string, products []Product, applicationName NullableString, finTSProductRegistrationNumber NullableString, storeSecretsAvailableInWebForm bool, supportSubjectDefault NullableString, supportEmail NullableString, aisWebFormMode WebFormMode, pisWebFormMode WebFormMode, pisStandaloneWebFormMode WebFormMode, betaBanksEnabled bool, categoryRestrictions []Category, autoDismountWebForm bool, corsAllowedOrigins []string, ) *ClientConfiguration {\n\tthis := ClientConfiguration{}\n\tthis.PfmServicesEnabled = pfmServicesEnabled\n\tthis.IsAutomaticBatchUpdateEnabled = isAutomaticBatchUpdateEnabled\n\tthis.IsDevelopmentModeEnabled = 
isDevelopmentModeEnabled\n\tthis.IsNonEuroAccountsSupported = isNonEuroAccountsSupported\n\tthis.IsAutoCategorizationEnabled = isAutoCategorizationEnabled\n\tthis.MandatorLicense = mandatorLicense\n\tthis.PreferredConsentType = preferredConsentType\n\tthis.UserNotificationCallbackUrl = userNotificationCallbackUrl\n\tthis.UserSynchronizationCallbackUrl = userSynchronizationCallbackUrl\n\tthis.RefreshTokensValidityPeriod = refreshTokensValidityPeriod\n\tthis.UserAccessTokensValidityPeriod = userAccessTokensValidityPeriod\n\tthis.ClientAccessTokensValidityPeriod = clientAccessTokensValidityPeriod\n\tthis.MaxUserLoginAttempts = maxUserLoginAttempts\n\tthis.TransactionImportLimitation = transactionImportLimitation\n\tthis.IsUserAutoVerificationEnabled = isUserAutoVerificationEnabled\n\tthis.IsMandatorAdmin = isMandatorAdmin\n\tthis.IsWebScrapingEnabled = isWebScrapingEnabled\n\tthis.IsXs2aEnabled = isXs2aEnabled\n\tthis.PinStorageAvailableInWebForm = pinStorageAvailableInWebForm\n\tthis.PaymentsEnabled = paymentsEnabled\n\tthis.IsStandalonePaymentsEnabled = isStandalonePaymentsEnabled\n\tthis.AvailableBankGroups = availableBankGroups\n\tthis.Products = products\n\tthis.ApplicationName = applicationName\n\tthis.FinTSProductRegistrationNumber = finTSProductRegistrationNumber\n\tthis.StoreSecretsAvailableInWebForm = storeSecretsAvailableInWebForm\n\tthis.SupportSubjectDefault = supportSubjectDefault\n\tthis.SupportEmail = supportEmail\n\tthis.AisWebFormMode = aisWebFormMode\n\tthis.PisWebFormMode = pisWebFormMode\n\tthis.PisStandaloneWebFormMode = pisStandaloneWebFormMode\n\tthis.BetaBanksEnabled = betaBanksEnabled\n\tthis.CategoryRestrictions = categoryRestrictions\n\tthis.AutoDismountWebForm = autoDismountWebForm\n\tthis.CorsAllowedOrigins = corsAllowedOrigins\n\treturn &this\n}", "func GetOpenIDConnectConfiguration(ctx context.Context, client *http.Client, url string) (config Configuration, err error) {\n\tif client == nil {\n\t\tclient = 
http.DefaultClient\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn config, errors.Wrap(err, \"authproxy/oidc: couldn't create HTTP request\")\n\t}\n\n\treq = req.WithContext(ctx)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn config, errors.Wrap(err, \"authproxy/oidc: HTTP request failed\")\n\t}\n\n\tdefer func() {\n\t\terrClose := resp.Body.Close()\n\t\tif errClose != nil {\n\t\t\terr = errors.Wrap(errClose, \"authproxy/oidc: couldn't close HTTP response body\")\n\t\t}\n\t}()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn config, errors.Errorf(\"authproxy:oidc received non-200 status code: %d\", resp.StatusCode)\n\t}\n\n\terr = json.NewDecoder(resp.Body).Decode(&config)\n\tif err != nil {\n\t\treturn config, errors.Wrap(err, \"authproxy/oidc: couldn't decode HTTP response body to JSON\")\n\t}\n\n\tconfig.FillDefaultValuesIfEmpty()\n\n\treturn config, nil\n}", "func NewOpenVpnConfig(name string, vlan, port uint16) (*OpenVpnCfg, error) {\n\tcmd := exec.Command(\"openvpn\", \"--genkey\", \"--secret\", \"/dev/fd/1\")\n\toutput, err := cmd.Output()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error invoking openvpn: %v\", err)\n\t}\n\treturn &OpenVpnCfg{\n\t\tName: name,\n\t\tPort: port,\n\t\tVlan: vlan,\n\t\tKey: string(output),\n\t}, nil\n}", "func NewGetConfigurationOK() *GetConfigurationOK {\n\treturn &GetConfigurationOK{}\n}", "func WithOpenIDConfig(oeConfig DiscoveryDoc) Option {\n\treturn func(p *KeycloakProvider) {\n\t\tp.oeConfig = oeConfig\n\t}\n}", "func newConfigV101() *configV1 {\n\tconf := new(configV1)\n\tconf.Version = mcCurrentConfigVersion\n\t// make sure to allocate map's otherwise Golang\n\t// exits silently without providing any errors\n\tconf.Hosts = make(map[string]*hostConfig)\n\tconf.Aliases = make(map[string]string)\n\n\tlocalHostConfig := new(hostConfig)\n\tlocalHostConfig.AccessKeyID = \"\"\n\tlocalHostConfig.SecretAccessKey = \"\"\n\n\ts3HostConf := 
new(hostConfig)\n\ts3HostConf.AccessKeyID = globalAccessKeyID\n\ts3HostConf.SecretAccessKey = globalSecretAccessKey\n\n\t// Your example host config\n\texampleHostConf := new(hostConfig)\n\texampleHostConf.AccessKeyID = globalAccessKeyID\n\texampleHostConf.SecretAccessKey = globalSecretAccessKey\n\n\tplayHostConfig := new(hostConfig)\n\tplayHostConfig.AccessKeyID = \"\"\n\tplayHostConfig.SecretAccessKey = \"\"\n\n\tdlHostConfig := new(hostConfig)\n\tdlHostConfig.AccessKeyID = \"\"\n\tdlHostConfig.SecretAccessKey = \"\"\n\n\tconf.Hosts[exampleHostURL] = exampleHostConf\n\tconf.Hosts[\"localhost:*\"] = localHostConfig\n\tconf.Hosts[\"127.0.0.1:*\"] = localHostConfig\n\tconf.Hosts[\"s3*.amazonaws.com\"] = s3HostConf\n\tconf.Hosts[\"play.minio.io:9000\"] = playHostConfig\n\tconf.Hosts[\"dl.minio.io:9000\"] = dlHostConfig\n\n\taliases := make(map[string]string)\n\taliases[\"s3\"] = \"https://s3.amazonaws.com\"\n\taliases[\"play\"] = \"https://play.minio.io:9000\"\n\taliases[\"dl\"] = \"https://dl.minio.io:9000\"\n\taliases[\"localhost\"] = \"http://localhost:9000\"\n\tconf.Aliases = aliases\n\n\treturn conf\n}", "func newConfig(appName string, pathToKeybase string, log Log, ignoreSnooze bool) (*config, error) {\n\tcfg := newDefaultConfig(appName, pathToKeybase, log, ignoreSnooze)\n\terr := cfg.load()\n\treturn &cfg, err\n}", "func newConfig(serviceName string) config {\n\t// Use stdlib to parse. If it's an invalid value and doesn't parse, log it\n\t// and keep going. It should already be false on error but we force it to\n\t// be extra clear that it's failing closed.\n\tinsecure, err := strconv.ParseBool(os.Getenv(\"OTEL_EXPORTER_OTLP_INSECURE\"))\n\tif err != nil {\n\t\tinsecure = false\n\t\tlog.Println(\"Invalid boolean value in OTEL_EXPORTER_OTLP_INSECURE. 
Try true or false.\")\n\t}\n\n\treturn config{\n\t\tservicename: serviceName,\n\t\tendpoint: os.Getenv(\"OTEL_EXPORTER_OTLP_ENDPOINT\"),\n\t\tinsecure: insecure,\n\t}\n}", "func newConfig() *config {\n\treturn &config{\n\t\tAddr: \":80\",\n\t\tCacheSize: 1000,\n\t\tLogLevel: \"info\",\n\t\tRequestTimeout: 3000,\n\t\tTargetAddr: \"https://places.aviasales.ru\",\n\t}\n}", "func NewAndroidCustomConfiguration()(*AndroidCustomConfiguration) {\n m := &AndroidCustomConfiguration{\n DeviceConfiguration: *NewDeviceConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.androidCustomConfiguration\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func NewIosExpeditedCheckinConfiguration()(*IosExpeditedCheckinConfiguration) {\n m := &IosExpeditedCheckinConfiguration{\n AppleExpeditedCheckinConfigurationBase: *NewAppleExpeditedCheckinConfigurationBase(),\n }\n odataTypeValue := \"#microsoft.graph.iosExpeditedCheckinConfiguration\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func NewMicrosoftAuthenticatorAuthenticationMethodConfiguration()(*MicrosoftAuthenticatorAuthenticationMethodConfiguration) {\n m := &MicrosoftAuthenticatorAuthenticationMethodConfiguration{\n AuthenticationMethodConfiguration: *NewAuthenticationMethodConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.microsoftAuthenticatorAuthenticationMethodConfiguration\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func NewGetWellKnownJSONWebKeysOK() *GetWellKnownJSONWebKeysOK {\n\treturn &GetWellKnownJSONWebKeysOK{}\n}", "func (r *oauthProxy) newOpenIDClient() (*oidc.Client, oidc.ProviderConfig, *http.Client, error) {\n\tvar err error\n\tvar config oidc.ProviderConfig\n\n\t// step: fix up the url if required, the underlining lib will add the .well-known/openid-configuration to the discovery url for us.\n\tif strings.HasSuffix(r.config.DiscoveryURL, \"/.well-known/openid-configuration\") {\n\t\tr.config.DiscoveryURL = strings.TrimSuffix(r.config.DiscoveryURL, 
\"/.well-known/openid-configuration\")\n\t}\n\n\t// step: create a idp http client\n\thc := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: func(_ *http.Request) (*url.URL, error) {\n\t\t\t\tif r.config.OpenIDProviderProxy != \"\" {\n\t\t\t\t\tidpProxyURL, err := url.Parse(r.config.OpenIDProviderProxy)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tr.log.Warn(\"invalid proxy address for open IDP provider proxy\", zap.Error(err))\n\t\t\t\t\t\treturn nil, nil\n\t\t\t\t\t}\n\t\t\t\t\treturn idpProxyURL, nil\n\t\t\t\t}\n\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: r.config.SkipOpenIDProviderTLSVerify,\n\t\t\t},\n\t\t},\n\t\tTimeout: time.Second * 10,\n\t}\n\n\t// step: attempt to retrieve the provider configuration\n\tcompleteCh := make(chan bool)\n\tgo func() {\n\t\tfor {\n\t\t\tr.log.Info(\"attempting to retrieve configuration discovery url\",\n\t\t\t\tzap.String(\"url\", r.config.DiscoveryURL),\n\t\t\t\tzap.String(\"timeout\", r.config.OpenIDProviderTimeout.String()))\n\t\t\tif config, err = oidc.FetchProviderConfig(hc, r.config.DiscoveryURL); err == nil {\n\t\t\t\tbreak // break and complete\n\t\t\t}\n\t\t\tr.log.Warn(\"failed to get provider configuration from discovery\", zap.Error(err))\n\t\t\ttime.Sleep(time.Second * 3)\n\t\t}\n\t\tcompleteCh <- true\n\t}()\n\t// wait for timeout or successful retrieval\n\tselect {\n\tcase <-time.After(r.config.OpenIDProviderTimeout):\n\t\treturn nil, config, nil, errors.New(\"failed to retrieve the provider configuration from discovery url\")\n\tcase <-completeCh:\n\t\tr.log.Info(\"successfully retrieved openid configuration from the discovery\")\n\t}\n\n\tclient, err := oidc.NewClient(oidc.ClientConfig{\n\t\tCredentials: oidc.ClientCredentials{\n\t\t\tID: r.config.ClientID,\n\t\t\tSecret: r.config.ClientSecret,\n\t\t},\n\t\tHTTPClient: hc,\n\t\tRedirectURL: fmt.Sprintf(\"%s/oauth/callback\", r.config.RedirectionURL),\n\t\tProviderConfig: config,\n\t\tScope: 
append(r.config.Scopes, oidc.DefaultScope...),\n\t})\n\tif err != nil {\n\t\treturn nil, config, hc, err\n\t}\n\t// start the provider sync for key rotation\n\tclient.SyncProviderConfig(r.config.DiscoveryURL)\n\n\treturn client, config, hc, nil\n}", "func NewDNSProviderConfig(config *Config) (*OVHApi, error) {\n\tif config == nil {\n\t\treturn nil, errors.New(\"ovh: the configuration of the DNS provider is nil\")\n\t}\n\n\tif config.APIEndpoint == \"\" || config.ApplicationKey == \"\" || config.ApplicationSecret == \"\" || config.ConsumerKey == \"\" {\n\t\treturn nil, errors.New(\"ovh: credentials missing\")\n\t}\n\tclient, err := ovh.NewClient(\n\t\tconfig.APIEndpoint,\n\t\tconfig.ApplicationKey,\n\t\tconfig.ApplicationSecret,\n\t\tconfig.ConsumerKey,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ovh: %w\", err)\n\t}\n\tclient.Client = config.HTTPClient\n\treturn &OVHApi{\n\t\tconfig: config,\n\t\tclient: client,\n\t}, nil\n}", "func NewConfig(configFile string) (*Config, error) {\n\n\tcfg := &Config{\n\t\tHost: \"0.0.0.0\",\n\t\tPort: 8080,\n\t\tAllowEmptyClientSecret: false,\n\t\tScopes: []string{\"openid\", \"profile\", \"email\", \"offline_access\"},\n\t\tUsernameClaim: \"nickname\",\n\t\tEmailClaim: \"\",\n\t\tServeTLS: false,\n\t\tCertFile: \"/etc/gangway/tls/tls.crt\",\n\t\tKeyFile: \"/etc/gangway/tls/tls.key\",\n\t\tClusterCAPath: \"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\",\n\t\tHTTPPath: \"\",\n\t}\n\n\tif configFile != \"\" {\n\t\tdata, err := ioutil.ReadFile(configFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = yaml.Unmarshal([]byte(data), cfg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\terr := envconfig.Process(\"gangway\", cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cfg.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check for trailing slash on HTTPPath and remove\n\tcfg.HTTPPath = strings.TrimRight(cfg.HTTPPath, \"/\")\n\n\treturn cfg, nil\n}", "func 
NewWindows10XVpnConfiguration()(*Windows10XVpnConfiguration) {\n m := &Windows10XVpnConfiguration{\n DeviceManagementResourceAccessProfileBase: *NewDeviceManagementResourceAccessProfileBase(),\n }\n odataTypeValue := \"#microsoft.graph.windows10XVpnConfiguration\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func newConfigmap(customConfigmap *customConfigMapv1alpha1.CustomConfigMap) *corev1.ConfigMap {\n\tlabels := map[string]string{\n\t\t\"name\": customConfigmap.Spec.ConfigMapName,\n\t\t\"customConfigName\": customConfigmap.Name,\n\t\t\"latest\": \"true\",\n\t}\n\tname := fmt.Sprintf(\"%s-%s\", customConfigmap.Spec.ConfigMapName, RandomSequence(5))\n\tconfigName := NameValidation(name)\n\treturn &corev1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: configName,\n\t\t\tNamespace: customConfigmap.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(customConfigmap, customConfigMapv1alpha1.SchemeGroupVersion.WithKind(\"CustomConfigMap\")),\n\t\t\t},\n\t\t\tLabels: labels,\n\t\t},\n\t\tData: customConfigmap.Spec.Data,\n\t\tBinaryData: customConfigmap.Spec.BinaryData,\n\t}\n}", "func NewVpnConfiguration()(*VpnConfiguration) {\n m := &VpnConfiguration{\n DeviceConfiguration: *NewDeviceConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.vpnConfiguration\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func newConfig(initiator bool) noise.Config {\n\treturn noise.Config{\n\t\tCipherSuite: cipherSuite,\n\t\tPattern: noise.HandshakeNK,\n\t\tInitiator: initiator,\n\t\tPrologue: []byte(\"dnstt 2020-04-13\"),\n\t}\n}", "func NewAndroidWorkProfileGeneralDeviceConfiguration()(*AndroidWorkProfileGeneralDeviceConfiguration) {\n m := &AndroidWorkProfileGeneralDeviceConfiguration{\n DeviceConfiguration: *NewDeviceConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.androidWorkProfileGeneralDeviceConfiguration\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func 
createWebhookConfigurationReadyNamespace(f *framework.Framework, namespace string) {\n\tctx := f.Context\n\t_, err := f.VclusterClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: namespace + \"-markers\",\n\t\t\tLabels: map[string]string{uniqueName + \"-markers\": \"true\"},\n\t\t},\n\t}, metav1.CreateOptions{})\n\tframework.ExpectNoError(err, \"creating namespace for webhook configuration ready markers\")\n}", "func NewApp(name string, scopes []string, redirectURI string) (g *Gondole, err error) {\n\t// Load configuration, will register if none is found\n\tcnf, err := LoadConfig(name)\n\tif err != nil {\n\t\t// Nothing exist yet\n\t\tcnf := Config{\n\t\t\tDefault: name,\n\t\t}\n\t\terr = cnf.Write()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"error: can not write config for %s\", name)\n\t\t}\n\n\t\t// Now register this through OAuth\n\t\tif scopes == nil {\n\t\t\tscopes = ourScopes\n\t\t}\n\n\t\tg, err = registerApplication(name, scopes, redirectURI)\n\n\t} else {\n\t\tg = &Gondole{\n\t\t\tName: cnf.Name,\n\t\t\tID: cnf.ID,\n\t\t\tSecret: cnf.BearerToken,\n\t\t}\n\t}\n\n\treturn\n}", "func newConfig() *bqConfig {\n\treturn &bqConfig{\n\t\tarenaSize: cDefaultArenaSize,\n\t\tmaxInMemArenas: cMinMaxInMemArenas,\n\t}\n}", "func MockOpenIDConnect(t *testing.T) string {\n\tconst discovery = `{\n\t\t\"issuer\": \"https://example.com/\",\n\t\t\"authorization_endpoint\": \"https://example.com/authorize\",\n\t\t\"token_endpoint\": \"https://example.com/token\",\n\t\t\"userinfo_endpoint\": \"https://example.com/userinfo\",\n\t\t\"jwks_uri\": \"https://example.com/.well-known/jwks.json\",\n\t\t\"scopes_supported\": [\n\t\t\t\"pets_read\",\n\t\t\t\"pets_write\",\n\t\t\t\"admin\"\n\t\t],\n\t\t\"response_types_supported\": [\n\t\t\t\"code\",\n\t\t\t\"id_token\",\n\t\t\t\"token id_token\"\n\t\t],\n\t\t\"token_endpoint_auth_methods_supported\": [\n\t\t\t\"client_secret_basic\"\n\t\t]\n\t}`\n\tsrv := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, err := w.Write([]byte(discovery))\n\t\trequire.NoError(t, err)\n\t}))\n\tt.Cleanup(func() {\n\t\tsrv.Close()\n\t})\n\treturn srv.URL + \"/.well-known/openid-configuration\"\n}", "func newConfig() (*rest.Config, error) {\n // try in cluster config first, it should fail quickly on lack of env vars\n cfg, err := inClusterConfig()\n if err != nil {\n cfg, err = clientcmd.BuildConfigFromFlags(\"\", clientcmd.RecommendedHomeFile)\n if err != nil {\n return nil, errors.Wrap(err, \"failed to get InClusterConfig and Config from kube_config\")\n }\n }\n return cfg, nil\n}", "func newConfig() (config quick.Config, err error) {\n\tconf := newConfigV101()\n\tconfig, err = quick.New(conf)\n\tif err != nil {\n\t\treturn nil, NewIodine(iodine.New(err, nil))\n\t}\n\treturn config, nil\n}", "func (configMapTemplateFactory) NewExisting(name string, ci client.Interface, gc interfaces.GraphContext) interfaces.Resource {\n\treturn report.SimpleReporter{BaseResource: existingConfigMap{Name: name, Client: ci.ConfigMaps()}}\n}", "func NewConfiguration() *Configuration {\n\tcfg := &Configuration{\n\t\tBasePath: \"https://api.payrobot.io\",\n\t\tDefaultHeader: make(map[string]string),\n\t\tUserAgent: \"OpenAPI-Generator/1.0.0/go\",\n\t\tDebug: false,\n\t\tServers: []ServerConfiguration{\n\t\t\t{\n\t\t\t\tUrl: \"https://api.payrobot.io\",\n\t\t\t\tDescription: \"Production server (uses live data)\",\n\t\t\t},\n\t\t},\n\t}\n\treturn cfg\n}", "func New() *Configuration {\n\treturn &Configuration{config: koanf.New(\".\")}\n}", "func newClientConfig(fname, id, name, serverKey, serverUrl string) (err error) {\n\tconfig := 
Config{\n\t\tid,\n\t\tname,\n\t\t\"client\",\n\t\t\"\",\n\t\tserverKey,\n\t\tserverUrl,\n\t\tDEFAULT_PROCESS_USER,\n\t\tDEFAULT_PROCESS_LOCK,\n\t\tDEFAULT_PROCESS_LOG,\n\t\tDEFAULT_BASE_DIR,\n\t\tDEFAULT_DATA_DIR,\n\t\tDEFAULT_HTTP_LISTEN,\n\t\tfname,\n\t}\n\n\treturn SaveConfig(config)\n}", "func NewApplicationConfig() *AppSettings {\r\n\tconf := AppSettings{}\r\n\r\n\tp := os.Getenv(\"REPORT_DB_PORT\")\r\n\tport, err := strconv.Atoi(p)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tconf.Database = ServerDetail{\r\n\t\tEndpoint: os.Getenv(\"REPORT_DB_HOST\"),\r\n\t\tPort: port,\r\n\t\tUsername: os.Getenv(\"REPORT_DB_USER\"),\r\n\t\tPassword: os.Getenv(\"REPORT_DB_PASSWORD\"),\r\n\t\tName: os.Getenv(\"REPORT_DB_NAME\"),\r\n\t}\r\n\r\n\tb := os.Getenv(\"REPORT_OAUTH2_VERIFY_SSL\")\r\n\tverify := false\r\n\tif b == \"true\" || b == \"t\" || b == \"\" {\r\n\t\tverify = true\r\n\t}\r\n\r\n\tconf.OAuth2 = Detail{\r\n\t\tEndpoint: os.Getenv(\"REPORT_OAUTH2_BACKEND_URI\"),\r\n\t\tEndpointWeb: os.Getenv(\"REPORT_OAUTH2_WEB_URI\"),\r\n\t\tUsername: os.Getenv(\"REPORT_CLIENT_ID\"),\r\n\t\tPassword: os.Getenv(\"REPORT_CLIENT_SECRET\"),\r\n\t\tScope: os.Getenv(\"REPORT_OAUTH2_SCOPE\"),\r\n\t\tRedirectURI: os.Getenv(\"REPORT_REDIRECT_URI\"),\r\n\t\tName: os.Getenv(\"REPORT_OAUTH2_SERVICE_NAME\"),\r\n\t\tSSLVerify: verify,\r\n\t}\r\n\r\n\tconf.Admin = Admin{\r\n\t\tAPIKey: os.Getenv(\"REPORT_OAUTH2_API\"),\r\n\t}\r\n\r\n\tbo := os.Getenv(\"REPORT_BILLPAY_VERIFY_SSL\")\r\n\tverify2 := false\r\n\tif bo == \"true\" || bo == \"t\" || bo == \"\" {\r\n\t\tverify2 = true\r\n\t}\r\n\r\n\tconf.Billpay = Billpay{\r\n\t\tEndpoint: os.Getenv(\"REPORT_BILLPAY_URI\"),\r\n\t\tAPIKey: os.Getenv(\"REPORT_BILLPAY_APIKEY\"),\r\n\t\tCACert: os.Getenv(\"REPORT_BILLPAY_CACERT\"),\r\n\t\tSSLVerify: verify2,\r\n\t}\r\n\r\n\t// Log\r\n\tconf.Log = Log{\r\n\t\tLevel: os.Getenv(\"REPORT_LOG_LEVEL\"),\r\n\t\tFormat: os.Getenv(\"REPORT_LOG_FORMAT\"),\r\n\t}\r\n\r\n\treturn &conf\r\n}", "func 
newIoTConfigs(c *IotV1alpha1Client, namespace string) *ioTConfigs {\n\treturn &ioTConfigs{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func new() exampleInterface {\n\treturn config{}\n}", "func newKongs(c *KongV1alpha1Client, namespace string) *kongs {\n\treturn &kongs{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func NewWin32LobAppRegistryRule()(*Win32LobAppRegistryRule) {\n m := &Win32LobAppRegistryRule{\n Win32LobAppRule: *NewWin32LobAppRule(),\n }\n odataTypeValue := \"#microsoft.graph.win32LobAppRegistryRule\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func NewCreateanewPbxDeviceConfigRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/pbxdeviceconfigs\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", queryUrl.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treturn req, nil\n}", "func NewOAUTHKey() *OAUTHKey {\n\n\treturn &OAUTHKey{\n\t\tModelVersion: 1,\n\t}\n}", "func newPostBuildOpenAPIObjectFunc(config ServerConfig, container *restful.Container) restfulSpec.PostBuildSwaggerObjectFunc {\n\treturn func(swo *spec.Swagger) {\n\t\tswo.Host = config.OpenAPI.Host\n\t\tswo.BasePath = config.OpenAPI.BasePath\n\t\tswo.Schemes = config.OpenAPI.Schemas\n\n\t\tvar title, description string\n\t\tif config.Name != \"\" {\n\t\t\ttitle = config.Name\n\t\t} else {\n\t\t\ttitle = config.OpenAPI.Spec.Title\n\t\t}\n\t\tif config.Description != \"\" {\n\t\t\tdescription = config.Description\n\t\t} else {\n\t\t\tdescription = config.OpenAPI.Spec.Description\n\t\t}\n\t\tswo.Info = &spec.Info{\n\t\t\tInfoProps: spec.InfoProps{\n\t\t\t\tTitle: 
title,\n\t\t\t\tDescription: description,\n\t\t\t\tContact: &spec.ContactInfo{\n\t\t\t\t\tContactInfoProps: spec.ContactInfoProps{\n\t\t\t\t\t\tName: config.OpenAPI.Spec.Contact.Name,\n\t\t\t\t\t\tEmail: config.OpenAPI.Spec.Contact.Email,\n\t\t\t\t\t\tURL: config.OpenAPI.Spec.Contact.URL,\n\t\t\t\t\t},\n\t\t\t\t},\n\n\t\t\t\tLicense: &spec.License{\n\t\t\t\t\tLicenseProps: spec.LicenseProps{\n\t\t\t\t\t\tName: config.OpenAPI.Spec.License.Name,\n\t\t\t\t\t\tURL: config.OpenAPI.Spec.License.URL,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVersion: config.OpenAPI.Spec.Version,\n\t\t\t},\n\t\t}\n\n\t\tvar nTags []spec.Tag\n\t\tvar tags []OpenapiTagConfig\n\t\tif len(config.OpenAPI.Tags) > 0 {\n\t\t\ttags = config.OpenAPI.Tags\n\t\t} else {\n\t\t\ttags = config.OpenAPI.Tags\n\t\t}\n\t\tfor _, tag := range tags {\n\t\t\tnTag := spec.Tag{TagProps: spec.TagProps{Name: tag.Name, Description: tag.Description}}\n\n\t\t\tnTags = append(nTags, nTag)\n\t\t}\n\t\tswo.Tags = nTags\n\t\t// setup security definitions\n\t\tif config.OpenAPI.Auth == \"basic\" {\n\t\t\tswo.SecurityDefinitions = map[string]*spec.SecurityScheme{\n\t\t\t\t\"basicAuth\": spec.BasicAuth(),\n\t\t\t}\n\t\t\tauth := make(map[string][]string)\n\t\t\tauth[\"basicAuth\"] = []string{}\n\t\t\tswo.Security = append(swo.Security, auth)\n\t\t} else if config.OpenAPI.Auth == \"jwt\" {\n\t\t\tswo.SecurityDefinitions = map[string]*spec.SecurityScheme{\n\t\t\t\t\"jwt\": spec.APIKeyAuth(\"Authorization\", \"header\"),\n\t\t\t}\n\t\t\tenrichSwaggerObjectSecurity(swo, container)\n\t\t}\n\n\t}\n}", "func newConfig() (*config, error) {\n\tec2Metadata := ec2metadata.New(session.Must(session.NewSession()))\n\tregion, err := ec2Metadata.Region()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get region from ec2 metadata\")\n\t}\n\n\tinstanceID, err := ec2Metadata.GetMetadata(\"instance-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get instance id from ec2 metadata\")\n\t}\n\n\tmac, err := 
ec2Metadata.GetMetadata(\"mac\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get mac from ec2 metadata\")\n\t}\n\n\tsecurityGroups, err := ec2Metadata.GetMetadata(\"security-groups\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get security groups from ec2 metadata\")\n\t}\n\n\tinterfaces, err := ec2Metadata.GetMetadata(\"network/interfaces/macs\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get interfaces from ec2 metadata\")\n\t}\n\n\tsubnet, err := ec2Metadata.GetMetadata(\"network/interfaces/macs/\" + mac + \"/subnet-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get subnet from ec2 metadata\")\n\t}\n\n\tvpc, err := ec2Metadata.GetMetadata(\"network/interfaces/macs/\" + mac + \"/vpc-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get vpc from ec2 metadata\")\n\t}\n\n\treturn &config{region: region,\n\t\tsubnet: subnet,\n\t\tindex: int64(len(strings.Split(interfaces, \"\\n\"))),\n\t\tinstanceID: instanceID,\n\t\tsecurityGroups: strings.Split(securityGroups, \"\\n\"),\n\t\tvpc: vpc,\n\t}, nil\n}", "func newOutboundConn(c net.Conn, s *Server, conf *config.FeedConfig) Conn {\n\n\tsname := s.Name()\n\n\tif len(sname) == 0 {\n\t\tsname = \"nntp.anon.tld\"\n\t}\n\tstorage := s.Storage\n\tif storage == nil {\n\t\tstorage = store.NewNullStorage()\n\t}\n\treturn &v1OBConn{\n\t\tconf: conf,\n\t\tC: v1Conn{\n\t\t\thooks: s,\n\t\t\tstate: ConnState{\n\t\t\t\tFeedName: conf.Name,\n\t\t\t\tHostName: conf.Addr,\n\t\t\t\tOpen: true,\n\t\t\t},\n\t\t\tserverName: sname,\n\t\t\tstorage: storage,\n\t\t\tC: textproto.NewConn(c),\n\t\t\tconn: c,\n\t\t\thdrio: message.NewHeaderIO(),\n\t\t},\n\t}\n}", "func NewAuthenticationCombinationConfiguration()(*AuthenticationCombinationConfiguration) {\n m := &AuthenticationCombinationConfiguration{\n Entity: *NewEntity(),\n }\n return m\n}", "func (d *dexterOIDChttp) createOauth2Config() error {\n\t// setup oidc client 
context\n\tctx := oidc.ClientContext(context.Background(), d.httpClient)\n\n\t// populate oauth2 config\n\td.Oauth2Config.ClientID = oidcDataHTTP.clientID\n\td.Oauth2Config.ClientSecret = oidcDataHTTP.clientSecret\n\td.Oauth2Config.RedirectURL = oidcDataHTTP.callback\n\n\tswitch oidcDataHTTP.endpoint {\n\tcase \"azure\":\n\t\td.Oauth2Config.Endpoint = microsoft.AzureADEndpoint(oidcDataHTTP.azureTenant)\n\t\td.Oauth2Config.Scopes = []string{oidc.ScopeOpenID, oidc.ScopeOfflineAccess, \"email\"}\n\tcase \"google\":\n\t\td.Oauth2Config.Endpoint = google.Endpoint\n\t\td.Oauth2Config.Scopes = []string{oidc.ScopeOpenID, \"profile\", \"email\"}\n\tdefault:\n\t\t// Attempt to use endpoint as generic issuer if it is a valid URL\n\t\t_, err := url.Parse(oidcDataHTTP.endpoint)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unsupported endpoint: %s\", oidcDataHTTP.endpoint)\n\t\t}\n\n\t\t// Attempt to gather endpoint information via discovery\n\t\tgenericProvider, err := oidc.NewProvider(ctx, oidcDataHTTP.endpoint)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\td.Oauth2Config.Endpoint = genericProvider.Endpoint()\n\t\td.Oauth2Config.Scopes = []string{oidc.ScopeOpenID, \"profile\", \"email\"}\n\t}\n\n\t// Append additional specified scopes\n\td.Oauth2Config.Scopes = append(d.Oauth2Config.Scopes, d.scopes...)\n\n\treturn nil\n}", "func (o *OIDC) Init(config Config) (err error) {\n\tswitch {\n\tcase o.Type == \"\":\n\t\treturn errors.New(\"type cannot be empty\")\n\tcase o.Name == \"\":\n\t\treturn errors.New(\"name cannot be empty\")\n\tcase o.ClientID == \"\":\n\t\treturn errors.New(\"clientID cannot be empty\")\n\tcase o.ConfigurationEndpoint == \"\":\n\t\treturn errors.New(\"configurationEndpoint cannot be empty\")\n\t}\n\n\t// Validate listenAddress if given\n\tif o.ListenAddress != \"\" {\n\t\tif _, _, err := net.SplitHostPort(o.ListenAddress); err != nil {\n\t\t\treturn errors.Wrap(err, \"error parsing listenAddress\")\n\t\t}\n\t}\n\n\t// Decode and validate 
openid-configuration endpoint\n\tu, err := url.Parse(o.ConfigurationEndpoint)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error parsing %s\", o.ConfigurationEndpoint)\n\t}\n\tif !strings.Contains(u.Path, \"/.well-known/openid-configuration\") {\n\t\tu.Path = path.Join(u.Path, \"/.well-known/openid-configuration\")\n\t}\n\tif err := getAndDecode(u.String(), &o.configuration); err != nil {\n\t\treturn err\n\t}\n\tif err := o.configuration.Validate(); err != nil {\n\t\treturn errors.Wrapf(err, \"error parsing %s\", o.ConfigurationEndpoint)\n\t}\n\t// Replace {tenantid} with the configured one\n\tif o.TenantID != \"\" {\n\t\to.configuration.Issuer = strings.ReplaceAll(o.configuration.Issuer, \"{tenantid}\", o.TenantID)\n\t}\n\t// Get JWK key set\n\to.keyStore, err = newKeyStore(o.configuration.JWKSetURI)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.ctl, err = NewController(o, o.Claims, config, o.Options)\n\treturn\n}", "func NewBgpConfiguration()(*BgpConfiguration) {\n m := &BgpConfiguration{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func newConfig() Config {\n\treturn Config{\n\t\tDefaultContainerConfig: newDefaultContainerConfig(),\n\t\tContainersConfig: map[string]ContainerConfig{},\n\t\tExclude: []string{},\n\t}\n}", "func NewDNSProvider() (*OVHApi, error) {\n\tvalues, err := env.Get(EnvEndpoint, EnvApplicationKey, EnvApplicationSecret, EnvConsumerKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ovh: %w\", err)\n\t}\n\n\tconfig := NewDefaultConfig()\n\tconfig.APIEndpoint = values[EnvEndpoint]\n\tconfig.ApplicationKey = values[EnvApplicationKey]\n\tconfig.ApplicationSecret = values[EnvApplicationSecret]\n\tconfig.ConsumerKey = values[EnvConsumerKey]\n\n\treturn NewDNSProviderConfig(config)\n}", "func CreateLogicAppTriggerEndpointConfigurationFromDiscriminatorValue(parseNode 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {\n return NewLogicAppTriggerEndpointConfiguration(), nil\n}", "func NewCreateanewPbxDeviceConfigRequest(server string, body CreateanewPbxDeviceConfigJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewCreateanewPbxDeviceConfigRequestWithBody(server, \"application/json\", bodyReader)\n}", "func New(conf map[string]string) *Conf {\n\treturn &Conf{\n\t\tc: conf,\n\t}\n}", "func newExternalAuth(conf *Config) {\n\tgomniauth.SetSecurityKey(conf.Auth.Facebook.Key)\n\tgomniauth.WithProviders(\n\t\tfacebook.New(\n\t\t\tconf.Auth.Facebook.Key,\n\t\t\tconf.Auth.Facebook.Secret,\n\t\t\tconf.Auth.Facebook.URL,\n\t\t),\n\t\tgoogle.New(\n\t\t\tconf.Auth.Google.Key,\n\t\t\tconf.Auth.Google.Secret,\n\t\t\tconf.Auth.Google.URL,\n\t\t),\n\t)\n}", "func newOAPI(c container.Container) (x OAPI, err error) {\n\tx = OAPI{c: c}\n\terr = json.Unmarshal(c.Bytes(), &x.o.OpenAPI)\n\treturn x, err\n}", "func newConfig(envParams envParams) error {\n\t// Initialize server config.\n\tsrvCfg := newServerConfigV14()\n\n\t// If env is set for a fresh start, save them to config file.\n\tif globalIsEnvCreds {\n\t\tsrvCfg.SetCredential(envParams.creds)\n\t}\n\n\tif globalIsEnvBrowser {\n\t\tsrvCfg.SetBrowser(envParams.browser)\n\t}\n\n\t// Create config path.\n\tif err := createConfigDir(); err != nil {\n\t\treturn err\n\t}\n\n\t// hold the mutex lock before a new config is assigned.\n\t// Save the new config globally.\n\t// unlock the mutex.\n\tserverConfigMu.Lock()\n\tserverConfig = srvCfg\n\tserverConfigMu.Unlock()\n\n\t// Save config into file.\n\treturn serverConfig.Save()\n}", "func newConfiguration() *configuration {\n\treturn &configuration{\n\t\tUseSSL: true,\n\t\tLocation: 
\"us-east-1\",\n\t\tMaxBackups: 5,\n\t\tBackupPrefix: \"backup-\",\n\t}\n}", "func newConfig() *config {\n\t/*According to Liqo Agent installation process, first check if\n\tuser has defined XDG_DATA_HOME env variable. Otherwise, use the\n\tfallback directory according to XDG specifications\n\t(www.freedesktop.com)*/\n\tXDGBaseDir, present := os.LookupEnv(\"XDG_DATA_HOME\")\n\tif !present {\n\t\tXDGBaseDir = filepath.Join(os.Getenv(\"HOME\"), \".local/share\")\n\t}\n\tliqoPath := filepath.Join(XDGBaseDir, \"liqo\")\n\tif err := os.Setenv(client.EnvLiqoPath, liqoPath); err != nil {\n\t\tos.Exit(1)\n\t}\n\tconf := &config{notifyLevel: NotifyLevelMax, notifyIconPath: filepath.Join(liqoPath, \"icons\")}\n\tconf.notifyTranslateMap = make(map[NotifyLevel]string)\n\tconf.notifyTranslateReverseMap = make(map[string]NotifyLevel)\n\tconf.notifyTranslateMap[NotifyLevelOff] = NotifyLevelOffDescription\n\tconf.notifyTranslateMap[NotifyLevelMin] = NotifyLevelMinDescription\n\tconf.notifyTranslateMap[NotifyLevelMax] = NotifyLevelMaxDescription\n\tfor k, v := range conf.notifyTranslateMap {\n\t\tconf.notifyTranslateReverseMap[v] = k\n\t}\n\treturn conf\n}", "func NewOIDC(c config.OAuthConfig) (oauthConfig *oauth2.Config, oauthVerifier OIDCVerifier) {\n\tctx := context.Background()\n\tprovider, err := oidc.NewProvider(ctx, c.Provider)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\toidcConfig := &oidc.Config{\n\t\tClientID: c.ClientID,\n\t}\n\tver := provider.Verifier(oidcConfig)\n\toauthVerifier = &oidcVerifier{ver}\n\toauthConfig = &oauth2.Config{\n\t\tClientID: c.ClientID,\n\t\tClientSecret: c.ClientSecret,\n\t\tEndpoint: provider.Endpoint(),\n\t\tRedirectURL: c.RedirectURL,\n\t\tScopes: openIDScope,\n\t}\n\treturn\n}", "func genHubConfig(fname string) (cfg *hubConfig) {\n cfg = new(hubConfig)\n cfg.Lan = genLanConfig()\n cfg.Sam = genDefaultSamConfig(false)\n cfg.ExitDest = \"exit.psi.i2p\"\n return\n}", "func NewOpenAPIDiscoveryOK() *OpenAPIDiscoveryOK {\n\treturn 
&OpenAPIDiscoveryOK{}\n}", "func newWSCon(t *testing.T) *websocket.Conn {\n\tdialer := websocket.DefaultDialer\n\trHeader := http.Header{}\n\tcon, r, err := dialer.Dial(websocketAddr, rHeader)\n\tfmt.Println(\"response\", r)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn con\n}", "func NewGetInternalconfigOK() *GetInternalconfigOK {\n\treturn &GetInternalconfigOK{}\n}", "func (client *WebAppsClient) createOrUpdateConfigurationCreateRequest(ctx context.Context, resourceGroupName string, name string, siteConfig SiteConfigResource, options *WebAppsCreateOrUpdateConfigurationOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/web\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, siteConfig)\n}", "func New() ConfStruct {\n\tnewConfif := &ConfStruct{}\n\treturn *newConfif\n}", "func NewSpecConfig(config *protoform.Config, kubeClient *kubernetes.Clientset, opssightClient *opssightclientset.Clientset, hubClient *hubclientset.Clientset, opssight 
*opssightapi.OpsSight, isBlackDuckClusterScope bool, dryRun bool) *SpecConfig {\n\topssightSpec := &opssight.Spec\n\tname := opssight.Name\n\tnames := map[string]string{\n\t\t\"perceptor\": \"core\",\n\t\t\"pod-perceiver\": \"pod-processor\",\n\t\t\"image-perceiver\": \"image-processor\",\n\t\t\"artifactory-perceiver\": \"artifactory-processor\",\n\t\t\"quay-perceiver\": \"quay-processor\",\n\t\t\"scanner\": \"scanner\",\n\t\t\"perceptor-imagefacade\": \"image-getter\",\n\t\t\"skyfire\": \"skyfire\",\n\t\t\"prometheus\": \"prometheus\",\n\t\t\"configmap\": \"opssight\",\n\t\t\"perceiver-service-account\": \"processor\",\n\t}\n\tbaseImageURL := \"docker.io/blackducksoftware\"\n\tversion := \"2.2.5\"\n\timages := map[string]string{\n\t\t\"perceptor\": fmt.Sprintf(\"%s/opssight-core:%s\", baseImageURL, version),\n\t\t\"pod-perceiver\": fmt.Sprintf(\"%s/opssight-pod-processor:%s\", baseImageURL, version),\n\t\t\"image-perceiver\": fmt.Sprintf(\"%s/opssight-image-processor:%s\", baseImageURL, version),\n\t\t\"artifactory-perceiver\": fmt.Sprintf(\"%s/opssight-artifactory-processor:%s\", baseImageURL, version),\n\t\t\"quay-perceiver\": fmt.Sprintf(\"%s/opssight-quay-processor:%s\", baseImageURL, version),\n\t\t\"scanner\": fmt.Sprintf(\"%s/opssight-scanner:%s\", baseImageURL, version),\n\t\t\"perceptor-imagefacade\": fmt.Sprintf(\"%s/opssight-image-getter:%s\", baseImageURL, version),\n\t\t\"skyfire\": \"gcr.io/saas-hub-stg/blackducksoftware/pyfire:master\",\n\t\t\"prometheus\": \"docker.io/prom/prometheus:v2.1.0\",\n\t}\n\tif opssightSpec.IsUpstream {\n\t\tnames = map[string]string{\n\t\t\t\"perceptor\": \"perceptor\",\n\t\t\t\"pod-perceiver\": \"pod-perceiver\",\n\t\t\t\"image-perceiver\": \"image-perceiver\",\n\t\t\t\"artifactory-perceiver\": \"artifactory-perceiver\",\n\t\t\t\"quay-perceiver\": \"quay-perceiver\",\n\t\t\t\"scanner\": \"scanner\",\n\t\t\t\"perceptor-imagefacade\": \"image-facade\",\n\t\t\t\"skyfire\": \"skyfire\",\n\t\t\t\"prometheus\": 
\"prometheus\",\n\t\t\t\"configmap\": \"perceptor\",\n\t\t\t\"perceiver-service-account\": \"perceiver\",\n\t\t}\n\t\tbaseImageURL = \"gcr.io/saas-hub-stg/blackducksoftware\"\n\t\tversion = \"master\"\n\t\timages = map[string]string{\n\t\t\t\"perceptor\": fmt.Sprintf(\"%s/perceptor:%s\", baseImageURL, version),\n\t\t\t\"pod-perceiver\": fmt.Sprintf(\"%s/pod-perceiver:%s\", baseImageURL, version),\n\t\t\t\"image-perceiver\": fmt.Sprintf(\"%s/image-perceiver:%s\", baseImageURL, version),\n\t\t\t\"artifactory-perceiver\": fmt.Sprintf(\"%s/artifactory-perceiver:%s\", baseImageURL, version),\n\t\t\t\"quay-perceiver\": fmt.Sprintf(\"%s/quay-perceiver:%s\", baseImageURL, version),\n\t\t\t\"scanner\": fmt.Sprintf(\"%s/perceptor-scanner:%s\", baseImageURL, version),\n\t\t\t\"perceptor-imagefacade\": fmt.Sprintf(\"%s/perceptor-imagefacade:%s\", baseImageURL, version),\n\t\t\t\"skyfire\": \"gcr.io/saas-hub-stg/blackducksoftware/pyfire:master\",\n\t\t\t\"prometheus\": \"docker.io/prom/prometheus:v2.1.0\"}\n\t}\n\n\tfor componentName, componentImage := range images {\n\t\timage := appsutil.GenerateImageTag(componentImage, opssightSpec.ImageRegistries, opssightSpec.RegistryConfiguration)\n\t\timages[componentName] = image\n\t}\n\n\tconfigMap := &MainOpssightConfigMap{\n\t\tLogLevel: opssightSpec.LogLevel,\n\t\tBlackDuck: &BlackDuckConfig{\n\t\t\tConnectionsEnvironmentVariableName: opssightSpec.Blackduck.ConnectionsEnvironmentVariableName,\n\t\t\tTLSVerification: opssightSpec.Blackduck.TLSVerification,\n\t\t},\n\t\tImageFacade: &ImageFacadeConfig{\n\t\t\tCreateImagesOnly: false,\n\t\t\tHost: \"localhost\",\n\t\t\tPort: 3004,\n\t\t\tImagePullerType: opssightSpec.ScannerPod.ImageFacade.ImagePullerType,\n\t\t},\n\t\tPerceiver: &PerceiverConfig{\n\t\t\tCertificate: opssightSpec.Perceiver.Certificate,\n\t\t\tCertificateKey: opssightSpec.Perceiver.CertificateKey,\n\t\t\tImage: &ImagePerceiverConfig{},\n\t\t\tPod: &PodPerceiverConfig{\n\t\t\t\tNamespaceFilter: 
opssightSpec.Perceiver.PodPerceiver.NamespaceFilter,\n\t\t\t},\n\t\t\tArtifactory: &ArtifactoryPerceiverConfig{\n\t\t\t\tDumper: opssightSpec.Perceiver.EnableArtifactoryPerceiverDumper,\n\t\t\t},\n\t\t\tAnnotationIntervalSeconds: opssightSpec.Perceiver.AnnotationIntervalSeconds,\n\t\t\tDumpIntervalMinutes: opssightSpec.Perceiver.DumpIntervalMinutes,\n\t\t\tPort: 3002,\n\t\t},\n\t\tPerceptor: &PerceptorConfig{\n\t\t\tTimings: &PerceptorTimingsConfig{\n\t\t\t\tCheckForStalledScansPauseHours: opssightSpec.Perceptor.CheckForStalledScansPauseHours,\n\t\t\t\tClientTimeoutMilliseconds: opssightSpec.Perceptor.ClientTimeoutMilliseconds,\n\t\t\t\tModelMetricsPauseSeconds: opssightSpec.Perceptor.ModelMetricsPauseSeconds,\n\t\t\t\tStalledScanClientTimeoutHours: opssightSpec.Perceptor.StalledScanClientTimeoutHours,\n\t\t\t\tUnknownImagePauseMilliseconds: opssightSpec.Perceptor.UnknownImagePauseMilliseconds,\n\t\t\t},\n\t\t\tHost: util.GetResourceName(name, util.OpsSightName, names[\"perceptor\"]),\n\t\t\tPort: 3001,\n\t\t\tUseMockMode: false,\n\t\t},\n\t\tScanner: &ScannerConfig{\n\t\t\tBlackDuckClientTimeoutSeconds: opssightSpec.ScannerPod.Scanner.ClientTimeoutSeconds,\n\t\t\tImageDirectory: opssightSpec.ScannerPod.ImageDirectory,\n\t\t\tPort: 3003,\n\t\t},\n\t\tSkyfire: &SkyfireConfig{\n\t\t\tBlackDuckClientTimeoutSeconds: opssightSpec.Skyfire.HubClientTimeoutSeconds,\n\t\t\tBlackDuckDumpPauseSeconds: opssightSpec.Skyfire.HubDumpPauseSeconds,\n\t\t\tKubeDumpIntervalSeconds: opssightSpec.Skyfire.KubeDumpIntervalSeconds,\n\t\t\tPerceptorDumpIntervalSeconds: opssightSpec.Skyfire.PerceptorDumpIntervalSeconds,\n\t\t\tPort: 3005,\n\t\t\tPrometheusPort: 3006,\n\t\t\tUseInClusterConfig: true,\n\t\t},\n\t}\n\treturn &SpecConfig{\n\t\tconfig: config,\n\t\tkubeClient: kubeClient,\n\t\topssightClient: opssightClient,\n\t\thubClient: hubClient,\n\t\topssight: opssight,\n\t\tconfigMap: configMap,\n\t\tisBlackDuckClusterScope: isBlackDuckClusterScope,\n\t\tdryRun: dryRun,\n\t\tnames: 
names,\n\t\timages: images,\n\t}\n}", "func newJiraConfigMap(j *v1alpha1.Jira) error {\n\tcm := &v1.ConfigMap{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"ConfigMap\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: j.Spec.ConfigMapName,\n\t\t\tNamespace: j.Namespace,\n\t\t\tOwnerReferences: ownerRef(j),\n\t\t\tLabels: jiraLabels(j),\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"dbconfig.xml\": DefaultDatabaseConfig,\n\t\t},\n\t}\n\treturn createResource(j, cm)\n}", "func populateClientConfig(config *Config) *Config {\n\tif config == nil {\n\t\tconfig = &Config{}\n\t}\n\tversions := config.Versions\n\tif len(versions) == 0 {\n\t\tversions = protocol.SupportedVersions\n\t}\n\n\thandshakeTimeout := protocol.DefaultHandshakeTimeout\n\tif config.HandshakeTimeout != 0 {\n\t\thandshakeTimeout = config.HandshakeTimeout\n\t}\n\tidleTimeout := protocol.DefaultIdleTimeout\n\tif config.IdleTimeout != 0 {\n\t\tidleTimeout = config.IdleTimeout\n\t}\n\n\tmaxReceiveStreamFlowControlWindow := config.MaxReceiveStreamFlowControlWindow\n\tif maxReceiveStreamFlowControlWindow == 0 {\n\t\tmaxReceiveStreamFlowControlWindow = protocol.DefaultMaxReceiveStreamFlowControlWindowClient\n\t}\n\tmaxReceiveConnectionFlowControlWindow := config.MaxReceiveConnectionFlowControlWindow\n\tif maxReceiveConnectionFlowControlWindow == 0 {\n\t\tmaxReceiveConnectionFlowControlWindow = protocol.DefaultMaxReceiveConnectionFlowControlWindowClient\n\t}\n\n\treturn &Config{\n\t\tVersions: versions,\n\t\tHandshakeTimeout: handshakeTimeout,\n\t\tIdleTimeout: idleTimeout,\n\t\tRequestConnectionIDTruncation: config.RequestConnectionIDTruncation,\n\t\tMaxReceiveStreamFlowControlWindow: maxReceiveStreamFlowControlWindow,\n\t\tMaxReceiveConnectionFlowControlWindow: maxReceiveConnectionFlowControlWindow,\n\t\tKeepAlive: config.KeepAlive,\n\t\tCacheHandshake: config.CacheHandshake,\n\t\tCreatePaths: config.CreatePaths,\n\t}\n}", "func newPeersConfigImpl() *examplePeersConfig 
{\n\tpConfig := verifyIsLocalPeersURLs(peersConfig)\n\tpeersConfig = pConfig\n\tp := &examplePeersConfig{}\n\treturn p\n}", "func newSSHDefaultConfig(userName, identity string) (*sshClientConfig, error) {\n\tconfig, err := sshDefaultConfig(userName, identity)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sshClientConfig{ClientConfig: config}, nil\n}", "func DefaultProtocolConfig() ProtocolConfig {\n\treturn ProtocolConfig{\n\t\tMinPoWScore: 100,\n\t\tPublicKeyRanges: []coopkg.PublicKeyRange{\n\t\t\t{\n\t\t\t\tKey: \"ed3c3f1a319ff4e909cf2771d79fece0ac9bd9fd2ee49ea6c0885c9cb3b1248c\",\n\t\t\t\tStartIndex: 0,\n\t\t\t\tEndIndex: 0,\n\t\t\t},\n\t\t\t{\n\t\t\t\tKey: \"f6752f5f46a53364e2ee9c4d662d762a81efd51010282a75cd6bd03f28ef349c\",\n\t\t\t\tStartIndex: 0,\n\t\t\t\tEndIndex: 0,\n\t\t\t},\n\t\t},\n\t\tNetworkIDName: \"alphanet1\",\n\t}\n}", "func WrapConfig(hfn http.HandlerFunc, cfg *config.APICfg, brk brokers.Broker, str stores.Store, mgr *oldPush.Manager, c push.Client) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tnStr := str.Clone()\n\t\tdefer nStr.Close()\n\t\tgorillaContext.Set(r, \"brk\", brk)\n\t\tgorillaContext.Set(r, \"str\", nStr)\n\t\tgorillaContext.Set(r, \"mgr\", mgr)\n\t\tgorillaContext.Set(r, \"apsc\", c)\n\t\tgorillaContext.Set(r, \"auth_resource\", cfg.ResAuth)\n\t\tgorillaContext.Set(r, \"auth_service_token\", cfg.ServiceToken)\n\t\tgorillaContext.Set(r, \"push_worker_token\", cfg.PushWorkerToken)\n\t\tgorillaContext.Set(r, \"push_enabled\", cfg.PushEnabled)\n\t\thfn.ServeHTTP(w, r)\n\n\t})\n}", "func newWSConn(conn *websocket.Conn) (*wsConn, error) {\n\tc := &wsConn{conn: conn}\n\treturn c, nil\n}", "func New() Config {\n\n\tconfig := &configImpl{\n\t\tcodec: make(map[string]Codec),\n\t}\n\n\tcodecs := Codecs()\n\n\tfor _, codec := range codecs {\n\t\tif codec.Name() == \"json\" {\n\t\t\tconfig.jsonCodec = codec\n\t\t}\n\t\tconfig.codec[codec.Name()] = codec\n\t}\n\n\tif 
config.jsonCodec == nil {\n\t\tpanic(errors.Wrap(ErrJSONCodec, \"using register function to register json codec\"))\n\t}\n\n\treturn config\n}", "func (d *NAT44GlobalDescriptor) Create(key string, globalCfg *nat.Nat44Global) (metadata interface{}, err error) {\n\tif !d.natHandler.WithLegacyStartupConf() {\n\t\terr = d.natHandler.EnableNAT44Plugin(vppcalls.Nat44InitOpts{\n\t\t\tEndpointDependent: !globalCfg.EndpointIndependent,\n\t\t})\n\t\tif err != nil {\n\t\t\td.log.Error(err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn d.Update(key, d.defaultGlobalCfg, globalCfg, nil)\n}", "func New(secretKey, issuer string, expiredIn int) It.ITokenFactory {\n\treturn &JwtConfig{\n\t\tSecretKey: secretKey,\n\t\tExpiredIn: expiredIn,\n\t\tIssuer: issuer,\n\t}\n}", "func New(c *xoauth2.Config) oauth2.Provider {\n\tc.Endpoint = xslack.Endpoint\n\tif c.Scopes == nil {\n\t\tc.Scopes = defaultScopes\n\t}\n\treturn slackProvider{config: c}\n}", "func NewConfig(data map[string]string) (settings *Config) {\n cfg := &Config{\n ConsumerKey: data[\"consumer_key\"],\n ConsumerSecret: data[\"consumer_secret\"],\n }\n\n // save access token if defined\n if atoken, ok := data[\"access_token\"]; ok {\n cfg.AccessToken = atoken\n }\n\n // save access token secret if defined\n if asecret, ok := data[\"access_secret\"]; ok {\n cfg.AccessSecret = asecret\n }\n\n // save debug flag if defined\n if debug, ok := data[\"debug\"]; ok && debug == \"on\" {\n cfg.Debug = true\n }\n\n return cfg\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tTokenType: \"Bearer\",\n\t}\n}", "func (o ServiceOutput) OpenapiConfig() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Service) pulumi.StringPtrOutput { return v.OpenapiConfig }).(pulumi.StringPtrOutput)\n}", "func New(addr []string, config *tls.Config) (register.Register, error) {\n\tkcfg := &gokong.Config{\n\t\tHostAddress: addr[0],\n\t}\n\n\treg := &kRegister{\n\t\tkAddrs: addr,\n\t\tkClient: gokong.NewClient(kcfg),\n\t}\n\treturn reg, nil\n}", "func 
NewIosUpdateConfiguration()(*IosUpdateConfiguration) {\n m := &IosUpdateConfiguration{\n DeviceConfiguration: *NewDeviceConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.iosUpdateConfiguration\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func NewNewDiscoveryOK() *NewDiscoveryOK {\n\treturn &NewDiscoveryOK{}\n}", "func NewCombinedFromConfig(other map[string]interface{}) (Provider, error) {\n\tstatus, err := NewOpenWBStatusProviderFromConfig(other)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to := &combinedProvider{status: status}\n\treturn o, nil\n}", "func (l0 *l0Conn) parametersNew() (p *paramConf) {\n\tp = &paramConf{conf.New(l0.teo, &param{}), l0.teo.ev.subscribe(), l0}\n\tfmt.Printf(\"!!! Subscribed - peer connected !!!\\n\")\n\tgo func() {\n\t\t// TODO: uncomment wg using when normolise close channel during exit\n\t\tl0.teo.wg.Add(1)\n\t\tfor ev := range p.chanEvent {\n\t\t\tp.eventProcess(ev)\n\t\t}\n\t\tfmt.Printf(\"!!! Subscribed channel closed !!!\\n\")\n\t\tl0.teo.wg.Done()\n\t}()\n\treturn\n}", "func NewConfig() {\n\t appConfig = &AppConfig{}\n}", "func NewThingConfig(resp *iot.CreateCertificateFromCsrOutput) *ThingConfig {\n\treturn &ThingConfig{\n\t\tCertificateArn: *resp.CertificateArn,\n\t\tCertificateID: *resp.CertificateId,\n\t\tCertificatePem: *resp.CertificatePem,\n\t}\n}", "func New(token, siteID string, hc *http.Client) *apiConfig {\n\tvar client *pester.Client\n\n\t// If a http client is passed in, use it.\n\tif hc != nil {\n\t\tclient = pester.NewExtendedClient(hc)\n\t} else {\n\t\tclient = pester.New()\n\t}\n\n\t// client.Concurrency = 3\n\tclient.MaxRetries = 10\n\tclient.Backoff = pester.ExponentialBackoff\n\tclient.KeepLog = true\n\tclient.RetryOnHTTP429 = true\n\n\treturn &apiConfig{\n\t\tClient: client,\n\t\tToken: token,\n\t\tVersion: defaultVersion,\n\t\tBaseURL: defaultURL,\n\t\tSiteID: siteID,\n\t}\n}", "func New(content map[string]interface{}) *Config {\n\treturn &Config{\n\t\tm: content,\n\t}\n}", "func 
New(config Config) IDology {\n\treturn IDology{\n\t\texpectID: expectid.NewClient(expectid.Config(config)),\n\t}\n}", "func New(domain, key, webocketURL, folder string) (c *client, err error) {\n\tif strings.HasPrefix(webocketURL, \"http\") {\n\t\twebocketURL = strings.Replace(webocketURL, \"http\", \"ws\", 1)\n\t}\n\twebocketURL += \"/ws\"\n\n\tif domain == \"\" {\n\t\tdomain = namesgenerator.GetRandomName()\n\t}\n\n\tif key == \"\" {\n\t\tkey = utils.RandStringBytesMaskImpr(6)\n\t}\n\n\tif folder == \"\" {\n\t\tfolder = \".\"\n\t}\n\n\tfolder, _ = filepath.Abs(folder)\n\tfolder = filepath.ToSlash(folder)\n\n\tif _, err = os.Stat(folder); os.IsNotExist(err) {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tlog.Infof(\"connecting to %s\", webocketURL)\n\tlog.Infof(\"using domain '%s'\", domain)\n\tlog.Infof(\"using key '%s'\", key)\n\tlog.Infof(\"watching folder '%s'\", folder)\n\tpublicURL := strings.Replace(webocketURL, \"ws\", \"http\", 1)\n\tpublicURL = strings.Replace(publicURL, \"/ws\", \"/\"+domain+\"/\", 1)\n\tfmt.Printf(\"\\n\\t%s\\n\\n\", publicURL)\n\n\tc = &client{\n\t\tWebsocketURL: webocketURL,\n\t\tDomain: domain,\n\t\tKey: key,\n\t\tFolder: folder,\n\t\tfileList: make(map[string]struct{}),\n\t}\n\treturn\n}", "func NewBasicAuthentication()(*BasicAuthentication) {\n m := &BasicAuthentication{\n ApiAuthenticationConfigurationBase: *NewApiAuthenticationConfigurationBase(),\n }\n odataTypeValue := \"#microsoft.graph.basicAuthentication\";\n m.SetOdataType(&odataTypeValue);\n return m\n}" ]
[ "0.6481607", "0.5625067", "0.5399198", "0.53529614", "0.53529614", "0.519715", "0.50579363", "0.4971162", "0.4905817", "0.48680037", "0.4852645", "0.48009828", "0.47957188", "0.47848693", "0.47283456", "0.47203878", "0.47152308", "0.4688265", "0.46798852", "0.46703023", "0.46603233", "0.4651478", "0.46479326", "0.4595405", "0.45615315", "0.45535958", "0.45515007", "0.4551453", "0.45487857", "0.4534631", "0.45343724", "0.453127", "0.4516594", "0.44821975", "0.4482187", "0.44795153", "0.44715267", "0.44683003", "0.4461263", "0.4443991", "0.44353262", "0.44308168", "0.44270992", "0.4425333", "0.44222027", "0.4409459", "0.43919855", "0.4372845", "0.43705207", "0.43694898", "0.43692976", "0.435866", "0.43562156", "0.43555042", "0.4352328", "0.43435323", "0.4316317", "0.4294926", "0.42922032", "0.42917746", "0.42887774", "0.42791444", "0.4261695", "0.42585164", "0.4258378", "0.42531896", "0.424763", "0.42470658", "0.42465058", "0.42430326", "0.42405605", "0.4239182", "0.42384592", "0.42370743", "0.42309886", "0.42234752", "0.42165995", "0.42043796", "0.41966352", "0.41965774", "0.41965717", "0.41953838", "0.41951334", "0.4192846", "0.41926518", "0.41862553", "0.41787916", "0.41775042", "0.41766158", "0.41724578", "0.416876", "0.4162198", "0.41578", "0.4154604", "0.41534865", "0.41504017", "0.414804", "0.41445205", "0.4141159", "0.41383892" ]
0.7796182
0
Copy the values of the OAuth2WellKnownConfiguration and return it as a new struct.
func (opts OAuth2WellKnownConfiguration) Copy() (optsCopy OAuth2WellKnownConfiguration) { optsCopy = OAuth2WellKnownConfiguration{ CommonDiscoveryOptions: opts.CommonDiscoveryOptions, OAuth2DiscoveryOptions: opts.OAuth2DiscoveryOptions, } if opts.OAuth2DeviceAuthorizationGrantDiscoveryOptions != nil { optsCopy.OAuth2DeviceAuthorizationGrantDiscoveryOptions = &OAuth2DeviceAuthorizationGrantDiscoveryOptions{} *optsCopy.OAuth2DeviceAuthorizationGrantDiscoveryOptions = *opts.OAuth2DeviceAuthorizationGrantDiscoveryOptions } if opts.OAuth2MutualTLSClientAuthenticationDiscoveryOptions != nil { optsCopy.OAuth2MutualTLSClientAuthenticationDiscoveryOptions = &OAuth2MutualTLSClientAuthenticationDiscoveryOptions{} *optsCopy.OAuth2MutualTLSClientAuthenticationDiscoveryOptions = *opts.OAuth2MutualTLSClientAuthenticationDiscoveryOptions } if opts.OAuth2IssuerIdentificationDiscoveryOptions != nil { optsCopy.OAuth2IssuerIdentificationDiscoveryOptions = &OAuth2IssuerIdentificationDiscoveryOptions{} *optsCopy.OAuth2IssuerIdentificationDiscoveryOptions = *opts.OAuth2IssuerIdentificationDiscoveryOptions } if opts.OAuth2JWTIntrospectionResponseDiscoveryOptions != nil { optsCopy.OAuth2JWTIntrospectionResponseDiscoveryOptions = &OAuth2JWTIntrospectionResponseDiscoveryOptions{} *optsCopy.OAuth2JWTIntrospectionResponseDiscoveryOptions = *opts.OAuth2JWTIntrospectionResponseDiscoveryOptions } if opts.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions != nil { optsCopy.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions = &OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions{} *optsCopy.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions = *opts.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions } if opts.OAuth2PushedAuthorizationDiscoveryOptions != nil { optsCopy.OAuth2PushedAuthorizationDiscoveryOptions = &OAuth2PushedAuthorizationDiscoveryOptions{} *optsCopy.OAuth2PushedAuthorizationDiscoveryOptions = *opts.OAuth2PushedAuthorizationDiscoveryOptions } return optsCopy }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (opts OpenIDConnectWellKnownConfiguration) Copy() (optsCopy OpenIDConnectWellKnownConfiguration) {\n\toptsCopy = OpenIDConnectWellKnownConfiguration{\n\t\tOAuth2WellKnownConfiguration: opts.OAuth2WellKnownConfiguration.Copy(),\n\t\tOpenIDConnectDiscoveryOptions: opts.OpenIDConnectDiscoveryOptions,\n\t}\n\n\tif opts.OpenIDConnectFrontChannelLogoutDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectFrontChannelLogoutDiscoveryOptions = &OpenIDConnectFrontChannelLogoutDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectFrontChannelLogoutDiscoveryOptions = *opts.OpenIDConnectFrontChannelLogoutDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectBackChannelLogoutDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectBackChannelLogoutDiscoveryOptions = &OpenIDConnectBackChannelLogoutDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectBackChannelLogoutDiscoveryOptions = *opts.OpenIDConnectBackChannelLogoutDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectSessionManagementDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectSessionManagementDiscoveryOptions = &OpenIDConnectSessionManagementDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectSessionManagementDiscoveryOptions = *opts.OpenIDConnectSessionManagementDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectRPInitiatedLogoutDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectRPInitiatedLogoutDiscoveryOptions = &OpenIDConnectRPInitiatedLogoutDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectRPInitiatedLogoutDiscoveryOptions = *opts.OpenIDConnectRPInitiatedLogoutDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectPromptCreateDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectPromptCreateDiscoveryOptions = &OpenIDConnectPromptCreateDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectPromptCreateDiscoveryOptions = *opts.OpenIDConnectPromptCreateDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions = 
&OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions = *opts.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions\n\t}\n\n\tif opts.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions = &OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions{}\n\t\t*optsCopy.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions = *opts.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions\n\t}\n\n\tif opts.OpenIDFederationDiscoveryOptions != nil {\n\t\toptsCopy.OpenIDFederationDiscoveryOptions = &OpenIDFederationDiscoveryOptions{}\n\t\t*optsCopy.OpenIDFederationDiscoveryOptions = *opts.OpenIDFederationDiscoveryOptions\n\t}\n\n\treturn optsCopy\n}", "func NewOpenIDConnectWellKnownConfiguration(c *schema.IdentityProvidersOpenIDConnect) (config OpenIDConnectWellKnownConfiguration) {\n\tconfig = OpenIDConnectWellKnownConfiguration{\n\t\tOAuth2WellKnownConfiguration: OAuth2WellKnownConfiguration{\n\t\t\tCommonDiscoveryOptions: CommonDiscoveryOptions{\n\t\t\t\tSubjectTypesSupported: []string{\n\t\t\t\t\tSubjectTypePublic,\n\t\t\t\t\tSubjectTypePairwise,\n\t\t\t\t},\n\t\t\t\tResponseTypesSupported: []string{\n\t\t\t\t\tResponseTypeAuthorizationCodeFlow,\n\t\t\t\t\tResponseTypeImplicitFlowIDToken,\n\t\t\t\t\tResponseTypeImplicitFlowToken,\n\t\t\t\t\tResponseTypeImplicitFlowBoth,\n\t\t\t\t\tResponseTypeHybridFlowIDToken,\n\t\t\t\t\tResponseTypeHybridFlowToken,\n\t\t\t\t\tResponseTypeHybridFlowBoth,\n\t\t\t\t},\n\t\t\t\tGrantTypesSupported: []string{\n\t\t\t\t\tGrantTypeAuthorizationCode,\n\t\t\t\t\tGrantTypeImplicit,\n\t\t\t\t\tGrantTypeClientCredentials,\n\t\t\t\t\tGrantTypeRefreshToken,\n\t\t\t\t},\n\t\t\t\tResponseModesSupported: 
[]string{\n\t\t\t\t\tResponseModeFormPost,\n\t\t\t\t\tResponseModeQuery,\n\t\t\t\t\tResponseModeFragment,\n\t\t\t\t\tResponseModeJWT,\n\t\t\t\t\tResponseModeFormPostJWT,\n\t\t\t\t\tResponseModeQueryJWT,\n\t\t\t\t\tResponseModeFragmentJWT,\n\t\t\t\t},\n\t\t\t\tScopesSupported: []string{\n\t\t\t\t\tScopeOfflineAccess,\n\t\t\t\t\tScopeOpenID,\n\t\t\t\t\tScopeProfile,\n\t\t\t\t\tScopeGroups,\n\t\t\t\t\tScopeEmail,\n\t\t\t\t},\n\t\t\t\tClaimsSupported: []string{\n\t\t\t\t\tClaimAuthenticationMethodsReference,\n\t\t\t\t\tClaimAudience,\n\t\t\t\t\tClaimAuthorizedParty,\n\t\t\t\t\tClaimClientIdentifier,\n\t\t\t\t\tClaimExpirationTime,\n\t\t\t\t\tClaimIssuedAt,\n\t\t\t\t\tClaimIssuer,\n\t\t\t\t\tClaimJWTID,\n\t\t\t\t\tClaimRequestedAt,\n\t\t\t\t\tClaimSubject,\n\t\t\t\t\tClaimAuthenticationTime,\n\t\t\t\t\tClaimNonce,\n\t\t\t\t\tClaimPreferredEmail,\n\t\t\t\t\tClaimEmailVerified,\n\t\t\t\t\tClaimEmailAlts,\n\t\t\t\t\tClaimGroups,\n\t\t\t\t\tClaimPreferredUsername,\n\t\t\t\t\tClaimFullName,\n\t\t\t\t},\n\t\t\t\tTokenEndpointAuthMethodsSupported: []string{\n\t\t\t\t\tClientAuthMethodClientSecretBasic,\n\t\t\t\t\tClientAuthMethodClientSecretPost,\n\t\t\t\t\tClientAuthMethodClientSecretJWT,\n\t\t\t\t\tClientAuthMethodPrivateKeyJWT,\n\t\t\t\t\tClientAuthMethodNone,\n\t\t\t\t},\n\t\t\t\tTokenEndpointAuthSigningAlgValuesSupported: []string{\n\t\t\t\t\tSigningAlgHMACUsingSHA256,\n\t\t\t\t\tSigningAlgHMACUsingSHA384,\n\t\t\t\t\tSigningAlgHMACUsingSHA512,\n\t\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\t\tSigningAlgRSAUsingSHA384,\n\t\t\t\t\tSigningAlgRSAUsingSHA512,\n\t\t\t\t\tSigningAlgECDSAUsingP256AndSHA256,\n\t\t\t\t\tSigningAlgECDSAUsingP384AndSHA384,\n\t\t\t\t\tSigningAlgECDSAUsingP521AndSHA512,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA256,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA384,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA512,\n\t\t\t\t},\n\t\t\t},\n\t\t\tOAuth2DiscoveryOptions: OAuth2DiscoveryOptions{\n\t\t\t\tCodeChallengeMethodsSupported: 
[]string{\n\t\t\t\t\tPKCEChallengeMethodSHA256,\n\t\t\t\t},\n\t\t\t\tRevocationEndpointAuthMethodsSupported: []string{\n\t\t\t\t\tClientAuthMethodClientSecretBasic,\n\t\t\t\t\tClientAuthMethodClientSecretPost,\n\t\t\t\t\tClientAuthMethodClientSecretJWT,\n\t\t\t\t\tClientAuthMethodPrivateKeyJWT,\n\t\t\t\t\tClientAuthMethodNone,\n\t\t\t\t},\n\t\t\t\tRevocationEndpointAuthSigningAlgValuesSupported: []string{\n\t\t\t\t\tSigningAlgHMACUsingSHA256,\n\t\t\t\t\tSigningAlgHMACUsingSHA384,\n\t\t\t\t\tSigningAlgHMACUsingSHA512,\n\t\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\t\tSigningAlgRSAUsingSHA384,\n\t\t\t\t\tSigningAlgRSAUsingSHA512,\n\t\t\t\t\tSigningAlgECDSAUsingP256AndSHA256,\n\t\t\t\t\tSigningAlgECDSAUsingP384AndSHA384,\n\t\t\t\t\tSigningAlgECDSAUsingP521AndSHA512,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA256,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA384,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA512,\n\t\t\t\t},\n\t\t\t\tIntrospectionEndpointAuthMethodsSupported: []string{\n\t\t\t\t\tClientAuthMethodClientSecretBasic,\n\t\t\t\t\tClientAuthMethodNone,\n\t\t\t\t},\n\t\t\t},\n\t\t\tOAuth2JWTIntrospectionResponseDiscoveryOptions: &OAuth2JWTIntrospectionResponseDiscoveryOptions{\n\t\t\t\tIntrospectionSigningAlgValuesSupported: []string{\n\t\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\t\tSigningAlgNone,\n\t\t\t\t},\n\t\t\t},\n\t\t\tOAuth2PushedAuthorizationDiscoveryOptions: &OAuth2PushedAuthorizationDiscoveryOptions{\n\t\t\t\tRequirePushedAuthorizationRequests: c.PAR.Enforce,\n\t\t\t},\n\t\t\tOAuth2IssuerIdentificationDiscoveryOptions: &OAuth2IssuerIdentificationDiscoveryOptions{\n\t\t\t\tAuthorizationResponseIssuerParameterSupported: true,\n\t\t\t},\n\t\t},\n\n\t\tOpenIDConnectDiscoveryOptions: OpenIDConnectDiscoveryOptions{\n\t\t\tIDTokenSigningAlgValuesSupported: []string{\n\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\tSigningAlgNone,\n\t\t\t},\n\t\t\tUserinfoSigningAlgValuesSupported: 
[]string{\n\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\tSigningAlgNone,\n\t\t\t},\n\t\t\tRequestObjectSigningAlgValuesSupported: []string{\n\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\tSigningAlgRSAUsingSHA384,\n\t\t\t\tSigningAlgRSAUsingSHA512,\n\t\t\t\tSigningAlgECDSAUsingP256AndSHA256,\n\t\t\t\tSigningAlgECDSAUsingP384AndSHA384,\n\t\t\t\tSigningAlgECDSAUsingP521AndSHA512,\n\t\t\t\tSigningAlgRSAPSSUsingSHA256,\n\t\t\t\tSigningAlgRSAPSSUsingSHA384,\n\t\t\t\tSigningAlgRSAPSSUsingSHA512,\n\t\t\t\tSigningAlgNone,\n\t\t\t},\n\t\t},\n\t\tOpenIDConnectPromptCreateDiscoveryOptions: &OpenIDConnectPromptCreateDiscoveryOptions{\n\t\t\tPromptValuesSupported: []string{\n\t\t\t\tPromptNone,\n\t\t\t\tPromptConsent,\n\t\t\t},\n\t\t},\n\t\tOpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions: &OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions{\n\t\t\tAuthorizationSigningAlgValuesSupported: []string{\n\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, alg := range c.Discovery.ResponseObjectSigningAlgs {\n\t\tif !utils.IsStringInSlice(alg, config.IDTokenSigningAlgValuesSupported) {\n\t\t\tconfig.IDTokenSigningAlgValuesSupported = append(config.IDTokenSigningAlgValuesSupported, alg)\n\t\t}\n\n\t\tif !utils.IsStringInSlice(alg, config.UserinfoSigningAlgValuesSupported) {\n\t\t\tconfig.UserinfoSigningAlgValuesSupported = append(config.UserinfoSigningAlgValuesSupported, alg)\n\t\t}\n\n\t\tif !utils.IsStringInSlice(alg, config.IntrospectionSigningAlgValuesSupported) {\n\t\t\tconfig.IntrospectionSigningAlgValuesSupported = append(config.IntrospectionSigningAlgValuesSupported, alg)\n\t\t}\n\n\t\tif !utils.IsStringInSlice(alg, config.AuthorizationSigningAlgValuesSupported) {\n\t\t\tconfig.AuthorizationSigningAlgValuesSupported = append(config.AuthorizationSigningAlgValuesSupported, 
alg)\n\t\t}\n\t}\n\n\tsort.Sort(SortedSigningAlgs(config.IDTokenSigningAlgValuesSupported))\n\tsort.Sort(SortedSigningAlgs(config.UserinfoSigningAlgValuesSupported))\n\tsort.Sort(SortedSigningAlgs(config.IntrospectionSigningAlgValuesSupported))\n\tsort.Sort(SortedSigningAlgs(config.AuthorizationSigningAlgValuesSupported))\n\n\tif c.EnablePKCEPlainChallenge {\n\t\tconfig.CodeChallengeMethodsSupported = append(config.CodeChallengeMethodsSupported, PKCEChallengeMethodPlain)\n\t}\n\n\treturn config\n}", "func getOauthConfig(m configmap.Mapper) *oauth2.Config {\n\t// If not impersonating, use standard scopes\n\tif impersonate, _ := m.Get(\"impersonate\"); impersonate == \"\" {\n\t\treturn dropboxConfig\n\t}\n\t// Make a copy of the config\n\tconfig := *dropboxConfig\n\t// Make a copy of the scopes with extra scopes requires appended\n\tconfig.Scopes = append(config.Scopes, \"members.read\", \"team_data.member\")\n\treturn &config\n}", "func (r *Routes) configureWellKnown(healthFunc func() bool) {\n\twellKnown := r.Group(\"/.well-known\")\n\t{\n\t\twellKnown.GET(\"/schema-discovery\", func(ctx *gin.Context) {\n\t\t\tdiscovery := struct {\n\t\t\t\tSchemaURL string `json:\"schema_url\"`\n\t\t\t\tSchemaType string `json:\"schema_type\"`\n\t\t\t\tUIURL string `json:\"ui_url\"`\n\t\t\t}{\n\t\t\t\tSchemaURL: \"/swagger.json\",\n\t\t\t\tSchemaType: \"swagger-2.0\",\n\t\t\t}\n\t\t\tctx.JSON(http.StatusOK, &discovery)\n\t\t})\n\t\twellKnown.GET(\"/health\", healthHandler(healthFunc))\n\t}\n\n\tr.GET(\"/swagger.json\", func(ctx *gin.Context) {\n\t\tctx.String(http.StatusOK, string(SwaggerJSON))\n\t})\n}", "func (r *Routes) configureWellKnown(healthFunc func() bool) {\n\twellKnown := r.Group(\"/.well-known\")\n\t{\n\t\twellKnown.GET(\"/schema-discovery\", func(ctx *gin.Context) {\n\t\t\tdiscovery := struct {\n\t\t\t\tSchemaURL string `json:\"schema_url\"`\n\t\t\t\tSchemaType string `json:\"schema_type\"`\n\t\t\t\tUIURL string `json:\"ui_url\"`\n\t\t\t}{\n\t\t\t\tSchemaURL: 
\"/swagger.json\",\n\t\t\t\tSchemaType: \"swagger-2.0\",\n\t\t\t}\n\t\t\tctx.JSON(http.StatusOK, &discovery)\n\t\t})\n\t\twellKnown.GET(\"/health\", healthHandler(healthFunc))\n\t}\n\n\tr.GET(\"/swagger.json\", func(ctx *gin.Context) {\n\t\tctx.String(http.StatusOK, string(SwaggerJSON))\n\t})\n}", "func (p Provider) OAuth2Config(c *Client) (cfg oauth2.Config) {\n\tcfg = oauth2.Config{\n\t\tClientID: c.ID,\n\t\tClientSecret: c.Secret,\n\t\tEndpoint: p.Endpoint(),\n\t}\n\n\tif len(c.RedirectURIs) > 0 {\n\t\tcfg.RedirectURL = c.RedirectURIs[0]\n\t}\n\n\treturn cfg\n}", "func (o *OAuth2) Config() *wx.AppConfig {\n\treturn o.config\n}", "func (c *config) newConfig(redirect string) *oauth2.Config {\n\treturn &oauth2.Config{\n\t\tClientID: c.Client,\n\t\tClientSecret: c.Secret,\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: fmt.Sprintf(\"%s/site/oauth2/authorize\", c.URL),\n\t\t\tTokenURL: fmt.Sprintf(\"%s/site/oauth2/access_token\", c.URL),\n\t\t},\n\t\tRedirectURL: fmt.Sprintf(\"%s/authorize\", redirect),\n\t}\n}", "func (o InstanceOutput) OauthConfig() InstanceOauthConfigPtrOutput {\n\treturn o.ApplyT(func(v *Instance) InstanceOauthConfigPtrOutput { return v.OauthConfig }).(InstanceOauthConfigPtrOutput)\n}", "func Oauth2Config(v *viper.Viper) *oauth2.Config {\n\treturn &oauth2.Config{\n\t\tClientID: v.GetString(\"oauth_clientID\"), //\"541640626027-75fep8r1ptdd377l73qhb2f03pckc0po.apps.googleusercontent.com\",\n\t\tClientSecret: v.GetString(\"oauth_client_secret\"), //\"7Qbre2sDMxnPHZwa_6DASz4j\",\n\t\tEndpoint: google.Endpoint,\n\t\tScopes: []string{\n\t\t\turlshortener.UrlshortenerScope,\n\t\t\tgmail.GmailSendScope,\n\t\t\t\"openid\",\n\t\t\t\"profile\",\n\t\t\t\"email\",\n\t\t},\n\t\tRedirectURL: v.GetString(\"oauth_redirect_url\"), //\"http://localhost:1323/token\",\n\t}\n}", "func Unmarshal(conf []byte) (*ServerConfig, error) {\n\tres := &ServerConfig{}\n\tif err := json.Unmarshal(conf, res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}", "func 
oauthConfig(config cfg.Config) (oauth1.Config, error) {\n\tpvtKeyPath := config.GetConfigString(\"jira-private-key-path\")\n\n\tpvtKeyFile, err := os.Open(pvtKeyPath)\n\tif err != nil {\n\t\treturn oauth1.Config{}, fmt.Errorf(\"unable to open private key file for reading: %v\", err)\n\t}\n\n\tpvtKey, err := ioutil.ReadAll(pvtKeyFile)\n\tif err != nil {\n\t\treturn oauth1.Config{}, fmt.Errorf(\"unable to read contents of private key file: %v\", err)\n\t}\n\n\tkeyDERBlock, _ := pem.Decode(pvtKey)\n\tif keyDERBlock == nil {\n\t\treturn oauth1.Config{}, errors.New(\"unable to decode private key PEM block\")\n\t}\n\tif keyDERBlock.Type != \"PRIVATE KEY\" && !strings.HasSuffix(keyDERBlock.Type, \" PRIVATE KEY\") {\n\t\treturn oauth1.Config{}, fmt.Errorf(\"unexpected private key DER block type: %s\", keyDERBlock.Type)\n\t}\n\n\tkey, err := x509.ParsePKCS1PrivateKey(keyDERBlock.Bytes)\n\tif err != nil {\n\t\treturn oauth1.Config{}, fmt.Errorf(\"unable to parse PKCS1 private key: %v\", err)\n\t}\n\n\turi := config.GetConfigString(\"jira-uri\")\n\n\treturn oauth1.Config{\n\t\tConsumerKey: config.GetConfigString(\"jira-consumer-key\"),\n\t\tCallbackURL: \"oob\",\n\t\tEndpoint: oauth1.Endpoint{\n\t\t\tRequestTokenURL: fmt.Sprintf(\"%splugins/servlet/oauth/request-token\", uri),\n\t\t\tAuthorizeURL: fmt.Sprintf(\"%splugins/servlet/oauth/authorize\", uri),\n\t\t\tAccessTokenURL: fmt.Sprintf(\"%splugins/servlet/oauth/access-token\", uri),\n\t\t},\n\t\tSigner: &oauth1.RSASigner{\n\t\t\tPrivateKey: key,\n\t\t},\n\t}, nil\n}", "func (cfg *appConfig) copy() appConfig { return *cfg }", "func ObserveAuthMetadata(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) {\n\tlisters := genericListers.(configobservation.Listers)\n\terrs := []error{}\n\tprevObservedConfig := map[string]interface{}{}\n\n\ttopLevelMetadataFilePath := []string{\"authConfig\", \"oauthMetadataFile\"}\n\tcurrentMetadataFilePath, _, 
err := unstructured.NestedString(existingConfig, topLevelMetadataFilePath...)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\tif len(currentMetadataFilePath) > 0 {\n\t\tif err := unstructured.SetNestedField(prevObservedConfig, currentMetadataFilePath, topLevelMetadataFilePath...); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\tobservedConfig := map[string]interface{}{}\n\tauthConfigNoDefaults, err := listers.AuthConfigLister.Get(\"cluster\")\n\tif errors.IsNotFound(err) {\n\t\tklog.Warningf(\"authentications.config.openshift.io/cluster: not found\")\n\t\treturn observedConfig, errs\n\t}\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t\treturn prevObservedConfig, errs\n\t}\n\n\tauthConfig := defaultAuthConfig(authConfigNoDefaults)\n\n\tvar (\n\t\tsourceNamespace string\n\t\tsourceConfigMap string\n\t\tstatusConfigMap string\n\t)\n\n\tspecConfigMap := authConfig.Spec.OAuthMetadata.Name\n\n\t// TODO: Add a case here for the KeyCloak type.\n\tswitch {\n\tcase len(authConfig.Status.IntegratedOAuthMetadata.Name) > 0 && authConfig.Spec.Type == configv1.AuthenticationTypeIntegratedOAuth:\n\t\tstatusConfigMap = authConfig.Status.IntegratedOAuthMetadata.Name\n\tdefault:\n\t\tklog.V(5).Infof(\"no integrated oauth metadata configmap observed from status\")\n\t}\n\n\t// Spec configMap takes precedence over Status.\n\tswitch {\n\tcase len(specConfigMap) > 0:\n\t\tsourceConfigMap = specConfigMap\n\t\tsourceNamespace = configNamespace\n\tcase len(statusConfigMap) > 0:\n\t\tsourceConfigMap = statusConfigMap\n\t\tsourceNamespace = managedNamespace\n\tdefault:\n\t\tklog.V(5).Infof(\"no authentication config metadata specified\")\n\t}\n\n\t// Sync the user or status-specified configMap to the well-known resting place that corresponds to the oauthMetadataFile path.\n\t// If neither are set, this updates the destination with an empty source, which deletes the destination resource.\n\terr = 
listers.ResourceSyncer().SyncConfigMap(\n\t\tresourcesynccontroller.ResourceLocation{\n\t\t\tNamespace: targetNamespaceName,\n\t\t\tName: \"oauth-metadata\",\n\t\t},\n\t\tresourcesynccontroller.ResourceLocation{\n\t\t\tNamespace: sourceNamespace,\n\t\t\tName: sourceConfigMap,\n\t\t},\n\t)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t\treturn prevObservedConfig, errs\n\t}\n\n\t// Unsets oauthMetadataFile if we had an empty source.\n\tif len(sourceConfigMap) == 0 {\n\t\treturn observedConfig, errs\n\t}\n\n\t// Set oauthMetadataFile.\n\tif err := unstructured.SetNestedField(observedConfig, oauthMetadataFilePath, topLevelMetadataFilePath...); err != nil {\n\t\trecorder.Eventf(\"ObserveAuthMetadataConfigMap\", \"Failed setting oauthMetadataFile: %v\", err)\n\t\terrs = append(errs, err)\n\t}\n\n\treturn observedConfig, errs\n}", "func (o *SyntheticsPrivateLocationCreationResponse) GetConfig() interface{} {\n\tif o == nil || o.Config == nil {\n\t\tvar ret interface{}\n\t\treturn ret\n\t}\n\treturn o.Config\n}", "func (conf *ThrapConfig) Clone() *ThrapConfig {\n\tif conf == nil {\n\t\treturn nil\n\t}\n\n\tc := &ThrapConfig{\n\t\tVCS: make(map[string]*VCSConfig, len(conf.VCS)),\n\t\tOrchestrator: make(map[string]*OrchestratorConfig, len(conf.Orchestrator)),\n\t\tRegistry: make(map[string]*RegistryConfig, len(conf.Registry)),\n\t\tSecrets: make(map[string]*SecretsConfig, len(conf.Secrets)),\n\t}\n\n\tfor k, v := range conf.VCS {\n\t\tc.VCS[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Orchestrator {\n\t\tc.Orchestrator[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Registry {\n\t\tc.Registry[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Secrets {\n\t\tc.Secrets[k] = v.Clone()\n\t}\n\n\treturn conf\n}", "func (s *store) parseConfig(raw json.RawMessage) (*config, error) {\n\tc := &config{}\n\tif err := sidecred.UnmarshalConfig(raw, &c); err != nil {\n\t\treturn nil, err\n\t}\n\tif c.SecretTemplate == \"\" {\n\t\tc.SecretTemplate = s.secretTemplate\n\t}\n\treturn c, 
nil\n}", "func WrapConfig(hfn http.HandlerFunc, cfg *config.APICfg, brk brokers.Broker, str stores.Store, mgr *oldPush.Manager, c push.Client) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tnStr := str.Clone()\n\t\tdefer nStr.Close()\n\t\tgorillaContext.Set(r, \"brk\", brk)\n\t\tgorillaContext.Set(r, \"str\", nStr)\n\t\tgorillaContext.Set(r, \"mgr\", mgr)\n\t\tgorillaContext.Set(r, \"apsc\", c)\n\t\tgorillaContext.Set(r, \"auth_resource\", cfg.ResAuth)\n\t\tgorillaContext.Set(r, \"auth_service_token\", cfg.ServiceToken)\n\t\tgorillaContext.Set(r, \"push_worker_token\", cfg.PushWorkerToken)\n\t\tgorillaContext.Set(r, \"push_enabled\", cfg.PushEnabled)\n\t\thfn.ServeHTTP(w, r)\n\n\t})\n}", "func populateConfig(p *ProviderConfig, credentials aws.Credentials) cty.Value {\n\tmerged := make(map[string]cty.Value)\n\tmerged[\"region\"] = cty.StringVal(p.Spec.Region)\n\tmerged[\"access_key\"] = cty.StringVal(credentials.AccessKeyID)\n\tmerged[\"secret_key\"] = cty.StringVal(credentials.SecretAccessKey)\n\tmerged[\"assume_role\"] = cty.ListValEmpty(assumeRoleObjectType())\n\tmerged[\"ignore_tags\"] = cty.ListValEmpty(ignoreTagsObjectType())\n\tmerged[\"endpoints\"] = cty.SetValEmpty(endpointSetElementType())\n\n\tmerged[\"token\"] = cty.NullVal(cty.String)\n\tmerged[\"allowed_account_ids\"] = cty.SetValEmpty(cty.String)\n\tmerged[\"forbidden_account_ids\"] = cty.SetValEmpty(cty.String)\n\tmerged[\"insecure\"] = cty.NullVal(cty.Bool)\n\tmerged[\"max_retries\"] = cty.NullVal(cty.Number)\n\tmerged[\"profile\"] = cty.NullVal(cty.String)\n\tmerged[\"s3_force_path_style\"] = cty.NullVal(cty.Bool)\n\tmerged[\"shared_credentials_file\"] = cty.NullVal(cty.String)\n\tmerged[\"skip_credentials_validation\"] = cty.NullVal(cty.Bool)\n\tmerged[\"skip_get_ec2_platforms\"] = cty.NullVal(cty.Bool)\n\tmerged[\"skip_metadata_api_check\"] = cty.NullVal(cty.Bool)\n\tmerged[\"skip_region_validation\"] = 
cty.NullVal(cty.Bool)\n\tmerged[\"skip_requesting_account_id\"] = cty.NullVal(cty.Bool)\n\tmerged[\"token\"] = cty.NullVal(cty.String)\n\n\treturn cty.ObjectVal(merged)\n}", "func (o *Config) Clone() *Config {\n\tvar ret Config\n\tif err := json.Unmarshal([]byte(o.ToJSON()), &ret); err != nil {\n\t\tpanic(err)\n\t}\n\treturn &ret\n}", "func NewDeviceEnrollmentWindowsHelloForBusinessConfiguration()(*DeviceEnrollmentWindowsHelloForBusinessConfiguration) {\n m := &DeviceEnrollmentWindowsHelloForBusinessConfiguration{\n DeviceEnrollmentConfiguration: *NewDeviceEnrollmentConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.deviceEnrollmentWindowsHelloForBusinessConfiguration\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func (c ConfChangeV2) AsV2() ConfChangeV2 { return c }", "func NewCombinedFromConfig(other map[string]interface{}) (Provider, error) {\n\tstatus, err := NewOpenWBStatusProviderFromConfig(other)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to := &combinedProvider{status: status}\n\treturn o, nil\n}", "func (o *Config) Copy(s Config) {\n o.Enable = s.Enable\n o.RouterId = s.RouterId\n o.AsNumber = s.AsNumber\n o.BfdProfile = s.BfdProfile\n o.RejectDefaultRoute = s.RejectDefaultRoute\n o.InstallRoute = s.InstallRoute\n o.AggregateMed = s.AggregateMed\n o.DefaultLocalPreference = s.DefaultLocalPreference\n o.AsFormat = s.AsFormat\n o.AlwaysCompareMed = s.AlwaysCompareMed\n o.DeterministicMedComparison = s.DeterministicMedComparison\n o.EcmpMultiAs = s.EcmpMultiAs\n o.EnforceFirstAs = s.EnforceFirstAs\n o.EnableGracefulRestart = s.EnableGracefulRestart\n o.StaleRouteTime = s.StaleRouteTime\n o.LocalRestartTime = s.LocalRestartTime\n o.MaxPeerRestartTime = s.MaxPeerRestartTime\n o.ReflectorClusterId = s.ReflectorClusterId\n o.ConfederationMemberAs = s.ConfederationMemberAs\n o.AllowRedistributeDefaultRoute = s.AllowRedistributeDefaultRoute\n}", "func NewThingConfig(resp *iot.CreateCertificateFromCsrOutput) *ThingConfig {\n\treturn 
&ThingConfig{\n\t\tCertificateArn: *resp.CertificateArn,\n\t\tCertificateID: *resp.CertificateId,\n\t\tCertificatePem: *resp.CertificatePem,\n\t}\n}", "func ConfigClone(c *tls.Config,) *tls.Config", "func NewConfig() *Config {\n\treturn &Config{\n\t\tTokenType: \"Bearer\",\n\t\tAllowedResponseTypes: []oauth2.ResponseType{oauth2.Code, oauth2.Token},\n\t\tAllowedGrantTypes: []oauth2.GrantType{\n\t\t\toauth2.AuthorizationCode,\n\t\t\toauth2.PasswordCredentials,\n\t\t\toauth2.ClientCredentials,\n\t\t\toauth2.Refreshing,\n\t\t},\n\t}\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tTokenType: \"Bearer\",\n\t\tAllowedResponseTypes: []oauth2.ResponseType{oauth2.Code, oauth2.Token},\n\t\tAllowedGrantTypes: []oauth2.GrantType{\n\t\t\toauth2.AuthorizationCode,\n\t\t\toauth2.PasswordCredentials,\n\t\t\toauth2.ClientCredentials,\n\t\t\toauth2.Refreshing,\n\t\t},\n\t}\n}", "func ConvertRawExtention2AppConfig(raw runtime.RawExtension) (*v1alpha2.ApplicationConfiguration, error) {\n\tac := &v1alpha2.ApplicationConfiguration{}\n\tb, err := raw.MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := json.Unmarshal(b, ac); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ac, nil\n}", "func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) {\n\tregMap := make(map[string]*Registry)\n\t// The order of the registries is not really important, but make it deterministic (the same for the same config file)\n\t// to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.\n\tregistryOrder := []string{}\n\n\tgetRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object\n\t\tvar err error\n\t\tlocation, err = parseLocation(location)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treg, exists := regMap[location]\n\t\tif !exists {\n\t\t\treg = &Registry{\n\t\t\t\tEndpoint: Endpoint{Location: location},\n\t\t\t\tMirrors: []Endpoint{},\n\t\t\t\tPrefix: 
location,\n\t\t\t}\n\t\t\tregMap[location] = reg\n\t\t\tregistryOrder = append(registryOrder, location)\n\t\t}\n\t\treturn reg, nil\n\t}\n\n\tfor _, blocked := range config.V1TOMLConfig.Block.Registries {\n\t\treg, err := getRegistry(blocked)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treg.Blocked = true\n\t}\n\tfor _, insecure := range config.V1TOMLConfig.Insecure.Registries {\n\t\treg, err := getRegistry(insecure)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treg.Insecure = true\n\t}\n\n\tres := &V2RegistriesConf{\n\t\tUnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries,\n\t}\n\tfor _, location := range registryOrder {\n\t\treg := regMap[location]\n\t\tres.Registries = append(res.Registries, *reg)\n\t}\n\treturn res, nil\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tTokenType: \"Bearer\",\n\t}\n}", "func (b *backend) Config(ctx context.Context, s logical.Storage) (*config, error) {\n\tentry, err := s.Get(ctx, \"config\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif entry == nil {\n\t\treturn nil, nil\n\t}\n\n\tvar result config\n\tif err := entry.DecodeJSON(&result); err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading configuration: %s\", err)\n\t}\n\n\tif len(result.TokenPolicies) == 0 && len(result.Policies) > 0 {\n\t\tresult.TokenPolicies = result.Policies\n\t}\n\n\treturn &result, nil\n}", "func ReadConfig(conf *api.ConfigMap) config.Configuration {\n\tif len(conf.Data) == 0 {\n\t\treturn config.NewDefault()\n\t}\n\n\tvar errors []int\n\tvar skipUrls []string\n\tvar whitelist []string\n\n\tif val, ok := conf.Data[customHTTPErrors]; ok {\n\t\tdelete(conf.Data, customHTTPErrors)\n\t\tfor _, i := range strings.Split(val, \",\") {\n\t\t\tj, err := strconv.Atoi(i)\n\t\t\tif err != nil {\n\t\t\t\tglog.Warningf(\"%v is not a valid http code: %v\", i, err)\n\t\t\t} else {\n\t\t\t\terrors = append(errors, j)\n\t\t\t}\n\t\t}\n\t}\n\tif val, ok := conf.Data[skipAccessLogUrls]; ok {\n\t\tdelete(conf.Data, 
skipAccessLogUrls)\n\t\tskipUrls = strings.Split(val, \",\")\n\t}\n\tif val, ok := conf.Data[whitelistSourceRange]; ok {\n\t\tdelete(conf.Data, whitelistSourceRange)\n\t\twhitelist = append(whitelist, strings.Split(val, \",\")...)\n\t}\n\n\tto := config.Configuration{}\n\tto.Backend = defaults.Backend{\n\t\tCustomHTTPErrors: filterErrors(errors),\n\t\tSkipAccessLogURLs: skipUrls,\n\t\tWhitelistSourceRange: whitelist,\n\t}\n\tdef := config.NewDefault()\n\tif err := mergo.Merge(&to, def); err != nil {\n\t\tglog.Warningf(\"unexpected error merging defaults: %v\", err)\n\t}\n\n\tmetadata := &mapstructure.Metadata{}\n\tdecoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{\n\t\tTagName: \"structs\",\n\t\tResult: &to,\n\t\tWeaklyTypedInput: true,\n\t\tMetadata: metadata,\n\t})\n\n\terr = decoder.Decode(conf.Data)\n\tif err != nil {\n\t\tglog.Infof(\"%v\", err)\n\t}\n\treturn to\n}", "func NewConfig(cfg config.Config) *oauth2.Config {\n\tconf := &oauth2.Config{\n\t\tClientID: cfg.ClientID,\n\t\tScopes: []string{\"authorization_code\"},\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tTokenURL: config.TokenURL,\n\t\t\tAuthURL: config.AuthURL,\n\t\t},\n\t}\n\treturn conf\n}", "func newConfig() (*config, error) {\n\tec2Metadata := ec2metadata.New(session.Must(session.NewSession()))\n\tregion, err := ec2Metadata.Region()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get region from ec2 metadata\")\n\t}\n\n\tinstanceID, err := ec2Metadata.GetMetadata(\"instance-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get instance id from ec2 metadata\")\n\t}\n\n\tmac, err := ec2Metadata.GetMetadata(\"mac\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get mac from ec2 metadata\")\n\t}\n\n\tsecurityGroups, err := ec2Metadata.GetMetadata(\"security-groups\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get security groups from ec2 metadata\")\n\t}\n\n\tinterfaces, err := 
ec2Metadata.GetMetadata(\"network/interfaces/macs\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get interfaces from ec2 metadata\")\n\t}\n\n\tsubnet, err := ec2Metadata.GetMetadata(\"network/interfaces/macs/\" + mac + \"/subnet-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get subnet from ec2 metadata\")\n\t}\n\n\tvpc, err := ec2Metadata.GetMetadata(\"network/interfaces/macs/\" + mac + \"/vpc-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get vpc from ec2 metadata\")\n\t}\n\n\treturn &config{region: region,\n\t\tsubnet: subnet,\n\t\tindex: int64(len(strings.Split(interfaces, \"\\n\"))),\n\t\tinstanceID: instanceID,\n\t\tsecurityGroups: strings.Split(securityGroups, \"\\n\"),\n\t\tvpc: vpc,\n\t}, nil\n}", "func (c *Config) Apply(b *Config) *Config {\n\tnewCfg := new(Config)\n\tif b.Token == \"\" {\n\t\tnewCfg.Token = c.Token\n\t} else {\n\t\tnewCfg.Token = b.Token\n\t}\n\tif b.URL == \"\" {\n\t\tnewCfg.URL = c.URL\n\t} else {\n\t\tnewCfg.URL = b.URL\n\t}\n\treturn newCfg\n}", "func (r *K8sRESTConfigFactory) Create(token string) (*rest.Config, error) {\n\tshallowCopy := *r.cfg\n\tshallowCopy.BearerToken = token\n\tif r.insecure {\n\t\tshallowCopy.TLSClientConfig = rest.TLSClientConfig{\n\t\t\tInsecure: r.insecure,\n\t\t}\n\t}\n\treturn &shallowCopy, nil\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsCaller) GetRegistrationConfig(opts *bind.CallOpts) (struct {\n\tEnabled bool\n\tWindowSizeInBlocks uint32\n\tAllowedPerWindow uint16\n\tKeeperRegistry common.Address\n\tMinLINKJuels *big.Int\n\tWindowStart uint64\n\tApprovedInCurrentWindow uint16\n}, error) {\n\tvar out []interface{}\n\terr := _UpkeepRegistrationRequests.contract.Call(opts, &out, \"getRegistrationConfig\")\n\n\toutstruct := new(struct {\n\t\tEnabled bool\n\t\tWindowSizeInBlocks uint32\n\t\tAllowedPerWindow uint16\n\t\tKeeperRegistry common.Address\n\t\tMinLINKJuels *big.Int\n\t\tWindowStart 
uint64\n\t\tApprovedInCurrentWindow uint16\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.Enabled = *abi.ConvertType(out[0], new(bool)).(*bool)\n\toutstruct.WindowSizeInBlocks = *abi.ConvertType(out[1], new(uint32)).(*uint32)\n\toutstruct.AllowedPerWindow = *abi.ConvertType(out[2], new(uint16)).(*uint16)\n\toutstruct.KeeperRegistry = *abi.ConvertType(out[3], new(common.Address)).(*common.Address)\n\toutstruct.MinLINKJuels = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int)\n\toutstruct.WindowStart = *abi.ConvertType(out[5], new(uint64)).(*uint64)\n\toutstruct.ApprovedInCurrentWindow = *abi.ConvertType(out[6], new(uint16)).(*uint16)\n\n\treturn *outstruct, err\n\n}", "func (o *DatadogMetricsAdapter) Config() (*apiserver.Config, error) {\n\tif err := o.SecureServing.MaybeDefaultWithSelfSignedCerts(\"localhost\", nil, []net.IP{net.ParseIP(\"127.0.0.1\")}); err != nil {\n\t\tlog.Errorf(\"Failed to create self signed AuthN/Z configuration %#v\", err)\n\t\treturn nil, fmt.Errorf(\"error creating self-signed certificates: %v\", err)\n\t}\n\n\tscheme := runtime.NewScheme()\n\tcodecs := serializer.NewCodecFactory(scheme)\n\tserverConfig := genericapiserver.NewConfig(codecs)\n\n\terr := o.SecureServing.ApplyTo(serverConfig)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while converting SecureServing type %v\", err)\n\t\treturn nil, err\n\t}\n\n\t// Get the certificates from the extension-apiserver-authentication ConfigMap\n\tif err := o.Authentication.ApplyTo(&serverConfig.Authentication, serverConfig.SecureServing, nil); err != nil {\n\t\tlog.Errorf(\"Could not create Authentication configuration: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tif err := o.Authorization.ApplyTo(&serverConfig.Authorization); err != nil {\n\t\tlog.Infof(\"Could not create Authorization configuration: %v\", err)\n\t\treturn nil, err\n\t}\n\n\treturn &apiserver.Config{\n\t\tGenericConfig: serverConfig,\n\t}, nil\n}", "func (client *XenClient) PBDGetOtherConfig(self string) 
(result map[string]string, err error) {\n\tobj, err := client.APICall(\"PBD.get_other_config\", self)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinterim := reflect.ValueOf(obj)\n\tresult = map[string]string{}\n\tfor _, key := range interim.MapKeys() {\n\t\tobj := interim.MapIndex(key)\n\t\tresult[key.String()] = obj.String()\n\t}\n\n\treturn\n}", "func (am *authManager) GetOAuth2Config(client kubernetes.Interface) (*oauth2.Config, []oauth2.AuthCodeOption, error) {\n\tvar err error\n\tclient, err = am.getK8sClient(client)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tauthSettings := am.settingsManager.GetAuthSettings(client)\n\tif authSettings == nil {\n\t\treturn nil, nil, fmt.Errorf(\"Auth settings not available\")\n\t}\n\tif !authSettings.Enabled {\n\t\treturn nil, nil, fmt.Errorf(\"OIDC Auth is not enabled\")\n\t}\n\treturn am.Oauth2Config(am.GetScopes(authSettings), client, authSettings)\n}", "func (r OAuthFlow) Clone() (*OAuthFlow, error) {\n\trbytes, err := yaml.Marshal(r)\n\tif err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tvalue := OAuthFlow{}\n\tif err := yaml.Unmarshal(rbytes, &value); err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\treturn &value, nil\n}", "func (sc *StoreConfig) ExtraConfig() map[string]interface{} {\n\treturn sc.OtherConfig\n}", "func (c Config) Copy() Config {\n\tdst := Config{}\n\tdst.Credentials = c.Credentials\n\tdst.Endpoint = c.Endpoint\n\tdst.Region = c.Region\n\tdst.DisableSSL = c.DisableSSL\n\tdst.ManualSend = c.ManualSend\n\tdst.HTTPClient = c.HTTPClient\n\tdst.LogHTTPBody = c.LogHTTPBody\n\tdst.LogLevel = c.LogLevel\n\tdst.Logger = c.Logger\n\tdst.MaxRetries = c.MaxRetries\n\tdst.DisableParamValidation = c.DisableParamValidation\n\tdst.DisableComputeChecksums = c.DisableComputeChecksums\n\tdst.S3ForcePathStyle = c.S3ForcePathStyle\n\tdst.DomainMode = c.DomainMode\n\tdst.SignerVersion = c.SignerVersion\n\treturn dst\n}", "func 
NewMicrosoftAuthenticatorAuthenticationMethodConfiguration()(*MicrosoftAuthenticatorAuthenticationMethodConfiguration) {\n m := &MicrosoftAuthenticatorAuthenticationMethodConfiguration{\n AuthenticationMethodConfiguration: *NewAuthenticationMethodConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.microsoftAuthenticatorAuthenticationMethodConfiguration\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func (src *HNCConfiguration) ConvertTo(dstRaw conversion.Hub) error {\n\tdst := dstRaw.(*v1a2.HNCConfiguration)\n\n\t// Spec\n\tsrcSpecTypes := src.Spec.Types\n\tdstSpecRscs := []v1a2.ResourceSpec{}\n\tfor _, st := range srcSpecTypes {\n\t\tdr := v1a2.ResourceSpec{}\n\t\t// Hack the group from APIVersion by removing the version, e.g.\n\t\t// 1) \"rbac.authorization.k8s.io/v1\" => \"rbac.authorization.k8s.io\";\n\t\t// 2) \"v1\" => \"\" (for core type).\n\t\tgv := strings.Split(st.APIVersion, \"/\")\n\t\tif len(gv) == 2 {\n\t\t\tdr.Group = gv[0]\n\t\t}\n\t\t// Hack the resource from Kind by using the lower case and plural form, e.g.\n\t\t// 1) \"Role\" => \"roles\"\n\t\t// 2) \"NetworkPolicy\" => \"networkpolicies\"\n\t\tlk := strings.ToLower(st.Kind)\n\t\tif strings.HasSuffix(lk, \"y\") {\n\t\t\tlk = strings.TrimSuffix(lk, \"y\") + \"ie\"\n\t\t}\n\t\tdr.Resource = lk + \"s\"\n\t\tdtm, ok := toV1A2[st.Mode]\n\t\tif !ok {\n\t\t\t// This should never happen with the enum schema validation.\n\t\t\tdtm = v1a2.Ignore\n\t\t}\n\t\tdr.Mode = dtm\n\t\t// We will only convert non-enforced types since in v1a2 we removed the\n\t\t// enforced types from spec. 
Having enforced types configured in the spec\n\t\t// would cause 'MultipleConfigurationsForType' condition.\n\t\tif !v1a2.IsEnforcedType(dr) {\n\t\t\tdstSpecRscs = append(dstSpecRscs, dr)\n\t\t}\n\t}\n\tdst.Spec.Resources = dstSpecRscs\n\n\t// We don't need to convert status because controllers will update it.\n\tdst.Status = v1a2.HNCConfigurationStatus{}\n\n\t// rote conversion - ObjectMeta\n\tdst.ObjectMeta = src.ObjectMeta\n\n\treturn nil\n}", "func buildTwitchAuthConfig() {\n\tTwitchOauthConfig = &oauth2.Config{\n\t\tClientID: Config.TwitchClientID,\n\t\tClientSecret: Config.TwitchClientSecret,\n\t\tScopes: []string{\"user_read\"},\n\t\tRedirectURL: Config.TwitchRedirectURL,\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: Config.TwitchAuthURL,\n\t\t\tTokenURL: Config.TwitchTokenURL,\n\t\t},\n\t}\n\tOauthStateString = str.RandStringRunes(10)\n}", "func soongGetConfig() *bobConfig {\n\tloadConfigOnce.Do(func() {\n\t\tonceLoadedConfig = &bobConfig{}\n\t\terr := onceLoadedConfig.Properties.LoadConfig(jsonPath)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif !onceLoadedConfig.Properties.GetBool(\"builder_soong\") {\n\t\t\tpanic(\"Build bootstrapped for Soong, but Soong builder has not been enabled\")\n\t\t}\n\t\tonceLoadedConfig.Generator = &soongGenerator{}\n\t})\n\treturn onceLoadedConfig\n\n}", "func expandGoogleChannelConfig(c *Client, f *GoogleChannelConfig) (map[string]interface{}, error) {\n\tm := make(map[string]interface{})\n\tres := f\n\t_ = res\n\tif v, err := dcl.DeriveField(\"projects/%s/locations/%s/googleChannelConfig\", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location)); err != nil {\n\t\treturn nil, fmt.Errorf(\"error expanding Name into name: %w\", err)\n\t} else if !dcl.IsEmptyValueIndirect(v) {\n\t\tm[\"name\"] = v\n\t}\n\tif v := f.CryptoKeyName; dcl.ValueShouldBeSent(v) {\n\t\tm[\"cryptoKeyName\"] = v\n\t}\n\tif v, err := dcl.EmptyValue(); err != nil {\n\t\treturn nil, fmt.Errorf(\"error expanding Project into 
project: %w\", err)\n\t} else if !dcl.IsEmptyValueIndirect(v) {\n\t\tm[\"project\"] = v\n\t}\n\tif v, err := dcl.EmptyValue(); err != nil {\n\t\treturn nil, fmt.Errorf(\"error expanding Location into location: %w\", err)\n\t} else if !dcl.IsEmptyValueIndirect(v) {\n\t\tm[\"location\"] = v\n\t}\n\n\treturn m, nil\n}", "func GetOverwriteConfig(b []byte) (*overwriteConfig, error) {\n\tvar config overwriteConfig\n\terr := json.Unmarshal(b, &config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failure parsing overwrite config: %s error: %w\", string(b), err)\n\t}\n\n\treturn &config, nil\n}", "func UnmarshalConfigurationPatch(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(ConfigurationPatch)\n\terr = core.UnmarshalPrimitive(m, \"api_key\", &obj.ApiKey)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"lets_encrypt_environment\", &obj.LetsEncryptEnvironment)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"lets_encrypt_private_key\", &obj.LetsEncryptPrivateKey)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"lets_encrypt_preferred_chain\", &obj.LetsEncryptPreferredChain)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"cloud_internet_services_apikey\", &obj.CloudInternetServicesApikey)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"cloud_internet_services_crn\", &obj.CloudInternetServicesCrn)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"classic_infrastructure_username\", &obj.ClassicInfrastructureUsername)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"classic_infrastructure_password\", &obj.ClassicInfrastructurePassword)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"max_ttl\", &obj.MaxTTL)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crl_expiry\", &obj.CrlExpiry)\n\tif err != nil 
{\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crl_disable\", &obj.CrlDisable)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crl_distribution_points_encoded\", &obj.CrlDistributionPointsEncoded)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"issuing_certificates_urls_encoded\", &obj.IssuingCertificatesUrlsEncoded)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allowed_secret_groups\", &obj.AllowedSecretGroups)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"ttl\", &obj.TTL)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_localhost\", &obj.AllowLocalhost)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allowed_domains\", &obj.AllowedDomains)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allowed_domains_template\", &obj.AllowedDomainsTemplate)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_bare_domains\", &obj.AllowBareDomains)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_subdomains\", &obj.AllowSubdomains)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_glob_domains\", &obj.AllowGlobDomains)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_any_name\", &obj.AllowAnyName)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"enforce_hostnames\", &obj.EnforceHostnames)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_ip_sans\", &obj.AllowIpSans)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allowed_uri_sans\", &obj.AllowedUriSans)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allowed_other_sans\", &obj.AllowedOtherSans)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"server_flag\", &obj.ServerFlag)\n\tif err 
!= nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"client_flag\", &obj.ClientFlag)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"code_signing_flag\", &obj.CodeSigningFlag)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"email_protection_flag\", &obj.EmailProtectionFlag)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"key_type\", &obj.KeyType)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"key_bits\", &obj.KeyBits)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"key_usage\", &obj.KeyUsage)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"ext_key_usage\", &obj.ExtKeyUsage)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"ext_key_usage_oids\", &obj.ExtKeyUsageOids)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"use_csr_common_name\", &obj.UseCsrCommonName)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"use_csr_sans\", &obj.UseCsrSans)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"ou\", &obj.Ou)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"organization\", &obj.Organization)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"country\", &obj.Country)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locality\", &obj.Locality)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"province\", &obj.Province)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"street_address\", &obj.StreetAddress)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"postal_code\", &obj.PostalCode)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"serial_number\", &obj.SerialNumber)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, 
\"require_cn\", &obj.RequireCn)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"policy_identifiers\", &obj.PolicyIdentifiers)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"basic_constraints_valid_for_non_ca\", &obj.BasicConstraintsValidForNonCa)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"not_before_duration\", &obj.NotBeforeDuration)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func NewSpecConfig(config *protoform.Config, kubeClient *kubernetes.Clientset, opssightClient *opssightclientset.Clientset, hubClient *hubclientset.Clientset, opssight *opssightapi.OpsSight, isBlackDuckClusterScope bool, dryRun bool) *SpecConfig {\n\topssightSpec := &opssight.Spec\n\tname := opssight.Name\n\tnames := map[string]string{\n\t\t\"perceptor\": \"core\",\n\t\t\"pod-perceiver\": \"pod-processor\",\n\t\t\"image-perceiver\": \"image-processor\",\n\t\t\"artifactory-perceiver\": \"artifactory-processor\",\n\t\t\"quay-perceiver\": \"quay-processor\",\n\t\t\"scanner\": \"scanner\",\n\t\t\"perceptor-imagefacade\": \"image-getter\",\n\t\t\"skyfire\": \"skyfire\",\n\t\t\"prometheus\": \"prometheus\",\n\t\t\"configmap\": \"opssight\",\n\t\t\"perceiver-service-account\": \"processor\",\n\t}\n\tbaseImageURL := \"docker.io/blackducksoftware\"\n\tversion := \"2.2.5\"\n\timages := map[string]string{\n\t\t\"perceptor\": fmt.Sprintf(\"%s/opssight-core:%s\", baseImageURL, version),\n\t\t\"pod-perceiver\": fmt.Sprintf(\"%s/opssight-pod-processor:%s\", baseImageURL, version),\n\t\t\"image-perceiver\": fmt.Sprintf(\"%s/opssight-image-processor:%s\", baseImageURL, version),\n\t\t\"artifactory-perceiver\": fmt.Sprintf(\"%s/opssight-artifactory-processor:%s\", baseImageURL, version),\n\t\t\"quay-perceiver\": fmt.Sprintf(\"%s/opssight-quay-processor:%s\", baseImageURL, version),\n\t\t\"scanner\": fmt.Sprintf(\"%s/opssight-scanner:%s\", baseImageURL, 
version),\n\t\t\"perceptor-imagefacade\": fmt.Sprintf(\"%s/opssight-image-getter:%s\", baseImageURL, version),\n\t\t\"skyfire\": \"gcr.io/saas-hub-stg/blackducksoftware/pyfire:master\",\n\t\t\"prometheus\": \"docker.io/prom/prometheus:v2.1.0\",\n\t}\n\tif opssightSpec.IsUpstream {\n\t\tnames = map[string]string{\n\t\t\t\"perceptor\": \"perceptor\",\n\t\t\t\"pod-perceiver\": \"pod-perceiver\",\n\t\t\t\"image-perceiver\": \"image-perceiver\",\n\t\t\t\"artifactory-perceiver\": \"artifactory-perceiver\",\n\t\t\t\"quay-perceiver\": \"quay-perceiver\",\n\t\t\t\"scanner\": \"scanner\",\n\t\t\t\"perceptor-imagefacade\": \"image-facade\",\n\t\t\t\"skyfire\": \"skyfire\",\n\t\t\t\"prometheus\": \"prometheus\",\n\t\t\t\"configmap\": \"perceptor\",\n\t\t\t\"perceiver-service-account\": \"perceiver\",\n\t\t}\n\t\tbaseImageURL = \"gcr.io/saas-hub-stg/blackducksoftware\"\n\t\tversion = \"master\"\n\t\timages = map[string]string{\n\t\t\t\"perceptor\": fmt.Sprintf(\"%s/perceptor:%s\", baseImageURL, version),\n\t\t\t\"pod-perceiver\": fmt.Sprintf(\"%s/pod-perceiver:%s\", baseImageURL, version),\n\t\t\t\"image-perceiver\": fmt.Sprintf(\"%s/image-perceiver:%s\", baseImageURL, version),\n\t\t\t\"artifactory-perceiver\": fmt.Sprintf(\"%s/artifactory-perceiver:%s\", baseImageURL, version),\n\t\t\t\"quay-perceiver\": fmt.Sprintf(\"%s/quay-perceiver:%s\", baseImageURL, version),\n\t\t\t\"scanner\": fmt.Sprintf(\"%s/perceptor-scanner:%s\", baseImageURL, version),\n\t\t\t\"perceptor-imagefacade\": fmt.Sprintf(\"%s/perceptor-imagefacade:%s\", baseImageURL, version),\n\t\t\t\"skyfire\": \"gcr.io/saas-hub-stg/blackducksoftware/pyfire:master\",\n\t\t\t\"prometheus\": \"docker.io/prom/prometheus:v2.1.0\"}\n\t}\n\n\tfor componentName, componentImage := range images {\n\t\timage := appsutil.GenerateImageTag(componentImage, opssightSpec.ImageRegistries, opssightSpec.RegistryConfiguration)\n\t\timages[componentName] = image\n\t}\n\n\tconfigMap := &MainOpssightConfigMap{\n\t\tLogLevel: 
opssightSpec.LogLevel,\n\t\tBlackDuck: &BlackDuckConfig{\n\t\t\tConnectionsEnvironmentVariableName: opssightSpec.Blackduck.ConnectionsEnvironmentVariableName,\n\t\t\tTLSVerification: opssightSpec.Blackduck.TLSVerification,\n\t\t},\n\t\tImageFacade: &ImageFacadeConfig{\n\t\t\tCreateImagesOnly: false,\n\t\t\tHost: \"localhost\",\n\t\t\tPort: 3004,\n\t\t\tImagePullerType: opssightSpec.ScannerPod.ImageFacade.ImagePullerType,\n\t\t},\n\t\tPerceiver: &PerceiverConfig{\n\t\t\tCertificate: opssightSpec.Perceiver.Certificate,\n\t\t\tCertificateKey: opssightSpec.Perceiver.CertificateKey,\n\t\t\tImage: &ImagePerceiverConfig{},\n\t\t\tPod: &PodPerceiverConfig{\n\t\t\t\tNamespaceFilter: opssightSpec.Perceiver.PodPerceiver.NamespaceFilter,\n\t\t\t},\n\t\t\tArtifactory: &ArtifactoryPerceiverConfig{\n\t\t\t\tDumper: opssightSpec.Perceiver.EnableArtifactoryPerceiverDumper,\n\t\t\t},\n\t\t\tAnnotationIntervalSeconds: opssightSpec.Perceiver.AnnotationIntervalSeconds,\n\t\t\tDumpIntervalMinutes: opssightSpec.Perceiver.DumpIntervalMinutes,\n\t\t\tPort: 3002,\n\t\t},\n\t\tPerceptor: &PerceptorConfig{\n\t\t\tTimings: &PerceptorTimingsConfig{\n\t\t\t\tCheckForStalledScansPauseHours: opssightSpec.Perceptor.CheckForStalledScansPauseHours,\n\t\t\t\tClientTimeoutMilliseconds: opssightSpec.Perceptor.ClientTimeoutMilliseconds,\n\t\t\t\tModelMetricsPauseSeconds: opssightSpec.Perceptor.ModelMetricsPauseSeconds,\n\t\t\t\tStalledScanClientTimeoutHours: opssightSpec.Perceptor.StalledScanClientTimeoutHours,\n\t\t\t\tUnknownImagePauseMilliseconds: opssightSpec.Perceptor.UnknownImagePauseMilliseconds,\n\t\t\t},\n\t\t\tHost: util.GetResourceName(name, util.OpsSightName, names[\"perceptor\"]),\n\t\t\tPort: 3001,\n\t\t\tUseMockMode: false,\n\t\t},\n\t\tScanner: &ScannerConfig{\n\t\t\tBlackDuckClientTimeoutSeconds: opssightSpec.ScannerPod.Scanner.ClientTimeoutSeconds,\n\t\t\tImageDirectory: opssightSpec.ScannerPod.ImageDirectory,\n\t\t\tPort: 3003,\n\t\t},\n\t\tSkyfire: 
&SkyfireConfig{\n\t\t\tBlackDuckClientTimeoutSeconds: opssightSpec.Skyfire.HubClientTimeoutSeconds,\n\t\t\tBlackDuckDumpPauseSeconds: opssightSpec.Skyfire.HubDumpPauseSeconds,\n\t\t\tKubeDumpIntervalSeconds: opssightSpec.Skyfire.KubeDumpIntervalSeconds,\n\t\t\tPerceptorDumpIntervalSeconds: opssightSpec.Skyfire.PerceptorDumpIntervalSeconds,\n\t\t\tPort: 3005,\n\t\t\tPrometheusPort: 3006,\n\t\t\tUseInClusterConfig: true,\n\t\t},\n\t}\n\treturn &SpecConfig{\n\t\tconfig: config,\n\t\tkubeClient: kubeClient,\n\t\topssightClient: opssightClient,\n\t\thubClient: hubClient,\n\t\topssight: opssight,\n\t\tconfigMap: configMap,\n\t\tisBlackDuckClusterScope: isBlackDuckClusterScope,\n\t\tdryRun: dryRun,\n\t\tnames: names,\n\t\timages: images,\n\t}\n}", "func CloneConfig(host string, verifyTLS bool, apiKey string, project string, config string, name string) (models.ConfigInfo, Error) {\n\tpostBody := map[string]interface{}{\"name\": name}\n\tbody, err := json.Marshal(postBody)\n\tif err != nil {\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Invalid config info\"}\n\t}\n\n\tvar params []queryParam\n\tparams = append(params, queryParam{Key: \"project\", Value: project})\n\tparams = append(params, queryParam{Key: \"config\", Value: config})\n\n\turl, err := generateURL(host, \"/v3/configs/config/clone\", params)\n\tif err != nil {\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Unable to generate url\"}\n\t}\n\n\tstatusCode, _, response, err := PostRequest(url, verifyTLS, apiKeyHeader(apiKey), body)\n\tif err != nil {\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Unable to clone config\", Code: statusCode}\n\t}\n\n\tvar result map[string]interface{}\n\terr = json.Unmarshal(response, &result)\n\tif err != nil {\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\n\tconfigInfo, ok := result[\"config\"].(map[string]interface{})\n\tif !ok {\n\t\treturn models.ConfigInfo{}, 
Error{Err: fmt.Errorf(\"Unexpected type parsing config info, expected map[string]interface{}, got %T\", result[\"config\"]), Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\tinfo := models.ParseConfigInfo(configInfo)\n\treturn info, Error{}\n}", "func NewConfig(data map[string]string) (settings *Config) {\n cfg := &Config{\n ConsumerKey: data[\"consumer_key\"],\n ConsumerSecret: data[\"consumer_secret\"],\n }\n\n // save access token if defined\n if atoken, ok := data[\"access_token\"]; ok {\n cfg.AccessToken = atoken\n }\n\n // save access token secret if defined\n if asecret, ok := data[\"access_secret\"]; ok {\n cfg.AccessSecret = asecret\n }\n\n // save debug flag if defined\n if debug, ok := data[\"debug\"]; ok && debug == \"on\" {\n cfg.Debug = true\n }\n\n return cfg\n}", "func GetConfig(w http.ResponseWriter, r *http.Request) {\n\t//apiContext := api.GetApiContext(r)\n\tauthHeader := r.Header.Get(\"Authorization\")\n\tvar accessToken string\n\t// header value format will be \"Bearer <token>\"\n\tif authHeader != \"\" {\n\t\tif !strings.HasPrefix(authHeader, \"Bearer \") {\n\t\t\tlog.Errorf(\"GetMyIdentities Failed to find Bearer token %v\", authHeader)\n\t\t\tReturnHTTPError(w, r, http.StatusUnauthorized, \"Unauthorized, please provide a valid token\")\n\t\t}\n\t\taccessToken = strings.TrimPrefix(authHeader, \"Bearer \")\n\t}\n\n\tconfig, err := server.GetConfig(accessToken)\n\tif err == nil {\n\t\t//apiContext.Write(&config) -> apicontext cannot include nested structures\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tjson.NewEncoder(w).Encode(config)\n\t} else {\n\t\t//failed to get the config\n\t\tlog.Debugf(\"GetConfig failed with error %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusInternalServerError, \"Failed to get the auth config\")\n\t}\n}", "func (c *Config) Copy() (*Config, error) {\n\tnewC := New()\n\tc.Viper.Unmarshal(&newC.plainTextConfig)\n\tc.Viper.Unmarshal(&newC.secureConfig)\n\treturn newC, nil\n}", 
"func (m *WellKnown) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAuthorizationEndpoint(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIDTokenSigningAlgValuesSupported(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIssuer(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateJwksURI(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateResponseTypesSupported(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSubjectTypesSupported(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTokenEndpoint(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *TelemetryConfig) Copy() *TelemetryConfig {\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\tvar o TelemetryConfig\n\to.MetricsPrefix = StringCopy(c.MetricsPrefix)\n\n\tif c.Stdout != nil {\n\t\to.Stdout = c.Stdout.Copy()\n\t}\n\tif c.DogStatsD != nil {\n\t\to.DogStatsD = c.DogStatsD.Copy()\n\t}\n\tif c.Prometheus != nil {\n\t\to.Prometheus = c.Prometheus.Copy()\n\t}\n\n\treturn &o\n}", "func (e *EndpointCore) ExtraConfig() (map[string]interface{}, error) {\n\treturn utils.MergeInterfaceMaps(\n\t\tmap[string]interface{}{\n\t\t\t\"host\": e.Host,\n\t\t\t\"port\": e.Port,\n\t\t\t\"name\": utils.FirstNonEmpty(e.Name, string(e.ID)),\n\t\t}, e.Configuration), nil\n}", "func merge(existing, kind *Config) error {\n\t// verify assumptions about kubeadm / kind kubeconfigs\n\tif err := checkKubeadmExpectations(kind); err != nil {\n\t\treturn err\n\t}\n\n\t// insert or append cluster entry\n\tshouldAppend := true\n\tfor i := range existing.Clusters {\n\t\tif existing.Clusters[i].Name == kind.Clusters[0].Name {\n\t\t\texisting.Clusters[i] = kind.Clusters[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif 
shouldAppend {\n\t\texisting.Clusters = append(existing.Clusters, kind.Clusters[0])\n\t}\n\n\t// insert or append user entry\n\tshouldAppend = true\n\tfor i := range existing.Users {\n\t\tif existing.Users[i].Name == kind.Users[0].Name {\n\t\t\texisting.Users[i] = kind.Users[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif shouldAppend {\n\t\texisting.Users = append(existing.Users, kind.Users[0])\n\t}\n\n\t// insert or append context entry\n\tshouldAppend = true\n\tfor i := range existing.Contexts {\n\t\tif existing.Contexts[i].Name == kind.Contexts[0].Name {\n\t\t\texisting.Contexts[i] = kind.Contexts[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif shouldAppend {\n\t\texisting.Contexts = append(existing.Contexts, kind.Contexts[0])\n\t}\n\n\t// set the current context\n\texisting.CurrentContext = kind.CurrentContext\n\n\t// TODO: We should not need this, but it allows broken clients that depend\n\t// on apiVersion and kind to work. Notably the upstream javascript client.\n\t// See: https://github.com/kubernetes-sigs/kind/issues/1242\n\tif len(existing.OtherFields) == 0 {\n\t\t// TODO: Should we be deep-copying? for now we don't need to\n\t\t// and doing so would be a pain (re and de-serialize maybe?) 
:shrug:\n\t\texisting.OtherFields = kind.OtherFields\n\t}\n\n\treturn nil\n}", "func (in *AdmissionWebhookConfigurationSpec) DeepCopy() *AdmissionWebhookConfigurationSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AdmissionWebhookConfigurationSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (c *Config) ToStruct(pointer interface{}, mapping ...map[string]string) error {\n\tif j := c.getJson(); j != nil {\n\t\treturn j.ToStruct(pointer, mapping...)\n\t}\n\treturn errors.New(\"configuration not found\")\n}", "func (client *XenClient) SMGetOtherConfig(self string) (result map[string]string, err error) {\n\tobj, err := client.APICall(\"SM.get_other_config\", self)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinterim := reflect.ValueOf(obj)\n\tresult = map[string]string{}\n\tfor _, key := range interim.MapKeys() {\n\t\tobj := interim.MapIndex(key)\n\t\tresult[key.String()] = obj.String()\n\t}\n\n\treturn\n}", "func GetWorkplaceSettings(host string, verifyTLS bool, apiKey string) (models.WorkplaceSettings, Error) {\n\turl, err := generateURL(host, \"/v3/workplace\", nil)\n\tif err != nil {\n\t\treturn models.WorkplaceSettings{}, Error{Err: err, Message: \"Unable to generate url\"}\n\t}\n\n\tstatusCode, _, response, err := GetRequest(url, verifyTLS, apiKeyHeader(apiKey))\n\tif err != nil {\n\t\treturn models.WorkplaceSettings{}, Error{Err: err, Message: \"Unable to fetch workplace settings\", Code: statusCode}\n\t}\n\n\tvar result map[string]interface{}\n\terr = json.Unmarshal(response, &result)\n\tif err != nil {\n\t\treturn models.WorkplaceSettings{}, Error{Err: err, Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\n\tworkplace, ok := result[\"workplace\"].(map[string]interface{})\n\tif !ok {\n\t\treturn models.WorkplaceSettings{}, Error{Err: fmt.Errorf(\"Unexpected type parsing WorkplaceSettings, expected map[string]interface{}, got %T\", result[\"workplace\"]), Message: \"Unable to parse API response\", Code: 
statusCode}\n\t}\n\tsettings := models.ParseWorkplaceSettings(workplace)\n\treturn settings, Error{}\n}", "func populateClientConfig(config *Config) *Config {\n\tif config == nil {\n\t\tconfig = &Config{}\n\t}\n\tversions := config.Versions\n\tif len(versions) == 0 {\n\t\tversions = protocol.SupportedVersions\n\t}\n\n\thandshakeTimeout := protocol.DefaultHandshakeTimeout\n\tif config.HandshakeTimeout != 0 {\n\t\thandshakeTimeout = config.HandshakeTimeout\n\t}\n\tidleTimeout := protocol.DefaultIdleTimeout\n\tif config.IdleTimeout != 0 {\n\t\tidleTimeout = config.IdleTimeout\n\t}\n\n\tmaxReceiveStreamFlowControlWindow := config.MaxReceiveStreamFlowControlWindow\n\tif maxReceiveStreamFlowControlWindow == 0 {\n\t\tmaxReceiveStreamFlowControlWindow = protocol.DefaultMaxReceiveStreamFlowControlWindowClient\n\t}\n\tmaxReceiveConnectionFlowControlWindow := config.MaxReceiveConnectionFlowControlWindow\n\tif maxReceiveConnectionFlowControlWindow == 0 {\n\t\tmaxReceiveConnectionFlowControlWindow = protocol.DefaultMaxReceiveConnectionFlowControlWindowClient\n\t}\n\n\treturn &Config{\n\t\tVersions: versions,\n\t\tHandshakeTimeout: handshakeTimeout,\n\t\tIdleTimeout: idleTimeout,\n\t\tRequestConnectionIDTruncation: config.RequestConnectionIDTruncation,\n\t\tMaxReceiveStreamFlowControlWindow: maxReceiveStreamFlowControlWindow,\n\t\tMaxReceiveConnectionFlowControlWindow: maxReceiveConnectionFlowControlWindow,\n\t\tKeepAlive: config.KeepAlive,\n\t\tCacheHandshake: config.CacheHandshake,\n\t\tCreatePaths: config.CreatePaths,\n\t}\n}", "func NewVpnConfiguration()(*VpnConfiguration) {\n m := &VpnConfiguration{\n DeviceConfiguration: *NewDeviceConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.vpnConfiguration\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func (conf *RegistryConfig) Clone() *RegistryConfig {\n\tif conf == nil {\n\t\treturn nil\n\t}\n\trc := &RegistryConfig{\n\t\tID: conf.ID,\n\t\tProvider: conf.Provider,\n\t\tAddr: conf.Addr,\n\t\tRepo: 
conf.Repo.Clone(),\n\t\tConfig: make(map[string]interface{}, len(conf.Config)),\n\t}\n\tfor k, v := range conf.Config {\n\t\trc.Config[k] = v\n\t}\n\n\treturn rc\n}", "func (*OpenconfigOfficeAp_System_Aaa_Authorization_Config) IsYANGGoStruct() {}", "func (o *SyntheticsPrivateLocationCreationResponse) GetConfigOk() (*interface{}, bool) {\n\tif o == nil || o.Config == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Config, true\n}", "func NewOAuth2(config *wx.AppConfig, client *http.Client) (*OAuth2, error) {\n\n\tif config == nil || config.AppID == \"\" || config.AppSecret == \"\" {\n\n\t\treturn nil, fmt.Errorf(\"NewOAuth2: config's AppID/AppSecret missing\")\n\n\t}\n\n\tif client == nil {\n\n\t\tclient = http.DefaultClient\n\n\t}\n\n\treturn &OAuth2{\n\t\tconfig: config,\n\t\tclient: client,\n\t}, nil\n\n}", "func (c *VaultConfig) Merge(o *VaultConfig) *VaultConfig {\n\tif c == nil {\n\t\tif o == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn o.Copy()\n\t}\n\n\tif o == nil {\n\t\treturn c.Copy()\n\t}\n\n\tr := c.Copy()\n\n\tif o.Address != nil {\n\t\tr.Address = o.Address\n\t}\n\n\tif o.Enabled != nil {\n\t\tr.Enabled = o.Enabled\n\t}\n\n\tif o.Namespace != nil {\n\t\tr.Namespace = o.Namespace\n\t}\n\n\tif o.RenewToken != nil {\n\t\tr.RenewToken = o.RenewToken\n\t}\n\n\tif o.Retry != nil {\n\t\tr.Retry = r.Retry.Merge(o.Retry)\n\t}\n\n\tif o.SSL != nil {\n\t\tr.SSL = r.SSL.Merge(o.SSL)\n\t}\n\n\tif o.Token != nil {\n\t\tr.Token = o.Token\n\t}\n\n\tif o.VaultAgentTokenFile != nil {\n\t\tr.VaultAgentTokenFile = o.VaultAgentTokenFile\n\t}\n\n\tif o.Transport != nil {\n\t\tr.Transport = r.Transport.Merge(o.Transport)\n\t}\n\n\tif o.UnwrapToken != nil {\n\t\tr.UnwrapToken = o.UnwrapToken\n\t}\n\n\tif o.DefaultLeaseDuration != nil {\n\t\tr.DefaultLeaseDuration = o.DefaultLeaseDuration\n\t}\n\n\tif o.LeaseRenewalThreshold != nil {\n\t\tr.LeaseRenewalThreshold = o.LeaseRenewalThreshold\n\t}\n\n\tif o.K8SAuthRoleName != nil {\n\t\tr.K8SAuthRoleName = 
o.K8SAuthRoleName\n\t}\n\n\tif o.K8SServiceAccountToken != nil {\n\t\tr.K8SServiceAccountToken = o.K8SServiceAccountToken\n\t}\n\n\tif o.K8SServiceAccountTokenPath != nil {\n\t\tr.K8SServiceAccountTokenPath = o.K8SServiceAccountTokenPath\n\t}\n\n\tif o.K8SServiceMountPath != nil {\n\t\tr.K8SServiceMountPath = o.K8SServiceMountPath\n\t}\n\n\treturn r\n}", "func NewConfig(configFile string) (*Config, error) {\n\n\tcfg := &Config{\n\t\tHost: \"0.0.0.0\",\n\t\tPort: 8080,\n\t\tAllowEmptyClientSecret: false,\n\t\tScopes: []string{\"openid\", \"profile\", \"email\", \"offline_access\"},\n\t\tUsernameClaim: \"nickname\",\n\t\tEmailClaim: \"\",\n\t\tServeTLS: false,\n\t\tCertFile: \"/etc/gangway/tls/tls.crt\",\n\t\tKeyFile: \"/etc/gangway/tls/tls.key\",\n\t\tClusterCAPath: \"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\",\n\t\tHTTPPath: \"\",\n\t}\n\n\tif configFile != \"\" {\n\t\tdata, err := ioutil.ReadFile(configFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = yaml.Unmarshal([]byte(data), cfg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\terr := envconfig.Process(\"gangway\", cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cfg.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check for trailing slash on HTTPPath and remove\n\tcfg.HTTPPath = strings.TrimRight(cfg.HTTPPath, \"/\")\n\n\treturn cfg, nil\n}", "func loadOAuthConfig(oauthConfigFile string) (*oauth.Config, error) {\n\terrFmt := \"failed to read OAuth config file: %s\"\n\tfileContents, err := ioutil.ReadFile(oauthConfigFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(errFmt, err)\n\t}\n\tvar decodedJson map[string]struct {\n\t\tAuthURL string `json:\"auth_uri\"`\n\t\tClientId string `json:\"client_id\"`\n\t\tClientSecret string `json:\"client_secret\"`\n\t\tTokenURL string `json:\"token_uri\"`\n\t}\n\tif err := json.Unmarshal(fileContents, &decodedJson); err != nil {\n\t\treturn nil, fmt.Errorf(errFmt, err)\n\t}\n\tconfig, ok := 
decodedJson[\"web\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(errFmt, err)\n\t}\n\treturn &oauth.Config{\n\t\tClientId: config.ClientId,\n\t\tClientSecret: config.ClientSecret,\n\t\tScope: strings.Join(apiScope, \" \"),\n\t\tAuthURL: config.AuthURL,\n\t\tTokenURL: config.TokenURL,\n\t}, nil\n}", "func GetWotoConfig() *wotoConfiguration {\n\tif _wotoConfig != nil {\n\t\treturn _wotoConfig\n\t}\n\t_w := wotoConfiguration{\n\t\tUIDKeyName: [MaxUIDIndex]string{},\n\t\tLastUID: [MaxUIDIndex]players.UID{},\n\t}\n\tfor i := BaseUIDIndex - UIDIndexOffSet;\n\t\t\t\ti <= MaxUIDIndex - UIDIndexOffSet; i++ {\n\t\t_w.UIDKeyName[i] = UIDIndexSuffix + strconv.Itoa(i + OffSetTokenParts)\n\t\t_w.LastUID[i] = players.GetMinimumUID(uint8(i + OffSetTokenParts))\n\t}\n\treturn &_w\n}", "func (c *VaultConfig) Copy() *VaultConfig {\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\tvar o VaultConfig\n\to.Address = c.Address\n\n\to.Enabled = c.Enabled\n\n\to.Namespace = c.Namespace\n\n\to.RenewToken = c.RenewToken\n\n\tif c.Retry != nil {\n\t\to.Retry = c.Retry.Copy()\n\t}\n\n\tif c.SSL != nil {\n\t\to.SSL = c.SSL.Copy()\n\t}\n\n\to.Token = c.Token\n\n\to.VaultAgentTokenFile = c.VaultAgentTokenFile\n\n\tif c.Transport != nil {\n\t\to.Transport = c.Transport.Copy()\n\t}\n\n\to.UnwrapToken = c.UnwrapToken\n\n\to.DefaultLeaseDuration = c.DefaultLeaseDuration\n\to.LeaseRenewalThreshold = c.LeaseRenewalThreshold\n\n\to.K8SAuthRoleName = c.K8SAuthRoleName\n\to.K8SServiceAccountToken = c.K8SServiceAccountToken\n\to.K8SServiceAccountTokenPath = c.K8SServiceAccountTokenPath\n\to.K8SServiceMountPath = c.K8SServiceMountPath\n\n\treturn &o\n}", "func NewAndroidWorkProfileGeneralDeviceConfiguration()(*AndroidWorkProfileGeneralDeviceConfiguration) {\n m := &AndroidWorkProfileGeneralDeviceConfiguration{\n DeviceConfiguration: *NewDeviceConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.androidWorkProfileGeneralDeviceConfiguration\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func 
GetConfiguration() *Configuration {\n\tconfigOnce.Do(func() {\n\t\tviper.SetConfigName(\"config\")\n\t\tviper.SetConfigType(\"yaml\")\n\t\tviper.AddConfigPath(\"./config\")\n\t\terr := viper.ReadInConfig()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"fatal error in reading config file: %s\", err))\n\t\t}\n\n\t\tconfig = &Configuration{\n\t\t\tAddr: viper.GetString(\"server.addr\"),\n\t\t\tMachineKey: viper.GetString(\"server.machineKey\"),\n\t\t\tOAuthServer: &OAuthServer{\n\t\t\t\tOrgURI: viper.GetString(\"oauthServer.orgUri\"),\n\t\t\t\tClientID: viper.GetString(\"oauthServer.clientId\"),\n\t\t\t\tClientSecret: viper.GetString(\"oauthServer.clientSecret\"),\n\t\t\t\tCallback: viper.GetString(\"oauthServer.callback\"),\n\t\t\t\tOAuthCallback: viper.GetString(\"oauthServer.oauthCallback\"),\n\t\t\t\tOAuthPKCECallback: viper.GetString(\"oauthServer.oauthPKCECallback\"),\n\t\t\t\tOAuthImplicitCallback: viper.GetString(\"oauthServer.oauthImplicitCallback\"),\n\t\t\t\tAuthorizeServer: viper.GetString(\"oauthServer.AuthorizeServer\"),\n\t\t\t},\n\t\t}\n\t})\n\treturn config\n}", "func MergeHealthcheckConfig(a, b Healthcheck) Healthcheck {\n\tresult := b\n\tif result.Interval == \"\" {\n\t\tresult.Interval = a.Interval\n\t}\n\tif result.Timeout == \"\" {\n\t\tresult.Timeout = a.Timeout\n\t}\n\tif len(result.Rules) == 0 {\n\t\tresult.Rules = a.Rules\n\t}\n\treturn result\n}", "func (in *PingPongSpec) DeepCopy() *PingPongSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PingPongSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func newCanaryConfig(provider config.Provider) (*Config, error) {\n\traw := provider.Get(ConfigurationKey)\n\tvar cfg Config\n\tif err := raw.Populate(&cfg); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load canary configuration with error: %v\", err)\n\t}\n\tif err := cfg.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &cfg, nil\n}", "func GroupsConfigFromStruct(cfg *config.Config) map[string]interface{} {\n\treturn 
map[string]interface{}{\n\t\t\"core\": map[string]interface{}{\n\t\t\t\"tracing_enabled\": cfg.Tracing.Enabled,\n\t\t\t\"tracing_exporter\": cfg.Tracing.Type,\n\t\t\t\"tracing_endpoint\": cfg.Tracing.Endpoint,\n\t\t\t\"tracing_collector\": cfg.Tracing.Collector,\n\t\t\t\"tracing_service_name\": cfg.Service.Name,\n\t\t},\n\t\t\"shared\": map[string]interface{}{\n\t\t\t\"jwt_secret\": cfg.TokenManager.JWTSecret,\n\t\t\t\"gatewaysvc\": cfg.Reva.Address,\n\t\t\t\"skip_user_groups_in_token\": cfg.SkipUserGroupsInToken,\n\t\t\t\"grpc_client_options\": cfg.Reva.GetGRPCClientConfig(),\n\t\t},\n\t\t\"grpc\": map[string]interface{}{\n\t\t\t\"network\": cfg.GRPC.Protocol,\n\t\t\t\"address\": cfg.GRPC.Addr,\n\t\t\t\"tls_settings\": map[string]interface{}{\n\t\t\t\t\"enabled\": cfg.GRPC.TLS.Enabled,\n\t\t\t\t\"certificate\": cfg.GRPC.TLS.Cert,\n\t\t\t\t\"key\": cfg.GRPC.TLS.Key,\n\t\t\t},\n\t\t\t// TODO build services dynamically\n\t\t\t\"services\": map[string]interface{}{\n\t\t\t\t\"groupprovider\": map[string]interface{}{\n\t\t\t\t\t\"driver\": cfg.Driver,\n\t\t\t\t\t\"drivers\": map[string]interface{}{\n\t\t\t\t\t\t\"json\": map[string]interface{}{\n\t\t\t\t\t\t\t\"groups\": cfg.Drivers.JSON.File,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"ldap\": ldapConfigFromString(cfg.Drivers.LDAP),\n\t\t\t\t\t\t\"rest\": map[string]interface{}{\n\t\t\t\t\t\t\t\"client_id\": cfg.Drivers.REST.ClientID,\n\t\t\t\t\t\t\t\"client_secret\": cfg.Drivers.REST.ClientSecret,\n\t\t\t\t\t\t\t\"redis_address\": cfg.Drivers.REST.RedisAddr,\n\t\t\t\t\t\t\t\"redis_username\": cfg.Drivers.REST.RedisUsername,\n\t\t\t\t\t\t\t\"redis_password\": cfg.Drivers.REST.RedisPassword,\n\t\t\t\t\t\t\t\"id_provider\": cfg.Drivers.REST.IDProvider,\n\t\t\t\t\t\t\t\"api_base_url\": cfg.Drivers.REST.APIBaseURL,\n\t\t\t\t\t\t\t\"oidc_token_endpoint\": cfg.Drivers.REST.OIDCTokenEndpoint,\n\t\t\t\t\t\t\t\"target_api\": cfg.Drivers.REST.TargetAPI,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"interceptors\": 
map[string]interface{}{\n\t\t\t\t\"prometheus\": map[string]interface{}{\n\t\t\t\t\t\"namespace\": \"ocis\",\n\t\t\t\t\t\"subsystem\": \"groups\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func NewAndroidCustomConfiguration()(*AndroidCustomConfiguration) {\n m := &AndroidCustomConfiguration{\n DeviceConfiguration: *NewDeviceConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.androidCustomConfiguration\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func (c *Config) ToStructDeep(pointer interface{}, mapping ...map[string]string) error {\n\tif j := c.getJson(); j != nil {\n\t\treturn j.ToStructDeep(pointer, mapping...)\n\t}\n\treturn errors.New(\"configuration not found\")\n}", "func (old ContainerConfig) Copy() ContainerConfig {\n\t// Copy all fields\n\tres := old\n\n\t// Make deep copy of slices\n\t// none yet - placeholder\n\n\treturn res\n}", "func (*OpenconfigPlatform_Components_Component_Properties_Property_Config) IsYANGGoStruct() {}", "func (c Configuration) Clone() Configuration {\n\treturn Configuration{\n\t\tEDATool: c.EDATool,\n\t\tInputFile: c.InputFile,\n\t\tOutputFile: c.OutputFile,\n\t\tLastUpdated: c.LastUpdated,\n\t}\n}", "func getWriterConfig() *hub.EventHubConfig {\n\treturn &hub.EventHubConfig{\n\t\tNamespace: viper.GetString(\"write_namespace\"),\n\t\tHub: viper.GetString(\"write_hub\"),\n\t\tKeyName: viper.GetString(\"write_keyname\"),\n\t\tKeyValue: viper.GetString(\"write_keyvalue\"),\n\t\tConnString: viper.GetString(\"write_connstring\"),\n\t\tTenantID: viper.GetString(\"write_tenantid\"),\n\t\tClientID: viper.GetString(\"write_clientid\"),\n\t\tClientSecret: viper.GetString(\"write_clientsecret\"),\n\t\tCertPath: viper.GetString(\"write_certpath\"),\n\t\tCertPassword: viper.GetString(\"write_certpassword\"),\n\t\tBatch: viper.GetBool(\"write_batch\"),\n\t\tPartKeyLabel: viper.GetString(\"partition_key_label\"),\n\t\tADXMapping: viper.GetString(\"write_adxmapping\"),\n\t\tSerializer: serializers.SerializerConfig{DataFormat: 
viper.GetString(\"write_serializer\")},\n\t}\n}", "func ToInternal(in *Config, old *api.Config, setVersionFields bool) (*api.Config, error) {\n\tc := &api.Config{}\n\tif old != nil {\n\t\tc = old.DeepCopy()\n\t}\n\n\t// setting c.PluginVersion = in.PluginVersion is done up-front by the plugin\n\t// code. It could be done here as well (gated by setVersionFields) but\n\t// would/should be a no-op. To simplify the logic, we don't do it.\n\n\tc.ComponentLogLevel.APIServer = &in.ComponentLogLevel.APIServer\n\tc.ComponentLogLevel.ControllerManager = &in.ComponentLogLevel.ControllerManager\n\tc.ComponentLogLevel.Node = &in.ComponentLogLevel.Node\n\n\tc.SecurityPatchPackages = in.SecurityPatchPackages\n\tc.SSHSourceAddressPrefixes = in.SSHSourceAddressPrefixes\n\n\tif setVersionFields {\n\t\tinVersion, found := in.Versions[c.PluginVersion]\n\t\tif !found {\n\t\t\treturn nil, fmt.Errorf(\"version %q not found\", c.PluginVersion)\n\t\t}\n\n\t\t// Generic offering configurables\n\t\tc.ImageOffer = inVersion.ImageOffer\n\t\tc.ImagePublisher = inVersion.ImagePublisher\n\t\tc.ImageSKU = inVersion.ImageSKU\n\t\tc.ImageVersion = inVersion.ImageVersion\n\n\t\t// Container images configuration\n\t\tc.Images.AlertManager = inVersion.Images.AlertManager\n\t\tc.Images.AnsibleServiceBroker = inVersion.Images.AnsibleServiceBroker\n\t\tc.Images.ClusterMonitoringOperator = inVersion.Images.ClusterMonitoringOperator\n\t\tc.Images.ConfigReloader = inVersion.Images.ConfigReloader\n\t\tc.Images.Console = inVersion.Images.Console\n\t\tc.Images.ControlPlane = inVersion.Images.ControlPlane\n\t\tc.Images.Grafana = inVersion.Images.Grafana\n\t\tc.Images.KubeRbacProxy = inVersion.Images.KubeRbacProxy\n\t\tc.Images.KubeStateMetrics = inVersion.Images.KubeStateMetrics\n\t\tc.Images.Node = inVersion.Images.Node\n\t\tc.Images.NodeExporter = inVersion.Images.NodeExporter\n\t\tc.Images.OAuthProxy = inVersion.Images.OAuthProxy\n\t\tc.Images.Prometheus = 
inVersion.Images.Prometheus\n\t\tc.Images.PrometheusConfigReloader = inVersion.Images.PrometheusConfigReloader\n\t\tc.Images.PrometheusOperator = inVersion.Images.PrometheusOperator\n\t\tc.Images.Registry = inVersion.Images.Registry\n\t\tc.Images.RegistryConsole = inVersion.Images.RegistryConsole\n\t\tc.Images.Router = inVersion.Images.Router\n\t\tc.Images.ServiceCatalog = inVersion.Images.ServiceCatalog\n\t\tc.Images.TemplateServiceBroker = inVersion.Images.TemplateServiceBroker\n\t\tc.Images.WebConsole = inVersion.Images.WebConsole\n\n\t\tc.Images.Format = inVersion.Images.Format\n\n\t\tc.Images.Httpd = inVersion.Images.Httpd\n\t\tc.Images.MasterEtcd = inVersion.Images.MasterEtcd\n\n\t\tc.Images.GenevaLogging = inVersion.Images.GenevaLogging\n\t\tc.Images.GenevaStatsd = inVersion.Images.GenevaStatsd\n\t\tc.Images.GenevaTDAgent = inVersion.Images.GenevaTDAgent\n\n\t\tc.Images.AzureControllers = inVersion.Images.AzureControllers\n\t\tc.Images.Canary = inVersion.Images.Canary\n\t\tc.Images.AroAdmissionController = inVersion.Images.AroAdmissionController\n\t\tc.Images.EtcdBackup = inVersion.Images.EtcdBackup\n\t\tc.Images.MetricsBridge = inVersion.Images.MetricsBridge\n\t\tc.Images.Startup = inVersion.Images.Startup\n\t\tc.Images.Sync = inVersion.Images.Sync\n\t\tc.Images.TLSProxy = inVersion.Images.TLSProxy\n\n\t\tc.Images.LogAnalyticsAgent = inVersion.Images.LogAnalyticsAgent\n\t\tc.Images.MetricsServer = inVersion.Images.MetricsServer\n\t}\n\n\t// use setVersionFields to override the secrets below otherwise\n\t// they become un-updatable..\n\tif c.Certificates.GenevaLogging.Key == nil || setVersionFields {\n\t\tc.Certificates.GenevaLogging.Key = in.Certificates.GenevaLogging.Key\n\t}\n\tif c.Certificates.GenevaLogging.Cert == nil || setVersionFields {\n\t\tc.Certificates.GenevaLogging.Cert = in.Certificates.GenevaLogging.Cert\n\t}\n\tif c.Certificates.GenevaMetrics.Key == nil || setVersionFields {\n\t\tc.Certificates.GenevaMetrics.Key = 
in.Certificates.GenevaMetrics.Key\n\t}\n\tif c.Certificates.GenevaMetrics.Cert == nil || setVersionFields {\n\t\tc.Certificates.GenevaMetrics.Cert = in.Certificates.GenevaMetrics.Cert\n\t}\n\tif c.Certificates.PackageRepository.Key == nil || setVersionFields {\n\t\tc.Certificates.PackageRepository.Key = in.Certificates.PackageRepository.Key\n\t}\n\tif c.Certificates.PackageRepository.Cert == nil || setVersionFields {\n\t\tc.Certificates.PackageRepository.Cert = in.Certificates.PackageRepository.Cert\n\t}\n\n\t// Geneva integration configurables\n\tif c.GenevaLoggingSector == \"\" {\n\t\tc.GenevaLoggingSector = in.GenevaLoggingSector\n\t}\n\tif c.GenevaLoggingAccount == \"\" {\n\t\tc.GenevaLoggingAccount = in.GenevaLoggingAccount\n\t}\n\tif c.GenevaLoggingNamespace == \"\" {\n\t\tc.GenevaLoggingNamespace = in.GenevaLoggingNamespace\n\t}\n\tif c.GenevaLoggingControlPlaneAccount == \"\" {\n\t\tc.GenevaLoggingControlPlaneAccount = in.GenevaLoggingControlPlaneAccount\n\t}\n\tif c.GenevaLoggingControlPlaneEnvironment == \"\" {\n\t\tc.GenevaLoggingControlPlaneEnvironment = in.GenevaLoggingControlPlaneEnvironment\n\t}\n\tif c.GenevaLoggingControlPlaneRegion == \"\" {\n\t\tc.GenevaLoggingControlPlaneRegion = in.GenevaLoggingControlPlaneRegion\n\t}\n\tif c.GenevaMetricsAccount == \"\" {\n\t\tc.GenevaMetricsAccount = in.GenevaMetricsAccount\n\t}\n\tif c.GenevaMetricsEndpoint == \"\" {\n\t\tc.GenevaMetricsEndpoint = in.GenevaMetricsEndpoint\n\t}\n\n\tif c.Images.ImagePullSecret == nil || setVersionFields {\n\t\tc.Images.ImagePullSecret = in.ImagePullSecret\n\t}\n\tif c.Images.GenevaImagePullSecret == nil || setVersionFields {\n\t\tc.Images.GenevaImagePullSecret = in.GenevaImagePullSecret\n\t}\n\n\treturn c, nil\n}", "func NewOauth2(in *yaml.Node, context *compiler.Context) (*Oauth2, error) {\n\terrors := make([]error, 0)\n\tx := &Oauth2{}\n\tm, ok := compiler.UnpackMap(in)\n\tif !ok {\n\t\tmessage := fmt.Sprintf(\"has unexpected value: %+v (%T)\", in, in)\n\t\terrors = 
append(errors, compiler.NewError(context, message))\n\t} else {\n\t\tallowedKeys := []string{\"scopes\"}\n\t\tvar allowedPatterns []*regexp.Regexp\n\t\tinvalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)\n\t\tif len(invalidKeys) > 0 {\n\t\t\tmessage := fmt.Sprintf(\"has invalid %s: %+v\", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, \", \"))\n\t\t\terrors = append(errors, compiler.NewError(context, message))\n\t\t}\n\t\t// Scopes scopes = 1;\n\t\tv1 := compiler.MapValueForKey(m, \"scopes\")\n\t\tif v1 != nil {\n\t\t\tvar err error\n\t\t\tx.Scopes, err = NewScopes(v1, compiler.NewContext(\"scopes\", v1, context))\n\t\t\tif err != nil {\n\t\t\t\terrors = append(errors, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn x, compiler.NewErrorGroupOrNil(errors)\n}", "func OverwriteWebhooks(current, desired client.Object) error {\n\tswitch config := current.(type) {\n\tcase *admissionregistrationv1.MutatingWebhookConfiguration:\n\t\td := desired.(*admissionregistrationv1.MutatingWebhookConfiguration)\n\t\tconfig.Webhooks = d.DeepCopy().Webhooks\n\tcase *admissionregistrationv1.ValidatingWebhookConfiguration:\n\t\td := desired.(*admissionregistrationv1.ValidatingWebhookConfiguration)\n\t\tconfig.Webhooks = d.DeepCopy().Webhooks\n\tcase *admissionregistrationv1beta1.MutatingWebhookConfiguration:\n\t\td := desired.(*admissionregistrationv1beta1.MutatingWebhookConfiguration)\n\t\tconfig.Webhooks = d.DeepCopy().Webhooks\n\tcase *admissionregistrationv1beta1.ValidatingWebhookConfiguration:\n\t\td := desired.(*admissionregistrationv1beta1.ValidatingWebhookConfiguration)\n\t\tconfig.Webhooks = d.DeepCopy().Webhooks\n\tdefault:\n\t\treturn fmt.Errorf(\"unexpected webhook config type: %T\", current)\n\t}\n\n\treturn nil\n}", "func (w *wireguardServerConfig) Get(name string) (*api.WireguardServerConfig, error) {\n\tkey := path.Join(w.prefix, name)\n\tdata, err := w.store.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif data == nil 
{\n\t\treturn nil, nil\n\t}\n\tvar obj api.WireguardServerConfig\n\treturn &obj, json.Unmarshal(data, &obj)\n}", "func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {\n\tmergedConfig := &restclient.Config{}\n\n\t// configClusterInfo holds the information identify the server provided by .kubeconfig\n\tconfigClientConfig := &restclient.Config{}\n\tconfigClientConfig.CAFile = configClusterInfo.CertificateAuthority\n\tconfigClientConfig.CAData = configClusterInfo.CertificateAuthorityData\n\tconfigClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify\n\tconfigClientConfig.ServerName = configClusterInfo.TLSServerName\n\tmergo.Merge(mergedConfig, configClientConfig, mergo.WithOverride)\n\n\treturn mergedConfig, nil\n}", "func UnmarshalConfig(data []byte) (*cb.Config, error) {\n\tconfig := &cb.Config{}\n\terr := proto.Unmarshal(data, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn config, nil\n}", "func copyTLSConfig(cfg *tls.Config) *tls.Config {\n\treturn &tls.Config{\n\t\tCertificates: cfg.Certificates,\n\t\tCipherSuites: cfg.CipherSuites,\n\t\tClientAuth: cfg.ClientAuth,\n\t\tClientCAs: cfg.ClientCAs,\n\t\tClientSessionCache: cfg.ClientSessionCache,\n\t\tCurvePreferences: cfg.CurvePreferences,\n\t\tInsecureSkipVerify: cfg.InsecureSkipVerify,\n\t\tMaxVersion: cfg.MaxVersion,\n\t\tMinVersion: cfg.MinVersion,\n\t\tNextProtos: cfg.NextProtos,\n\t\tPreferServerCipherSuites: cfg.PreferServerCipherSuites,\n\t\tRand: cfg.Rand,\n\t\tRootCAs: cfg.RootCAs,\n\t\tServerName: cfg.ServerName,\n\t\tSessionTicketsDisabled: cfg.SessionTicketsDisabled,\n\t}\n}", "func (*OpenconfigOfficeAp_System_Aaa_Authentication_Config) IsYANGGoStruct() {}", "func (fOpenCfg *FileOpenConfig) CopyOut() FileOpenConfig {\n\n if fOpenCfg.fileOpenModes == nil {\n fOpenCfg.fileOpenModes = make([]FileOpenMode, 0)\n }\n\n fOpStat2 := FileOpenConfig{}\n fOpStat2.isInitialized = 
fOpenCfg.isInitialized\n fOpStat2.fileOpenType = fOpenCfg.fileOpenType\n lenFOpenModes := len(fOpenCfg.fileOpenModes)\n\n if lenFOpenModes == 0 {\n fOpenCfg.fileOpenModes = append(fOpenCfg.fileOpenModes, FOpenMode.ModeNone())\n lenFOpenModes = 1\n }\n\n fOpStat2.fileOpenModes = make([]FileOpenMode, lenFOpenModes)\n\n for i := 0; i < lenFOpenModes; i++ {\n fOpStat2.fileOpenModes[i] = fOpenCfg.fileOpenModes[i]\n }\n\n return fOpStat2\n}", "func Get() *Config {\n\tif cfg != nil {\n\t\treturn cfg\n\t}\n\n\tcfg = &Config{\n\t\tConsumerGroupName: \"stream-company-profile-consumer-group\",\n\t\tStreamCompanyProfileTopic: \"stream-company-profile\",\n\t\tZookeeperChroot: \"\",\n\t\tInitialOffset: int64(-1),\n\t}\n\n\terr := gofigure.Gofigure(cfg)\n\tif err != nil {\n\t\tlog.Error(err, nil)\n\t\tos.Exit(1)\n\t}\n\n\treturn cfg\n}", "func NewApplicationConfig() *AppSettings {\r\n\tconf := AppSettings{}\r\n\r\n\tp := os.Getenv(\"REPORT_DB_PORT\")\r\n\tport, err := strconv.Atoi(p)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tconf.Database = ServerDetail{\r\n\t\tEndpoint: os.Getenv(\"REPORT_DB_HOST\"),\r\n\t\tPort: port,\r\n\t\tUsername: os.Getenv(\"REPORT_DB_USER\"),\r\n\t\tPassword: os.Getenv(\"REPORT_DB_PASSWORD\"),\r\n\t\tName: os.Getenv(\"REPORT_DB_NAME\"),\r\n\t}\r\n\r\n\tb := os.Getenv(\"REPORT_OAUTH2_VERIFY_SSL\")\r\n\tverify := false\r\n\tif b == \"true\" || b == \"t\" || b == \"\" {\r\n\t\tverify = true\r\n\t}\r\n\r\n\tconf.OAuth2 = Detail{\r\n\t\tEndpoint: os.Getenv(\"REPORT_OAUTH2_BACKEND_URI\"),\r\n\t\tEndpointWeb: os.Getenv(\"REPORT_OAUTH2_WEB_URI\"),\r\n\t\tUsername: os.Getenv(\"REPORT_CLIENT_ID\"),\r\n\t\tPassword: os.Getenv(\"REPORT_CLIENT_SECRET\"),\r\n\t\tScope: os.Getenv(\"REPORT_OAUTH2_SCOPE\"),\r\n\t\tRedirectURI: os.Getenv(\"REPORT_REDIRECT_URI\"),\r\n\t\tName: os.Getenv(\"REPORT_OAUTH2_SERVICE_NAME\"),\r\n\t\tSSLVerify: verify,\r\n\t}\r\n\r\n\tconf.Admin = Admin{\r\n\t\tAPIKey: os.Getenv(\"REPORT_OAUTH2_API\"),\r\n\t}\r\n\r\n\tbo := 
os.Getenv(\"REPORT_BILLPAY_VERIFY_SSL\")\r\n\tverify2 := false\r\n\tif bo == \"true\" || bo == \"t\" || bo == \"\" {\r\n\t\tverify2 = true\r\n\t}\r\n\r\n\tconf.Billpay = Billpay{\r\n\t\tEndpoint: os.Getenv(\"REPORT_BILLPAY_URI\"),\r\n\t\tAPIKey: os.Getenv(\"REPORT_BILLPAY_APIKEY\"),\r\n\t\tCACert: os.Getenv(\"REPORT_BILLPAY_CACERT\"),\r\n\t\tSSLVerify: verify2,\r\n\t}\r\n\r\n\t// Log\r\n\tconf.Log = Log{\r\n\t\tLevel: os.Getenv(\"REPORT_LOG_LEVEL\"),\r\n\t\tFormat: os.Getenv(\"REPORT_LOG_FORMAT\"),\r\n\t}\r\n\r\n\treturn &conf\r\n}", "func newConfigV101() *configV1 {\n\tconf := new(configV1)\n\tconf.Version = mcCurrentConfigVersion\n\t// make sure to allocate map's otherwise Golang\n\t// exits silently without providing any errors\n\tconf.Hosts = make(map[string]*hostConfig)\n\tconf.Aliases = make(map[string]string)\n\n\tlocalHostConfig := new(hostConfig)\n\tlocalHostConfig.AccessKeyID = \"\"\n\tlocalHostConfig.SecretAccessKey = \"\"\n\n\ts3HostConf := new(hostConfig)\n\ts3HostConf.AccessKeyID = globalAccessKeyID\n\ts3HostConf.SecretAccessKey = globalSecretAccessKey\n\n\t// Your example host config\n\texampleHostConf := new(hostConfig)\n\texampleHostConf.AccessKeyID = globalAccessKeyID\n\texampleHostConf.SecretAccessKey = globalSecretAccessKey\n\n\tplayHostConfig := new(hostConfig)\n\tplayHostConfig.AccessKeyID = \"\"\n\tplayHostConfig.SecretAccessKey = \"\"\n\n\tdlHostConfig := new(hostConfig)\n\tdlHostConfig.AccessKeyID = \"\"\n\tdlHostConfig.SecretAccessKey = \"\"\n\n\tconf.Hosts[exampleHostURL] = exampleHostConf\n\tconf.Hosts[\"localhost:*\"] = localHostConfig\n\tconf.Hosts[\"127.0.0.1:*\"] = localHostConfig\n\tconf.Hosts[\"s3*.amazonaws.com\"] = s3HostConf\n\tconf.Hosts[\"play.minio.io:9000\"] = playHostConfig\n\tconf.Hosts[\"dl.minio.io:9000\"] = dlHostConfig\n\n\taliases := make(map[string]string)\n\taliases[\"s3\"] = \"https://s3.amazonaws.com\"\n\taliases[\"play\"] = \"https://play.minio.io:9000\"\n\taliases[\"dl\"] = 
\"https://dl.minio.io:9000\"\n\taliases[\"localhost\"] = \"http://localhost:9000\"\n\tconf.Aliases = aliases\n\n\treturn conf\n}", "func GetConfig() *config {\n\tif instance == nil {\n\t\tinstance = new(config)\n\t\tinstance.SubmitSMResponseTimeLow = 0\n\t\tinstance.SubmitSMResponseTimeHigh = 0\n\t\tinstance.SubmitSMWindowMax = 100\n\t\tinstance.DeliverSMWindowMax = 99\n\t\tinstance.APIPort = 8090\n\t\tinstance.UcpPort = 8080\n\t\tinstance.MaxTPS = 100\n\n\t\treadConfig()\n\t}\n\treturn instance\n}" ]
[ "0.66872096", "0.6104361", "0.55187887", "0.5312633", "0.5312633", "0.5270062", "0.51149684", "0.491319", "0.49072093", "0.48406395", "0.47211596", "0.46793032", "0.46748686", "0.4630237", "0.46169022", "0.46119437", "0.4608377", "0.45484298", "0.45417875", "0.4538724", "0.45174056", "0.45113668", "0.45048836", "0.45031476", "0.4500385", "0.4445933", "0.44212434", "0.44212434", "0.44183287", "0.44044167", "0.4400778", "0.4386746", "0.43860784", "0.43715766", "0.43657407", "0.4333492", "0.4328913", "0.4326927", "0.4322333", "0.4320102", "0.43195167", "0.43181777", "0.4314637", "0.43105856", "0.428308", "0.42699596", "0.42576325", "0.4243722", "0.4216871", "0.42165124", "0.4188535", "0.41830266", "0.41811043", "0.417752", "0.4165489", "0.4162579", "0.4158983", "0.4154159", "0.41532144", "0.41373053", "0.41302478", "0.41296056", "0.41267794", "0.41210657", "0.41184294", "0.4113559", "0.4109803", "0.41055724", "0.4092658", "0.40822944", "0.40807784", "0.4077828", "0.4069653", "0.4067671", "0.4064043", "0.40631214", "0.40600836", "0.40596575", "0.4058689", "0.4058216", "0.4050087", "0.40465233", "0.40363234", "0.40353262", "0.40285963", "0.40229863", "0.40218794", "0.4018305", "0.4015143", "0.40150303", "0.4013169", "0.4011445", "0.40090472", "0.40088183", "0.40053722", "0.40046036", "0.40035903", "0.39971247", "0.39879796", "0.3985289" ]
0.7560912
0
Copy the values of the OpenIDConnectWellKnownConfiguration and return it as a new struct.
func (opts OpenIDConnectWellKnownConfiguration) Copy() (optsCopy OpenIDConnectWellKnownConfiguration) { optsCopy = OpenIDConnectWellKnownConfiguration{ OAuth2WellKnownConfiguration: opts.OAuth2WellKnownConfiguration.Copy(), OpenIDConnectDiscoveryOptions: opts.OpenIDConnectDiscoveryOptions, } if opts.OpenIDConnectFrontChannelLogoutDiscoveryOptions != nil { optsCopy.OpenIDConnectFrontChannelLogoutDiscoveryOptions = &OpenIDConnectFrontChannelLogoutDiscoveryOptions{} *optsCopy.OpenIDConnectFrontChannelLogoutDiscoveryOptions = *opts.OpenIDConnectFrontChannelLogoutDiscoveryOptions } if opts.OpenIDConnectBackChannelLogoutDiscoveryOptions != nil { optsCopy.OpenIDConnectBackChannelLogoutDiscoveryOptions = &OpenIDConnectBackChannelLogoutDiscoveryOptions{} *optsCopy.OpenIDConnectBackChannelLogoutDiscoveryOptions = *opts.OpenIDConnectBackChannelLogoutDiscoveryOptions } if opts.OpenIDConnectSessionManagementDiscoveryOptions != nil { optsCopy.OpenIDConnectSessionManagementDiscoveryOptions = &OpenIDConnectSessionManagementDiscoveryOptions{} *optsCopy.OpenIDConnectSessionManagementDiscoveryOptions = *opts.OpenIDConnectSessionManagementDiscoveryOptions } if opts.OpenIDConnectRPInitiatedLogoutDiscoveryOptions != nil { optsCopy.OpenIDConnectRPInitiatedLogoutDiscoveryOptions = &OpenIDConnectRPInitiatedLogoutDiscoveryOptions{} *optsCopy.OpenIDConnectRPInitiatedLogoutDiscoveryOptions = *opts.OpenIDConnectRPInitiatedLogoutDiscoveryOptions } if opts.OpenIDConnectPromptCreateDiscoveryOptions != nil { optsCopy.OpenIDConnectPromptCreateDiscoveryOptions = &OpenIDConnectPromptCreateDiscoveryOptions{} *optsCopy.OpenIDConnectPromptCreateDiscoveryOptions = *opts.OpenIDConnectPromptCreateDiscoveryOptions } if opts.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions != nil { optsCopy.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions = &OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions{} *optsCopy.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions 
= *opts.OpenIDConnectClientInitiatedBackChannelAuthFlowDiscoveryOptions } if opts.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions != nil { optsCopy.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions = &OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions{} *optsCopy.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions = *opts.OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions } if opts.OpenIDFederationDiscoveryOptions != nil { optsCopy.OpenIDFederationDiscoveryOptions = &OpenIDFederationDiscoveryOptions{} *optsCopy.OpenIDFederationDiscoveryOptions = *opts.OpenIDFederationDiscoveryOptions } return optsCopy }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewOpenIDConnectWellKnownConfiguration(c *schema.IdentityProvidersOpenIDConnect) (config OpenIDConnectWellKnownConfiguration) {\n\tconfig = OpenIDConnectWellKnownConfiguration{\n\t\tOAuth2WellKnownConfiguration: OAuth2WellKnownConfiguration{\n\t\t\tCommonDiscoveryOptions: CommonDiscoveryOptions{\n\t\t\t\tSubjectTypesSupported: []string{\n\t\t\t\t\tSubjectTypePublic,\n\t\t\t\t\tSubjectTypePairwise,\n\t\t\t\t},\n\t\t\t\tResponseTypesSupported: []string{\n\t\t\t\t\tResponseTypeAuthorizationCodeFlow,\n\t\t\t\t\tResponseTypeImplicitFlowIDToken,\n\t\t\t\t\tResponseTypeImplicitFlowToken,\n\t\t\t\t\tResponseTypeImplicitFlowBoth,\n\t\t\t\t\tResponseTypeHybridFlowIDToken,\n\t\t\t\t\tResponseTypeHybridFlowToken,\n\t\t\t\t\tResponseTypeHybridFlowBoth,\n\t\t\t\t},\n\t\t\t\tGrantTypesSupported: []string{\n\t\t\t\t\tGrantTypeAuthorizationCode,\n\t\t\t\t\tGrantTypeImplicit,\n\t\t\t\t\tGrantTypeClientCredentials,\n\t\t\t\t\tGrantTypeRefreshToken,\n\t\t\t\t},\n\t\t\t\tResponseModesSupported: []string{\n\t\t\t\t\tResponseModeFormPost,\n\t\t\t\t\tResponseModeQuery,\n\t\t\t\t\tResponseModeFragment,\n\t\t\t\t\tResponseModeJWT,\n\t\t\t\t\tResponseModeFormPostJWT,\n\t\t\t\t\tResponseModeQueryJWT,\n\t\t\t\t\tResponseModeFragmentJWT,\n\t\t\t\t},\n\t\t\t\tScopesSupported: []string{\n\t\t\t\t\tScopeOfflineAccess,\n\t\t\t\t\tScopeOpenID,\n\t\t\t\t\tScopeProfile,\n\t\t\t\t\tScopeGroups,\n\t\t\t\t\tScopeEmail,\n\t\t\t\t},\n\t\t\t\tClaimsSupported: 
[]string{\n\t\t\t\t\tClaimAuthenticationMethodsReference,\n\t\t\t\t\tClaimAudience,\n\t\t\t\t\tClaimAuthorizedParty,\n\t\t\t\t\tClaimClientIdentifier,\n\t\t\t\t\tClaimExpirationTime,\n\t\t\t\t\tClaimIssuedAt,\n\t\t\t\t\tClaimIssuer,\n\t\t\t\t\tClaimJWTID,\n\t\t\t\t\tClaimRequestedAt,\n\t\t\t\t\tClaimSubject,\n\t\t\t\t\tClaimAuthenticationTime,\n\t\t\t\t\tClaimNonce,\n\t\t\t\t\tClaimPreferredEmail,\n\t\t\t\t\tClaimEmailVerified,\n\t\t\t\t\tClaimEmailAlts,\n\t\t\t\t\tClaimGroups,\n\t\t\t\t\tClaimPreferredUsername,\n\t\t\t\t\tClaimFullName,\n\t\t\t\t},\n\t\t\t\tTokenEndpointAuthMethodsSupported: []string{\n\t\t\t\t\tClientAuthMethodClientSecretBasic,\n\t\t\t\t\tClientAuthMethodClientSecretPost,\n\t\t\t\t\tClientAuthMethodClientSecretJWT,\n\t\t\t\t\tClientAuthMethodPrivateKeyJWT,\n\t\t\t\t\tClientAuthMethodNone,\n\t\t\t\t},\n\t\t\t\tTokenEndpointAuthSigningAlgValuesSupported: []string{\n\t\t\t\t\tSigningAlgHMACUsingSHA256,\n\t\t\t\t\tSigningAlgHMACUsingSHA384,\n\t\t\t\t\tSigningAlgHMACUsingSHA512,\n\t\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\t\tSigningAlgRSAUsingSHA384,\n\t\t\t\t\tSigningAlgRSAUsingSHA512,\n\t\t\t\t\tSigningAlgECDSAUsingP256AndSHA256,\n\t\t\t\t\tSigningAlgECDSAUsingP384AndSHA384,\n\t\t\t\t\tSigningAlgECDSAUsingP521AndSHA512,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA256,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA384,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA512,\n\t\t\t\t},\n\t\t\t},\n\t\t\tOAuth2DiscoveryOptions: OAuth2DiscoveryOptions{\n\t\t\t\tCodeChallengeMethodsSupported: []string{\n\t\t\t\t\tPKCEChallengeMethodSHA256,\n\t\t\t\t},\n\t\t\t\tRevocationEndpointAuthMethodsSupported: []string{\n\t\t\t\t\tClientAuthMethodClientSecretBasic,\n\t\t\t\t\tClientAuthMethodClientSecretPost,\n\t\t\t\t\tClientAuthMethodClientSecretJWT,\n\t\t\t\t\tClientAuthMethodPrivateKeyJWT,\n\t\t\t\t\tClientAuthMethodNone,\n\t\t\t\t},\n\t\t\t\tRevocationEndpointAuthSigningAlgValuesSupported: 
[]string{\n\t\t\t\t\tSigningAlgHMACUsingSHA256,\n\t\t\t\t\tSigningAlgHMACUsingSHA384,\n\t\t\t\t\tSigningAlgHMACUsingSHA512,\n\t\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\t\tSigningAlgRSAUsingSHA384,\n\t\t\t\t\tSigningAlgRSAUsingSHA512,\n\t\t\t\t\tSigningAlgECDSAUsingP256AndSHA256,\n\t\t\t\t\tSigningAlgECDSAUsingP384AndSHA384,\n\t\t\t\t\tSigningAlgECDSAUsingP521AndSHA512,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA256,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA384,\n\t\t\t\t\tSigningAlgRSAPSSUsingSHA512,\n\t\t\t\t},\n\t\t\t\tIntrospectionEndpointAuthMethodsSupported: []string{\n\t\t\t\t\tClientAuthMethodClientSecretBasic,\n\t\t\t\t\tClientAuthMethodNone,\n\t\t\t\t},\n\t\t\t},\n\t\t\tOAuth2JWTIntrospectionResponseDiscoveryOptions: &OAuth2JWTIntrospectionResponseDiscoveryOptions{\n\t\t\t\tIntrospectionSigningAlgValuesSupported: []string{\n\t\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\t\tSigningAlgNone,\n\t\t\t\t},\n\t\t\t},\n\t\t\tOAuth2PushedAuthorizationDiscoveryOptions: &OAuth2PushedAuthorizationDiscoveryOptions{\n\t\t\t\tRequirePushedAuthorizationRequests: c.PAR.Enforce,\n\t\t\t},\n\t\t\tOAuth2IssuerIdentificationDiscoveryOptions: &OAuth2IssuerIdentificationDiscoveryOptions{\n\t\t\t\tAuthorizationResponseIssuerParameterSupported: true,\n\t\t\t},\n\t\t},\n\n\t\tOpenIDConnectDiscoveryOptions: OpenIDConnectDiscoveryOptions{\n\t\t\tIDTokenSigningAlgValuesSupported: []string{\n\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\tSigningAlgNone,\n\t\t\t},\n\t\t\tUserinfoSigningAlgValuesSupported: []string{\n\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\tSigningAlgNone,\n\t\t\t},\n\t\t\tRequestObjectSigningAlgValuesSupported: 
[]string{\n\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t\tSigningAlgRSAUsingSHA384,\n\t\t\t\tSigningAlgRSAUsingSHA512,\n\t\t\t\tSigningAlgECDSAUsingP256AndSHA256,\n\t\t\t\tSigningAlgECDSAUsingP384AndSHA384,\n\t\t\t\tSigningAlgECDSAUsingP521AndSHA512,\n\t\t\t\tSigningAlgRSAPSSUsingSHA256,\n\t\t\t\tSigningAlgRSAPSSUsingSHA384,\n\t\t\t\tSigningAlgRSAPSSUsingSHA512,\n\t\t\t\tSigningAlgNone,\n\t\t\t},\n\t\t},\n\t\tOpenIDConnectPromptCreateDiscoveryOptions: &OpenIDConnectPromptCreateDiscoveryOptions{\n\t\t\tPromptValuesSupported: []string{\n\t\t\t\tPromptNone,\n\t\t\t\tPromptConsent,\n\t\t\t},\n\t\t},\n\t\tOpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions: &OpenIDConnectJWTSecuredAuthorizationResponseModeDiscoveryOptions{\n\t\t\tAuthorizationSigningAlgValuesSupported: []string{\n\t\t\t\tSigningAlgRSAUsingSHA256,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, alg := range c.Discovery.ResponseObjectSigningAlgs {\n\t\tif !utils.IsStringInSlice(alg, config.IDTokenSigningAlgValuesSupported) {\n\t\t\tconfig.IDTokenSigningAlgValuesSupported = append(config.IDTokenSigningAlgValuesSupported, alg)\n\t\t}\n\n\t\tif !utils.IsStringInSlice(alg, config.UserinfoSigningAlgValuesSupported) {\n\t\t\tconfig.UserinfoSigningAlgValuesSupported = append(config.UserinfoSigningAlgValuesSupported, alg)\n\t\t}\n\n\t\tif !utils.IsStringInSlice(alg, config.IntrospectionSigningAlgValuesSupported) {\n\t\t\tconfig.IntrospectionSigningAlgValuesSupported = append(config.IntrospectionSigningAlgValuesSupported, alg)\n\t\t}\n\n\t\tif !utils.IsStringInSlice(alg, config.AuthorizationSigningAlgValuesSupported) {\n\t\t\tconfig.AuthorizationSigningAlgValuesSupported = append(config.AuthorizationSigningAlgValuesSupported, 
alg)\n\t\t}\n\t}\n\n\tsort.Sort(SortedSigningAlgs(config.IDTokenSigningAlgValuesSupported))\n\tsort.Sort(SortedSigningAlgs(config.UserinfoSigningAlgValuesSupported))\n\tsort.Sort(SortedSigningAlgs(config.IntrospectionSigningAlgValuesSupported))\n\tsort.Sort(SortedSigningAlgs(config.AuthorizationSigningAlgValuesSupported))\n\n\tif c.EnablePKCEPlainChallenge {\n\t\tconfig.CodeChallengeMethodsSupported = append(config.CodeChallengeMethodsSupported, PKCEChallengeMethodPlain)\n\t}\n\n\treturn config\n}", "func (opts OAuth2WellKnownConfiguration) Copy() (optsCopy OAuth2WellKnownConfiguration) {\n\toptsCopy = OAuth2WellKnownConfiguration{\n\t\tCommonDiscoveryOptions: opts.CommonDiscoveryOptions,\n\t\tOAuth2DiscoveryOptions: opts.OAuth2DiscoveryOptions,\n\t}\n\n\tif opts.OAuth2DeviceAuthorizationGrantDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2DeviceAuthorizationGrantDiscoveryOptions = &OAuth2DeviceAuthorizationGrantDiscoveryOptions{}\n\t\t*optsCopy.OAuth2DeviceAuthorizationGrantDiscoveryOptions = *opts.OAuth2DeviceAuthorizationGrantDiscoveryOptions\n\t}\n\n\tif opts.OAuth2MutualTLSClientAuthenticationDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2MutualTLSClientAuthenticationDiscoveryOptions = &OAuth2MutualTLSClientAuthenticationDiscoveryOptions{}\n\t\t*optsCopy.OAuth2MutualTLSClientAuthenticationDiscoveryOptions = *opts.OAuth2MutualTLSClientAuthenticationDiscoveryOptions\n\t}\n\n\tif opts.OAuth2IssuerIdentificationDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2IssuerIdentificationDiscoveryOptions = &OAuth2IssuerIdentificationDiscoveryOptions{}\n\t\t*optsCopy.OAuth2IssuerIdentificationDiscoveryOptions = *opts.OAuth2IssuerIdentificationDiscoveryOptions\n\t}\n\n\tif opts.OAuth2JWTIntrospectionResponseDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2JWTIntrospectionResponseDiscoveryOptions = &OAuth2JWTIntrospectionResponseDiscoveryOptions{}\n\t\t*optsCopy.OAuth2JWTIntrospectionResponseDiscoveryOptions = *opts.OAuth2JWTIntrospectionResponseDiscoveryOptions\n\t}\n\n\tif 
opts.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions = &OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions{}\n\t\t*optsCopy.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions = *opts.OAuth2JWTSecuredAuthorizationRequestDiscoveryOptions\n\t}\n\n\tif opts.OAuth2PushedAuthorizationDiscoveryOptions != nil {\n\t\toptsCopy.OAuth2PushedAuthorizationDiscoveryOptions = &OAuth2PushedAuthorizationDiscoveryOptions{}\n\t\t*optsCopy.OAuth2PushedAuthorizationDiscoveryOptions = *opts.OAuth2PushedAuthorizationDiscoveryOptions\n\t}\n\n\treturn optsCopy\n}", "func (r *Routes) configureWellKnown(healthFunc func() bool) {\n\twellKnown := r.Group(\"/.well-known\")\n\t{\n\t\twellKnown.GET(\"/schema-discovery\", func(ctx *gin.Context) {\n\t\t\tdiscovery := struct {\n\t\t\t\tSchemaURL string `json:\"schema_url\"`\n\t\t\t\tSchemaType string `json:\"schema_type\"`\n\t\t\t\tUIURL string `json:\"ui_url\"`\n\t\t\t}{\n\t\t\t\tSchemaURL: \"/swagger.json\",\n\t\t\t\tSchemaType: \"swagger-2.0\",\n\t\t\t}\n\t\t\tctx.JSON(http.StatusOK, &discovery)\n\t\t})\n\t\twellKnown.GET(\"/health\", healthHandler(healthFunc))\n\t}\n\n\tr.GET(\"/swagger.json\", func(ctx *gin.Context) {\n\t\tctx.String(http.StatusOK, string(SwaggerJSON))\n\t})\n}", "func (r *Routes) configureWellKnown(healthFunc func() bool) {\n\twellKnown := r.Group(\"/.well-known\")\n\t{\n\t\twellKnown.GET(\"/schema-discovery\", func(ctx *gin.Context) {\n\t\t\tdiscovery := struct {\n\t\t\t\tSchemaURL string `json:\"schema_url\"`\n\t\t\t\tSchemaType string `json:\"schema_type\"`\n\t\t\t\tUIURL string `json:\"ui_url\"`\n\t\t\t}{\n\t\t\t\tSchemaURL: \"/swagger.json\",\n\t\t\t\tSchemaType: \"swagger-2.0\",\n\t\t\t}\n\t\t\tctx.JSON(http.StatusOK, &discovery)\n\t\t})\n\t\twellKnown.GET(\"/health\", healthHandler(healthFunc))\n\t}\n\n\tr.GET(\"/swagger.json\", func(ctx *gin.Context) {\n\t\tctx.String(http.StatusOK, string(SwaggerJSON))\n\t})\n}", "func (conf 
*ThrapConfig) Clone() *ThrapConfig {\n\tif conf == nil {\n\t\treturn nil\n\t}\n\n\tc := &ThrapConfig{\n\t\tVCS: make(map[string]*VCSConfig, len(conf.VCS)),\n\t\tOrchestrator: make(map[string]*OrchestratorConfig, len(conf.Orchestrator)),\n\t\tRegistry: make(map[string]*RegistryConfig, len(conf.Registry)),\n\t\tSecrets: make(map[string]*SecretsConfig, len(conf.Secrets)),\n\t}\n\n\tfor k, v := range conf.VCS {\n\t\tc.VCS[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Orchestrator {\n\t\tc.Orchestrator[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Registry {\n\t\tc.Registry[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Secrets {\n\t\tc.Secrets[k] = v.Clone()\n\t}\n\n\treturn conf\n}", "func NewDeviceEnrollmentWindowsHelloForBusinessConfiguration()(*DeviceEnrollmentWindowsHelloForBusinessConfiguration) {\n m := &DeviceEnrollmentWindowsHelloForBusinessConfiguration{\n DeviceEnrollmentConfiguration: *NewDeviceEnrollmentConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.deviceEnrollmentWindowsHelloForBusinessConfiguration\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func getOauthConfig(m configmap.Mapper) *oauth2.Config {\n\t// If not impersonating, use standard scopes\n\tif impersonate, _ := m.Get(\"impersonate\"); impersonate == \"\" {\n\t\treturn dropboxConfig\n\t}\n\t// Make a copy of the config\n\tconfig := *dropboxConfig\n\t// Make a copy of the scopes with extra scopes requires appended\n\tconfig.Scopes = append(config.Scopes, \"members.read\", \"team_data.member\")\n\treturn &config\n}", "func (o InstanceOutput) OauthConfig() InstanceOauthConfigPtrOutput {\n\treturn o.ApplyT(func(v *Instance) InstanceOauthConfigPtrOutput { return v.OauthConfig }).(InstanceOauthConfigPtrOutput)\n}", "func WrapConfig(hfn http.HandlerFunc, cfg *config.APICfg, brk brokers.Broker, str stores.Store, mgr *oldPush.Manager, c push.Client) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tnStr := str.Clone()\n\t\tdefer 
nStr.Close()\n\t\tgorillaContext.Set(r, \"brk\", brk)\n\t\tgorillaContext.Set(r, \"str\", nStr)\n\t\tgorillaContext.Set(r, \"mgr\", mgr)\n\t\tgorillaContext.Set(r, \"apsc\", c)\n\t\tgorillaContext.Set(r, \"auth_resource\", cfg.ResAuth)\n\t\tgorillaContext.Set(r, \"auth_service_token\", cfg.ServiceToken)\n\t\tgorillaContext.Set(r, \"push_worker_token\", cfg.PushWorkerToken)\n\t\tgorillaContext.Set(r, \"push_enabled\", cfg.PushEnabled)\n\t\thfn.ServeHTTP(w, r)\n\n\t})\n}", "func (o *Config) Copy(s Config) {\n o.Enable = s.Enable\n o.RouterId = s.RouterId\n o.AsNumber = s.AsNumber\n o.BfdProfile = s.BfdProfile\n o.RejectDefaultRoute = s.RejectDefaultRoute\n o.InstallRoute = s.InstallRoute\n o.AggregateMed = s.AggregateMed\n o.DefaultLocalPreference = s.DefaultLocalPreference\n o.AsFormat = s.AsFormat\n o.AlwaysCompareMed = s.AlwaysCompareMed\n o.DeterministicMedComparison = s.DeterministicMedComparison\n o.EcmpMultiAs = s.EcmpMultiAs\n o.EnforceFirstAs = s.EnforceFirstAs\n o.EnableGracefulRestart = s.EnableGracefulRestart\n o.StaleRouteTime = s.StaleRouteTime\n o.LocalRestartTime = s.LocalRestartTime\n o.MaxPeerRestartTime = s.MaxPeerRestartTime\n o.ReflectorClusterId = s.ReflectorClusterId\n o.ConfederationMemberAs = s.ConfederationMemberAs\n o.AllowRedistributeDefaultRoute = s.AllowRedistributeDefaultRoute\n}", "func GetOpenIDConnectConfiguration(ctx context.Context, client *http.Client, url string) (config Configuration, err error) {\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn config, errors.Wrap(err, \"authproxy/oidc: couldn't create HTTP request\")\n\t}\n\n\treq = req.WithContext(ctx)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn config, errors.Wrap(err, \"authproxy/oidc: HTTP request failed\")\n\t}\n\n\tdefer func() {\n\t\terrClose := resp.Body.Close()\n\t\tif errClose != nil {\n\t\t\terr = errors.Wrap(errClose, \"authproxy/oidc: couldn't 
close HTTP response body\")\n\t\t}\n\t}()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn config, errors.Errorf(\"authproxy:oidc received non-200 status code: %d\", resp.StatusCode)\n\t}\n\n\terr = json.NewDecoder(resp.Body).Decode(&config)\n\tif err != nil {\n\t\treturn config, errors.Wrap(err, \"authproxy/oidc: couldn't decode HTTP response body to JSON\")\n\t}\n\n\tconfig.FillDefaultValuesIfEmpty()\n\n\treturn config, nil\n}", "func (o *Config) Clone() *Config {\n\tvar ret Config\n\tif err := json.Unmarshal([]byte(o.ToJSON()), &ret); err != nil {\n\t\tpanic(err)\n\t}\n\treturn &ret\n}", "func (sc *StoreConfig) ExtraConfig() map[string]interface{} {\n\treturn sc.OtherConfig\n}", "func (e *EndpointCore) ExtraConfig() (map[string]interface{}, error) {\n\treturn utils.MergeInterfaceMaps(\n\t\tmap[string]interface{}{\n\t\t\t\"host\": e.Host,\n\t\t\t\"port\": e.Port,\n\t\t\t\"name\": utils.FirstNonEmpty(e.Name, string(e.ID)),\n\t\t}, e.Configuration), nil\n}", "func (conf *RegistryConfig) Clone() *RegistryConfig {\n\tif conf == nil {\n\t\treturn nil\n\t}\n\trc := &RegistryConfig{\n\t\tID: conf.ID,\n\t\tProvider: conf.Provider,\n\t\tAddr: conf.Addr,\n\t\tRepo: conf.Repo.Clone(),\n\t\tConfig: make(map[string]interface{}, len(conf.Config)),\n\t}\n\tfor k, v := range conf.Config {\n\t\trc.Config[k] = v\n\t}\n\n\treturn rc\n}", "func populateClientConfig(config *Config) *Config {\n\tif config == nil {\n\t\tconfig = &Config{}\n\t}\n\tversions := config.Versions\n\tif len(versions) == 0 {\n\t\tversions = protocol.SupportedVersions\n\t}\n\n\thandshakeTimeout := protocol.DefaultHandshakeTimeout\n\tif config.HandshakeTimeout != 0 {\n\t\thandshakeTimeout = config.HandshakeTimeout\n\t}\n\tidleTimeout := protocol.DefaultIdleTimeout\n\tif config.IdleTimeout != 0 {\n\t\tidleTimeout = config.IdleTimeout\n\t}\n\n\tmaxReceiveStreamFlowControlWindow := config.MaxReceiveStreamFlowControlWindow\n\tif maxReceiveStreamFlowControlWindow == 0 {\n\t\tmaxReceiveStreamFlowControlWindow = 
protocol.DefaultMaxReceiveStreamFlowControlWindowClient\n\t}\n\tmaxReceiveConnectionFlowControlWindow := config.MaxReceiveConnectionFlowControlWindow\n\tif maxReceiveConnectionFlowControlWindow == 0 {\n\t\tmaxReceiveConnectionFlowControlWindow = protocol.DefaultMaxReceiveConnectionFlowControlWindowClient\n\t}\n\n\treturn &Config{\n\t\tVersions: versions,\n\t\tHandshakeTimeout: handshakeTimeout,\n\t\tIdleTimeout: idleTimeout,\n\t\tRequestConnectionIDTruncation: config.RequestConnectionIDTruncation,\n\t\tMaxReceiveStreamFlowControlWindow: maxReceiveStreamFlowControlWindow,\n\t\tMaxReceiveConnectionFlowControlWindow: maxReceiveConnectionFlowControlWindow,\n\t\tKeepAlive: config.KeepAlive,\n\t\tCacheHandshake: config.CacheHandshake,\n\t\tCreatePaths: config.CreatePaths,\n\t}\n}", "func ConfigClone(c *tls.Config,) *tls.Config", "func (cfg *appConfig) copy() appConfig { return *cfg }", "func (ut *teamOpeningConfigPayload) Publicize() *TeamOpeningConfigPayload {\n\tvar pub TeamOpeningConfigPayload\n\tif ut.BuyIncrementPrice != nil {\n\t\tpub.BuyIncrementPrice = *ut.BuyIncrementPrice\n\t}\n\tif ut.BuyIncrementQuan != nil {\n\t\tpub.BuyIncrementQuan = *ut.BuyIncrementQuan\n\t}\n\tif ut.ID != nil {\n\t\tpub.ID = ut.ID\n\t}\n\tif ut.LiquidationFee != nil {\n\t\tpub.LiquidationFee = *ut.LiquidationFee\n\t}\n\tif ut.OpeningPrice != nil {\n\t\tpub.OpeningPrice = *ut.OpeningPrice\n\t}\n\tif ut.OpeningShares != nil {\n\t\tpub.OpeningShares = *ut.OpeningShares\n\t}\n\tif ut.SellDecrementPrice != nil {\n\t\tpub.SellDecrementPrice = *ut.SellDecrementPrice\n\t}\n\tif ut.SellDecrementQuan != nil {\n\t\tpub.SellDecrementQuan = *ut.SellDecrementQuan\n\t}\n\tif ut.StartTradeDtTm != nil {\n\t\tpub.StartTradeDtTm = *ut.StartTradeDtTm\n\t}\n\treturn &pub\n}", "func (c *connAttrs) clone() *connAttrs {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\treturn &connAttrs{\n\t\t_host: c._host,\n\t\t_timeout: c._timeout,\n\t\t_pingInterval: c._pingInterval,\n\t\t_bufferSize: 
c._bufferSize,\n\t\t_bulkSize: c._bulkSize,\n\t\t_tcpKeepAlive: c._tcpKeepAlive,\n\t\t_tlsConfig: c._tlsConfig.Clone(),\n\t\t_defaultSchema: c._defaultSchema,\n\t\t_dialer: c._dialer,\n\t\t_applicationName: c._applicationName,\n\t\t_sessionVariables: maps.Clone(c._sessionVariables),\n\t\t_locale: c._locale,\n\t\t_fetchSize: c._fetchSize,\n\t\t_lobChunkSize: c._lobChunkSize,\n\t\t_dfv: c._dfv,\n\t\t_legacy: c._legacy,\n\t\t_cesu8Decoder: c._cesu8Decoder,\n\t\t_cesu8Encoder: c._cesu8Encoder,\n\t}\n}", "func CreateDeviceEnrollmentWindowsHelloForBusinessConfigurationFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {\n return NewDeviceEnrollmentWindowsHelloForBusinessConfiguration(), nil\n}", "func (c Configuration) Clone() Configuration {\n\treturn Configuration{\n\t\tEDATool: c.EDATool,\n\t\tInputFile: c.InputFile,\n\t\tOutputFile: c.OutputFile,\n\t\tLastUpdated: c.LastUpdated,\n\t}\n}", "func GetWotoConfig() *wotoConfiguration {\n\tif _wotoConfig != nil {\n\t\treturn _wotoConfig\n\t}\n\t_w := wotoConfiguration{\n\t\tUIDKeyName: [MaxUIDIndex]string{},\n\t\tLastUID: [MaxUIDIndex]players.UID{},\n\t}\n\tfor i := BaseUIDIndex - UIDIndexOffSet;\n\t\t\t\ti <= MaxUIDIndex - UIDIndexOffSet; i++ {\n\t\t_w.UIDKeyName[i] = UIDIndexSuffix + strconv.Itoa(i + OffSetTokenParts)\n\t\t_w.LastUID[i] = players.GetMinimumUID(uint8(i + OffSetTokenParts))\n\t}\n\treturn &_w\n}", "func (o MicrosoftGraphSharedPcConfiguration) MarshalJSON() ([]byte, error) {\n\ttoSerialize := map[string]interface{}{}\n\tif o.Id != nil {\n\t\ttoSerialize[\"id\"] = o.Id\n\t}\n\tif o.LastModifiedDateTime != nil {\n\t\ttoSerialize[\"lastModifiedDateTime\"] = o.LastModifiedDateTime\n\t}\n\tif o.CreatedDateTime != nil {\n\t\ttoSerialize[\"createdDateTime\"] = o.CreatedDateTime\n\t}\n\tif o.Description == nil {\n\t\tif o.isExplicitNullDescription 
{\n\t\t\ttoSerialize[\"description\"] = o.Description\n\t\t}\n\t} else {\n\t\ttoSerialize[\"description\"] = o.Description\n\t}\n\tif o.DisplayName != nil {\n\t\ttoSerialize[\"displayName\"] = o.DisplayName\n\t}\n\tif o.Version != nil {\n\t\ttoSerialize[\"version\"] = o.Version\n\t}\n\tif o.Assignments != nil {\n\t\ttoSerialize[\"assignments\"] = o.Assignments\n\t}\n\tif o.DeviceStatuses != nil {\n\t\ttoSerialize[\"deviceStatuses\"] = o.DeviceStatuses\n\t}\n\tif o.UserStatuses != nil {\n\t\ttoSerialize[\"userStatuses\"] = o.UserStatuses\n\t}\n\tif o.DeviceStatusOverview == nil {\n\t\tif o.isExplicitNullDeviceStatusOverview {\n\t\t\ttoSerialize[\"deviceStatusOverview\"] = o.DeviceStatusOverview\n\t\t}\n\t} else {\n\t\ttoSerialize[\"deviceStatusOverview\"] = o.DeviceStatusOverview\n\t}\n\tif o.UserStatusOverview == nil {\n\t\tif o.isExplicitNullUserStatusOverview {\n\t\t\ttoSerialize[\"userStatusOverview\"] = o.UserStatusOverview\n\t\t}\n\t} else {\n\t\ttoSerialize[\"userStatusOverview\"] = o.UserStatusOverview\n\t}\n\tif o.DeviceSettingStateSummaries != nil {\n\t\ttoSerialize[\"deviceSettingStateSummaries\"] = o.DeviceSettingStateSummaries\n\t}\n\tif o.AccountManagerPolicy == nil {\n\t\tif o.isExplicitNullAccountManagerPolicy {\n\t\t\ttoSerialize[\"accountManagerPolicy\"] = o.AccountManagerPolicy\n\t\t}\n\t} else {\n\t\ttoSerialize[\"accountManagerPolicy\"] = o.AccountManagerPolicy\n\t}\n\tif o.AllowedAccounts != nil {\n\t\ttoSerialize[\"allowedAccounts\"] = o.AllowedAccounts\n\t}\n\tif o.AllowLocalStorage != nil {\n\t\ttoSerialize[\"allowLocalStorage\"] = o.AllowLocalStorage\n\t}\n\tif o.DisableAccountManager != nil {\n\t\ttoSerialize[\"disableAccountManager\"] = o.DisableAccountManager\n\t}\n\tif o.DisableEduPolicies != nil {\n\t\ttoSerialize[\"disableEduPolicies\"] = o.DisableEduPolicies\n\t}\n\tif o.DisablePowerPolicies != nil {\n\t\ttoSerialize[\"disablePowerPolicies\"] = o.DisablePowerPolicies\n\t}\n\tif o.DisableSignInOnResume != nil 
{\n\t\ttoSerialize[\"disableSignInOnResume\"] = o.DisableSignInOnResume\n\t}\n\tif o.Enabled != nil {\n\t\ttoSerialize[\"enabled\"] = o.Enabled\n\t}\n\tif o.IdleTimeBeforeSleepInSeconds == nil {\n\t\tif o.isExplicitNullIdleTimeBeforeSleepInSeconds {\n\t\t\ttoSerialize[\"idleTimeBeforeSleepInSeconds\"] = o.IdleTimeBeforeSleepInSeconds\n\t\t}\n\t} else {\n\t\ttoSerialize[\"idleTimeBeforeSleepInSeconds\"] = o.IdleTimeBeforeSleepInSeconds\n\t}\n\tif o.KioskAppDisplayName == nil {\n\t\tif o.isExplicitNullKioskAppDisplayName {\n\t\t\ttoSerialize[\"kioskAppDisplayName\"] = o.KioskAppDisplayName\n\t\t}\n\t} else {\n\t\ttoSerialize[\"kioskAppDisplayName\"] = o.KioskAppDisplayName\n\t}\n\tif o.KioskAppUserModelId == nil {\n\t\tif o.isExplicitNullKioskAppUserModelId {\n\t\t\ttoSerialize[\"kioskAppUserModelId\"] = o.KioskAppUserModelId\n\t\t}\n\t} else {\n\t\ttoSerialize[\"kioskAppUserModelId\"] = o.KioskAppUserModelId\n\t}\n\tif o.MaintenanceStartTime == nil {\n\t\tif o.isExplicitNullMaintenanceStartTime {\n\t\t\ttoSerialize[\"maintenanceStartTime\"] = o.MaintenanceStartTime\n\t\t}\n\t} else {\n\t\ttoSerialize[\"maintenanceStartTime\"] = o.MaintenanceStartTime\n\t}\n\treturn json.Marshal(toSerialize)\n}", "func NewThingConfig(resp *iot.CreateCertificateFromCsrOutput) *ThingConfig {\n\treturn &ThingConfig{\n\t\tCertificateArn: *resp.CertificateArn,\n\t\tCertificateID: *resp.CertificateId,\n\t\tCertificatePem: *resp.CertificatePem,\n\t}\n}", "func (conf *OrchestratorConfig) Clone() *OrchestratorConfig {\n\tif conf == nil {\n\t\treturn nil\n\t}\n\treturn &OrchestratorConfig{\n\t\tID: conf.ID,\n\t\tAddr: conf.Addr,\n\t}\n}", "func (s *NotifierConfig) CommonConfig() *NotifierConfig {\n\treturn s\n}", "func (s *store) parseConfig(raw json.RawMessage) (*config, error) {\n\tc := &config{}\n\tif err := sidecred.UnmarshalConfig(raw, &c); err != nil {\n\t\treturn nil, err\n\t}\n\tif c.SecretTemplate == \"\" {\n\t\tc.SecretTemplate = s.secretTemplate\n\t}\n\treturn c, nil\n}", "func (c 
Config) Copy() Config {\n\tdst := Config{}\n\tdst.Credentials = c.Credentials\n\tdst.Endpoint = c.Endpoint\n\tdst.Region = c.Region\n\tdst.DisableSSL = c.DisableSSL\n\tdst.ManualSend = c.ManualSend\n\tdst.HTTPClient = c.HTTPClient\n\tdst.LogHTTPBody = c.LogHTTPBody\n\tdst.LogLevel = c.LogLevel\n\tdst.Logger = c.Logger\n\tdst.MaxRetries = c.MaxRetries\n\tdst.DisableParamValidation = c.DisableParamValidation\n\tdst.DisableComputeChecksums = c.DisableComputeChecksums\n\tdst.S3ForcePathStyle = c.S3ForcePathStyle\n\tdst.DomainMode = c.DomainMode\n\tdst.SignerVersion = c.SignerVersion\n\treturn dst\n}", "func (d *WindowDefinition) Clone() *WindowDefinition {\n\tif d == nil {\n\t\treturn nil\n\t}\n\tother := *d\n\tother.Base = d.Base.Clone()\n\tother.Partitions = cloneExprs(d.Partitions)\n\tother.OrderingTerms = cloneOrderingTerms(d.OrderingTerms)\n\tother.Frame = d.Frame.Clone()\n\treturn &other\n}", "func (eventMessage EventMessageWithConf) CloneWithConf(source string) EventMessageWithConf {\n\tnewMessage := EventMessageWithConf{\n\t\tConf: eventMessage.Conf,\n\t}\n\tnewMessage.Header = eventMessage.Header\n\tnewMessage.Header.Source = source\n\tnewMessage.Header.Timestamp = time.Now().UnixNano() / int64(time.Millisecond)\n\treturn newMessage\n}", "func (*PDSCH_Config) XXX_OneofWrappers() []interface{} {\n\treturn []interface{}{\n\t\t(*PDSCH_Config_ReleaseDmrs_DownlinkForPDSCH_MappingTypeA)(nil),\n\t\t(*PDSCH_Config_SetupDmrs_DownlinkForPDSCH_MappingTypeA)(nil),\n\t\t(*PDSCH_Config_ReleaseDmrs_DownlinkForPDSCH_MappingTypeB)(nil),\n\t\t(*PDSCH_Config_SetupDmrs_DownlinkForPDSCH_MappingTypeB)(nil),\n\t\t(*PDSCH_Config_ReleasePdsch_TimeDomainAllocationList)(nil),\n\t\t(*PDSCH_Config_SetupPdsch_TimeDomainAllocationList)(nil),\n\t\t(*PDSCH_Config_StaticBundling)(nil),\n\t\t(*PDSCH_Config_DynamicBundling)(nil),\n\t\t(*PDSCH_Config_ReleaseP_ZP_CSI_RS_ResourceSet)(nil),\n\t\t(*PDSCH_Config_SetupP_ZP_CSI_RS_ResourceSet)(nil),\n\t}\n}", "func (c *config) newConfig(redirect string) 
*oauth2.Config {\n\treturn &oauth2.Config{\n\t\tClientID: c.Client,\n\t\tClientSecret: c.Secret,\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: fmt.Sprintf(\"%s/site/oauth2/authorize\", c.URL),\n\t\t\tTokenURL: fmt.Sprintf(\"%s/site/oauth2/access_token\", c.URL),\n\t\t},\n\t\tRedirectURL: fmt.Sprintf(\"%s/authorize\", redirect),\n\t}\n}", "func (*PCCH_Config) XXX_OneofWrappers() []interface{} {\n\treturn []interface{}{\n\t\t(*PCCH_Config_OneT)(nil),\n\t\t(*PCCH_Config_HalfT)(nil),\n\t\t(*PCCH_Config_QuarterT)(nil),\n\t\t(*PCCH_Config_OneEighthT)(nil),\n\t\t(*PCCH_Config_OneSixteenthT)(nil),\n\t\t(*PCCH_Config_SCS15KHZoneT)(nil),\n\t\t(*PCCH_Config_SCS30KHZoneT_SCS15KHZhalfT)(nil),\n\t\t(*PCCH_Config_SCS60KHZoneT_SCS30KHZhalfT_SCS15KHZquarterT)(nil),\n\t\t(*PCCH_Config_SCS120KHZoneT_SCS60KHZhalfT_SCS30KHZquarterT_SCS15KHZoneEighthT)(nil),\n\t\t(*PCCH_Config_SCS120KHZhalfT_SCS60KHZquarterT_SCS30KHZoneEighthT_SCS15KHZoneSixteenthT)(nil),\n\t\t(*PCCH_Config_SCS120KHZquarterT_SCS60KHZoneEighthT_SCS30KHZoneSixteenthT)(nil),\n\t\t(*PCCH_Config_SCS120KHZoneEighthT_SCS60KHZoneSixteenthT)(nil),\n\t\t(*PCCH_Config_SCS120KHZoneSixteenthT)(nil),\n\t}\n}", "func ReadConfig(conf *api.ConfigMap) config.Configuration {\n\tif len(conf.Data) == 0 {\n\t\treturn config.NewDefault()\n\t}\n\n\tvar errors []int\n\tvar skipUrls []string\n\tvar whitelist []string\n\n\tif val, ok := conf.Data[customHTTPErrors]; ok {\n\t\tdelete(conf.Data, customHTTPErrors)\n\t\tfor _, i := range strings.Split(val, \",\") {\n\t\t\tj, err := strconv.Atoi(i)\n\t\t\tif err != nil {\n\t\t\t\tglog.Warningf(\"%v is not a valid http code: %v\", i, err)\n\t\t\t} else {\n\t\t\t\terrors = append(errors, j)\n\t\t\t}\n\t\t}\n\t}\n\tif val, ok := conf.Data[skipAccessLogUrls]; ok {\n\t\tdelete(conf.Data, skipAccessLogUrls)\n\t\tskipUrls = strings.Split(val, \",\")\n\t}\n\tif val, ok := conf.Data[whitelistSourceRange]; ok {\n\t\tdelete(conf.Data, whitelistSourceRange)\n\t\twhitelist = append(whitelist, strings.Split(val, 
\",\")...)\n\t}\n\n\tto := config.Configuration{}\n\tto.Backend = defaults.Backend{\n\t\tCustomHTTPErrors: filterErrors(errors),\n\t\tSkipAccessLogURLs: skipUrls,\n\t\tWhitelistSourceRange: whitelist,\n\t}\n\tdef := config.NewDefault()\n\tif err := mergo.Merge(&to, def); err != nil {\n\t\tglog.Warningf(\"unexpected error merging defaults: %v\", err)\n\t}\n\n\tmetadata := &mapstructure.Metadata{}\n\tdecoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{\n\t\tTagName: \"structs\",\n\t\tResult: &to,\n\t\tWeaklyTypedInput: true,\n\t\tMetadata: metadata,\n\t})\n\n\terr = decoder.Decode(conf.Data)\n\tif err != nil {\n\t\tglog.Infof(\"%v\", err)\n\t}\n\treturn to\n}", "func (client *XenClient) PBDGetOtherConfig(self string) (result map[string]string, err error) {\n\tobj, err := client.APICall(\"PBD.get_other_config\", self)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinterim := reflect.ValueOf(obj)\n\tresult = map[string]string{}\n\tfor _, key := range interim.MapKeys() {\n\t\tobj := interim.MapIndex(key)\n\t\tresult[key.String()] = obj.String()\n\t}\n\n\treturn\n}", "func newConfigV101() *configV1 {\n\tconf := new(configV1)\n\tconf.Version = mcCurrentConfigVersion\n\t// make sure to allocate map's otherwise Golang\n\t// exits silently without providing any errors\n\tconf.Hosts = make(map[string]*hostConfig)\n\tconf.Aliases = make(map[string]string)\n\n\tlocalHostConfig := new(hostConfig)\n\tlocalHostConfig.AccessKeyID = \"\"\n\tlocalHostConfig.SecretAccessKey = \"\"\n\n\ts3HostConf := new(hostConfig)\n\ts3HostConf.AccessKeyID = globalAccessKeyID\n\ts3HostConf.SecretAccessKey = globalSecretAccessKey\n\n\t// Your example host config\n\texampleHostConf := new(hostConfig)\n\texampleHostConf.AccessKeyID = globalAccessKeyID\n\texampleHostConf.SecretAccessKey = globalSecretAccessKey\n\n\tplayHostConfig := new(hostConfig)\n\tplayHostConfig.AccessKeyID = \"\"\n\tplayHostConfig.SecretAccessKey = \"\"\n\n\tdlHostConfig := new(hostConfig)\n\tdlHostConfig.AccessKeyID = 
\"\"\n\tdlHostConfig.SecretAccessKey = \"\"\n\n\tconf.Hosts[exampleHostURL] = exampleHostConf\n\tconf.Hosts[\"localhost:*\"] = localHostConfig\n\tconf.Hosts[\"127.0.0.1:*\"] = localHostConfig\n\tconf.Hosts[\"s3*.amazonaws.com\"] = s3HostConf\n\tconf.Hosts[\"play.minio.io:9000\"] = playHostConfig\n\tconf.Hosts[\"dl.minio.io:9000\"] = dlHostConfig\n\n\taliases := make(map[string]string)\n\taliases[\"s3\"] = \"https://s3.amazonaws.com\"\n\taliases[\"play\"] = \"https://play.minio.io:9000\"\n\taliases[\"dl\"] = \"https://dl.minio.io:9000\"\n\taliases[\"localhost\"] = \"http://localhost:9000\"\n\tconf.Aliases = aliases\n\n\treturn conf\n}", "func NewCombinedFromConfig(other map[string]interface{}) (Provider, error) {\n\tstatus, err := NewOpenWBStatusProviderFromConfig(other)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to := &combinedProvider{status: status}\n\treturn o, nil\n}", "func UnmarshalConfigurationPatch(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(ConfigurationPatch)\n\terr = core.UnmarshalPrimitive(m, \"api_key\", &obj.ApiKey)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"lets_encrypt_environment\", &obj.LetsEncryptEnvironment)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"lets_encrypt_private_key\", &obj.LetsEncryptPrivateKey)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"lets_encrypt_preferred_chain\", &obj.LetsEncryptPreferredChain)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"cloud_internet_services_apikey\", &obj.CloudInternetServicesApikey)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"cloud_internet_services_crn\", &obj.CloudInternetServicesCrn)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"classic_infrastructure_username\", &obj.ClassicInfrastructureUsername)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, 
\"classic_infrastructure_password\", &obj.ClassicInfrastructurePassword)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"max_ttl\", &obj.MaxTTL)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crl_expiry\", &obj.CrlExpiry)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crl_disable\", &obj.CrlDisable)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crl_distribution_points_encoded\", &obj.CrlDistributionPointsEncoded)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"issuing_certificates_urls_encoded\", &obj.IssuingCertificatesUrlsEncoded)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allowed_secret_groups\", &obj.AllowedSecretGroups)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"ttl\", &obj.TTL)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_localhost\", &obj.AllowLocalhost)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allowed_domains\", &obj.AllowedDomains)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allowed_domains_template\", &obj.AllowedDomainsTemplate)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_bare_domains\", &obj.AllowBareDomains)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_subdomains\", &obj.AllowSubdomains)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_glob_domains\", &obj.AllowGlobDomains)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_any_name\", &obj.AllowAnyName)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"enforce_hostnames\", &obj.EnforceHostnames)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allow_ip_sans\", &obj.AllowIpSans)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = 
core.UnmarshalPrimitive(m, \"allowed_uri_sans\", &obj.AllowedUriSans)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"allowed_other_sans\", &obj.AllowedOtherSans)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"server_flag\", &obj.ServerFlag)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"client_flag\", &obj.ClientFlag)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"code_signing_flag\", &obj.CodeSigningFlag)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"email_protection_flag\", &obj.EmailProtectionFlag)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"key_type\", &obj.KeyType)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"key_bits\", &obj.KeyBits)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"key_usage\", &obj.KeyUsage)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"ext_key_usage\", &obj.ExtKeyUsage)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"ext_key_usage_oids\", &obj.ExtKeyUsageOids)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"use_csr_common_name\", &obj.UseCsrCommonName)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"use_csr_sans\", &obj.UseCsrSans)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"ou\", &obj.Ou)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"organization\", &obj.Organization)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"country\", &obj.Country)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locality\", &obj.Locality)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"province\", &obj.Province)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"street_address\", 
&obj.StreetAddress)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"postal_code\", &obj.PostalCode)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"serial_number\", &obj.SerialNumber)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"require_cn\", &obj.RequireCn)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"policy_identifiers\", &obj.PolicyIdentifiers)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"basic_constraints_valid_for_non_ca\", &obj.BasicConstraintsValidForNonCa)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"not_before_duration\", &obj.NotBeforeDuration)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func NewWindows10XVpnConfiguration()(*Windows10XVpnConfiguration) {\n m := &Windows10XVpnConfiguration{\n DeviceManagementResourceAccessProfileBase: *NewDeviceManagementResourceAccessProfileBase(),\n }\n odataTypeValue := \"#microsoft.graph.windows10XVpnConfiguration\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func (src *HNCConfiguration) ConvertTo(dstRaw conversion.Hub) error {\n\tdst := dstRaw.(*v1a2.HNCConfiguration)\n\n\t// Spec\n\tsrcSpecTypes := src.Spec.Types\n\tdstSpecRscs := []v1a2.ResourceSpec{}\n\tfor _, st := range srcSpecTypes {\n\t\tdr := v1a2.ResourceSpec{}\n\t\t// Hack the group from APIVersion by removing the version, e.g.\n\t\t// 1) \"rbac.authorization.k8s.io/v1\" => \"rbac.authorization.k8s.io\";\n\t\t// 2) \"v1\" => \"\" (for core type).\n\t\tgv := strings.Split(st.APIVersion, \"/\")\n\t\tif len(gv) == 2 {\n\t\t\tdr.Group = gv[0]\n\t\t}\n\t\t// Hack the resource from Kind by using the lower case and plural form, e.g.\n\t\t// 1) \"Role\" => \"roles\"\n\t\t// 2) \"NetworkPolicy\" => \"networkpolicies\"\n\t\tlk := strings.ToLower(st.Kind)\n\t\tif strings.HasSuffix(lk, \"y\") {\n\t\t\tlk = strings.TrimSuffix(lk, 
\"y\") + \"ie\"\n\t\t}\n\t\tdr.Resource = lk + \"s\"\n\t\tdtm, ok := toV1A2[st.Mode]\n\t\tif !ok {\n\t\t\t// This should never happen with the enum schema validation.\n\t\t\tdtm = v1a2.Ignore\n\t\t}\n\t\tdr.Mode = dtm\n\t\t// We will only convert non-enforced types since in v1a2 we removed the\n\t\t// enforced types from spec. Having enforced types configured in the spec\n\t\t// would cause 'MultipleConfigurationsForType' condition.\n\t\tif !v1a2.IsEnforcedType(dr) {\n\t\t\tdstSpecRscs = append(dstSpecRscs, dr)\n\t\t}\n\t}\n\tdst.Spec.Resources = dstSpecRscs\n\n\t// We don't need to convert status because controllers will update it.\n\tdst.Status = v1a2.HNCConfigurationStatus{}\n\n\t// rote conversion - ObjectMeta\n\tdst.ObjectMeta = src.ObjectMeta\n\n\treturn nil\n}", "func (o ServiceOutput) OpenapiConfig() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Service) pulumi.StringPtrOutput { return v.OpenapiConfig }).(pulumi.StringPtrOutput)\n}", "func (c *TelemetryConfig) Copy() *TelemetryConfig {\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\tvar o TelemetryConfig\n\to.MetricsPrefix = StringCopy(c.MetricsPrefix)\n\n\tif c.Stdout != nil {\n\t\to.Stdout = c.Stdout.Copy()\n\t}\n\tif c.DogStatsD != nil {\n\t\to.DogStatsD = c.DogStatsD.Copy()\n\t}\n\tif c.Prometheus != nil {\n\t\to.Prometheus = c.Prometheus.Copy()\n\t}\n\n\treturn &o\n}", "func (c *Config) Clone() *Config {\n\tglobal := *c.Global\n\tnewconf := &Config{\n\t\tGlobal: &global,\n\t\tServers: make(map[string]*Server, len(c.Servers)),\n\t\tErrors: make([]error, 0),\n\t\tfilename: c.filename,\n\t}\n\tfor name, srv := range c.Servers {\n\t\tnewsrv := *srv\n\t\tnewsrv.parent = newconf\n\t\tnewconf.Servers[name] = &newsrv\n\t}\n\treturn newconf\n}", "func (o *OAuth2) Config() *wx.AppConfig {\n\treturn o.config\n}", "func newConfig() (*config, error) {\n\tec2Metadata := ec2metadata.New(session.Must(session.NewSession()))\n\tregion, err := ec2Metadata.Region()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, 
\"unable to get region from ec2 metadata\")\n\t}\n\n\tinstanceID, err := ec2Metadata.GetMetadata(\"instance-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get instance id from ec2 metadata\")\n\t}\n\n\tmac, err := ec2Metadata.GetMetadata(\"mac\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get mac from ec2 metadata\")\n\t}\n\n\tsecurityGroups, err := ec2Metadata.GetMetadata(\"security-groups\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get security groups from ec2 metadata\")\n\t}\n\n\tinterfaces, err := ec2Metadata.GetMetadata(\"network/interfaces/macs\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get interfaces from ec2 metadata\")\n\t}\n\n\tsubnet, err := ec2Metadata.GetMetadata(\"network/interfaces/macs/\" + mac + \"/subnet-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get subnet from ec2 metadata\")\n\t}\n\n\tvpc, err := ec2Metadata.GetMetadata(\"network/interfaces/macs/\" + mac + \"/vpc-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get vpc from ec2 metadata\")\n\t}\n\n\treturn &config{region: region,\n\t\tsubnet: subnet,\n\t\tindex: int64(len(strings.Split(interfaces, \"\\n\"))),\n\t\tinstanceID: instanceID,\n\t\tsecurityGroups: strings.Split(securityGroups, \"\\n\"),\n\t\tvpc: vpc,\n\t}, nil\n}", "func (fOpenCfg *FileOpenConfig) CopyOut() FileOpenConfig {\n\n if fOpenCfg.fileOpenModes == nil {\n fOpenCfg.fileOpenModes = make([]FileOpenMode, 0)\n }\n\n fOpStat2 := FileOpenConfig{}\n fOpStat2.isInitialized = fOpenCfg.isInitialized\n fOpStat2.fileOpenType = fOpenCfg.fileOpenType\n lenFOpenModes := len(fOpenCfg.fileOpenModes)\n\n if lenFOpenModes == 0 {\n fOpenCfg.fileOpenModes = append(fOpenCfg.fileOpenModes, FOpenMode.ModeNone())\n lenFOpenModes = 1\n }\n\n fOpStat2.fileOpenModes = make([]FileOpenMode, lenFOpenModes)\n\n for i := 0; i < lenFOpenModes; i++ {\n fOpStat2.fileOpenModes[i] = fOpenCfg.fileOpenModes[i]\n 
}\n\n return fOpStat2\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsCaller) GetRegistrationConfig(opts *bind.CallOpts) (struct {\n\tEnabled bool\n\tWindowSizeInBlocks uint32\n\tAllowedPerWindow uint16\n\tKeeperRegistry common.Address\n\tMinLINKJuels *big.Int\n\tWindowStart uint64\n\tApprovedInCurrentWindow uint16\n}, error) {\n\tvar out []interface{}\n\terr := _UpkeepRegistrationRequests.contract.Call(opts, &out, \"getRegistrationConfig\")\n\n\toutstruct := new(struct {\n\t\tEnabled bool\n\t\tWindowSizeInBlocks uint32\n\t\tAllowedPerWindow uint16\n\t\tKeeperRegistry common.Address\n\t\tMinLINKJuels *big.Int\n\t\tWindowStart uint64\n\t\tApprovedInCurrentWindow uint16\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.Enabled = *abi.ConvertType(out[0], new(bool)).(*bool)\n\toutstruct.WindowSizeInBlocks = *abi.ConvertType(out[1], new(uint32)).(*uint32)\n\toutstruct.AllowedPerWindow = *abi.ConvertType(out[2], new(uint16)).(*uint16)\n\toutstruct.KeeperRegistry = *abi.ConvertType(out[3], new(common.Address)).(*common.Address)\n\toutstruct.MinLINKJuels = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int)\n\toutstruct.WindowStart = *abi.ConvertType(out[5], new(uint64)).(*uint64)\n\toutstruct.ApprovedInCurrentWindow = *abi.ConvertType(out[6], new(uint16)).(*uint16)\n\n\treturn *outstruct, err\n\n}", "func (*DRX_Config) XXX_OneofWrappers() []interface{} {\n\treturn 
[]interface{}{\n\t\t(*DRX_Config_SubMilliSeconds)(nil),\n\t\t(*DRX_Config_MilliSeconds)(nil),\n\t\t(*DRX_Config_Ms10)(nil),\n\t\t(*DRX_Config_Ms20)(nil),\n\t\t(*DRX_Config_Ms32)(nil),\n\t\t(*DRX_Config_Ms40)(nil),\n\t\t(*DRX_Config_Ms60)(nil),\n\t\t(*DRX_Config_Ms64)(nil),\n\t\t(*DRX_Config_Ms70)(nil),\n\t\t(*DRX_Config_Ms80)(nil),\n\t\t(*DRX_Config_Ms128)(nil),\n\t\t(*DRX_Config_Ms160)(nil),\n\t\t(*DRX_Config_Ms256)(nil),\n\t\t(*DRX_Config_Ms320)(nil),\n\t\t(*DRX_Config_Ms512)(nil),\n\t\t(*DRX_Config_Ms640)(nil),\n\t\t(*DRX_Config_Ms1024)(nil),\n\t\t(*DRX_Config_Ms1280)(nil),\n\t\t(*DRX_Config_Ms2048)(nil),\n\t\t(*DRX_Config_Ms2560)(nil),\n\t\t(*DRX_Config_Ms5120)(nil),\n\t\t(*DRX_Config_Ms10240)(nil),\n\t}\n}", "func (dc *DomainConfig) Copy() (*DomainConfig, error) {\n\tnewDc := &DomainConfig{}\n\terr := reprint.FromTo(dc, newDc) // Deep copy\n\treturn newDc, err\n\n\t// NB(tlim): The old version of this copied the structure by gob-encoding\n\t// and decoding it. gob doesn't like the dc.RegisterInstance or\n\t// dc.DNSProviderInstances fields, so we saved a temporary copy of those,\n\t// nil'ed out the original, did the gob copy, and then manually copied those\n\t// fields using the temp variables we saved. 
It looked like:\n\t//reg, dnsps := dc.RegistrarInstance, dc.DNSProviderInstances\n\t//dc.RegistrarInstance, dc.DNSProviderInstances = nil, nil\n\t// (perform the copy)\n\t//dc.RegistrarInstance, dc.DNSProviderInstances = reg, dnsps\n\t//newDc.RegistrarInstance, newDc.DNSProviderInstances = reg, dnsps\n}", "func expandCustomDocumentEnrichmentConfiguration(tfList []interface{}) *types.CustomDocumentEnrichmentConfiguration {\n\tif len(tfList) == 0 || tfList[0] == nil {\n\t\treturn nil\n\t}\n\n\ttfMap, ok := tfList[0].(map[string]interface{})\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tresult := &types.CustomDocumentEnrichmentConfiguration{}\n\n\tif v, ok := tfMap[\"inline_configurations\"]; ok && v.(*schema.Set).Len() > 0 {\n\t\tresult.InlineConfigurations = expandInlineCustomDocumentEnrichmentConfiguration(v.(*schema.Set).List())\n\t}\n\n\tif v, ok := tfMap[\"post_extraction_hook_configuration\"].([]interface{}); ok && len(v) > 0 {\n\t\tresult.PostExtractionHookConfiguration = expandHookConfiguration(v)\n\t}\n\n\tif v, ok := tfMap[\"pre_extraction_hook_configuration\"].([]interface{}); ok && len(v) > 0 {\n\t\tresult.PreExtractionHookConfiguration = expandHookConfiguration(v)\n\t}\n\n\tif v, ok := tfMap[\"role_arn\"].(string); ok && v != \"\" {\n\t\tresult.RoleArn = aws.String(v)\n\t}\n\n\treturn result\n}", "func populateConfig(p *ProviderConfig, credentials aws.Credentials) cty.Value {\n\tmerged := make(map[string]cty.Value)\n\tmerged[\"region\"] = cty.StringVal(p.Spec.Region)\n\tmerged[\"access_key\"] = cty.StringVal(credentials.AccessKeyID)\n\tmerged[\"secret_key\"] = cty.StringVal(credentials.SecretAccessKey)\n\tmerged[\"assume_role\"] = cty.ListValEmpty(assumeRoleObjectType())\n\tmerged[\"ignore_tags\"] = cty.ListValEmpty(ignoreTagsObjectType())\n\tmerged[\"endpoints\"] = cty.SetValEmpty(endpointSetElementType())\n\n\tmerged[\"token\"] = cty.NullVal(cty.String)\n\tmerged[\"allowed_account_ids\"] = cty.SetValEmpty(cty.String)\n\tmerged[\"forbidden_account_ids\"] = 
cty.SetValEmpty(cty.String)\n\tmerged[\"insecure\"] = cty.NullVal(cty.Bool)\n\tmerged[\"max_retries\"] = cty.NullVal(cty.Number)\n\tmerged[\"profile\"] = cty.NullVal(cty.String)\n\tmerged[\"s3_force_path_style\"] = cty.NullVal(cty.Bool)\n\tmerged[\"shared_credentials_file\"] = cty.NullVal(cty.String)\n\tmerged[\"skip_credentials_validation\"] = cty.NullVal(cty.Bool)\n\tmerged[\"skip_get_ec2_platforms\"] = cty.NullVal(cty.Bool)\n\tmerged[\"skip_metadata_api_check\"] = cty.NullVal(cty.Bool)\n\tmerged[\"skip_region_validation\"] = cty.NullVal(cty.Bool)\n\tmerged[\"skip_requesting_account_id\"] = cty.NullVal(cty.Bool)\n\tmerged[\"token\"] = cty.NullVal(cty.String)\n\n\treturn cty.ObjectVal(merged)\n}", "func NewVpnConfiguration()(*VpnConfiguration) {\n m := &VpnConfiguration{\n DeviceConfiguration: *NewDeviceConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.vpnConfiguration\"\n m.SetOdataType(&odataTypeValue)\n return m\n}", "func NewSpecConfig(config *protoform.Config, kubeClient *kubernetes.Clientset, opssightClient *opssightclientset.Clientset, hubClient *hubclientset.Clientset, opssight *opssightapi.OpsSight, isBlackDuckClusterScope bool, dryRun bool) *SpecConfig {\n\topssightSpec := &opssight.Spec\n\tname := opssight.Name\n\tnames := map[string]string{\n\t\t\"perceptor\": \"core\",\n\t\t\"pod-perceiver\": \"pod-processor\",\n\t\t\"image-perceiver\": \"image-processor\",\n\t\t\"artifactory-perceiver\": \"artifactory-processor\",\n\t\t\"quay-perceiver\": \"quay-processor\",\n\t\t\"scanner\": \"scanner\",\n\t\t\"perceptor-imagefacade\": \"image-getter\",\n\t\t\"skyfire\": \"skyfire\",\n\t\t\"prometheus\": \"prometheus\",\n\t\t\"configmap\": \"opssight\",\n\t\t\"perceiver-service-account\": \"processor\",\n\t}\n\tbaseImageURL := \"docker.io/blackducksoftware\"\n\tversion := \"2.2.5\"\n\timages := map[string]string{\n\t\t\"perceptor\": fmt.Sprintf(\"%s/opssight-core:%s\", baseImageURL, version),\n\t\t\"pod-perceiver\": 
fmt.Sprintf(\"%s/opssight-pod-processor:%s\", baseImageURL, version),\n\t\t\"image-perceiver\": fmt.Sprintf(\"%s/opssight-image-processor:%s\", baseImageURL, version),\n\t\t\"artifactory-perceiver\": fmt.Sprintf(\"%s/opssight-artifactory-processor:%s\", baseImageURL, version),\n\t\t\"quay-perceiver\": fmt.Sprintf(\"%s/opssight-quay-processor:%s\", baseImageURL, version),\n\t\t\"scanner\": fmt.Sprintf(\"%s/opssight-scanner:%s\", baseImageURL, version),\n\t\t\"perceptor-imagefacade\": fmt.Sprintf(\"%s/opssight-image-getter:%s\", baseImageURL, version),\n\t\t\"skyfire\": \"gcr.io/saas-hub-stg/blackducksoftware/pyfire:master\",\n\t\t\"prometheus\": \"docker.io/prom/prometheus:v2.1.0\",\n\t}\n\tif opssightSpec.IsUpstream {\n\t\tnames = map[string]string{\n\t\t\t\"perceptor\": \"perceptor\",\n\t\t\t\"pod-perceiver\": \"pod-perceiver\",\n\t\t\t\"image-perceiver\": \"image-perceiver\",\n\t\t\t\"artifactory-perceiver\": \"artifactory-perceiver\",\n\t\t\t\"quay-perceiver\": \"quay-perceiver\",\n\t\t\t\"scanner\": \"scanner\",\n\t\t\t\"perceptor-imagefacade\": \"image-facade\",\n\t\t\t\"skyfire\": \"skyfire\",\n\t\t\t\"prometheus\": \"prometheus\",\n\t\t\t\"configmap\": \"perceptor\",\n\t\t\t\"perceiver-service-account\": \"perceiver\",\n\t\t}\n\t\tbaseImageURL = \"gcr.io/saas-hub-stg/blackducksoftware\"\n\t\tversion = \"master\"\n\t\timages = map[string]string{\n\t\t\t\"perceptor\": fmt.Sprintf(\"%s/perceptor:%s\", baseImageURL, version),\n\t\t\t\"pod-perceiver\": fmt.Sprintf(\"%s/pod-perceiver:%s\", baseImageURL, version),\n\t\t\t\"image-perceiver\": fmt.Sprintf(\"%s/image-perceiver:%s\", baseImageURL, version),\n\t\t\t\"artifactory-perceiver\": fmt.Sprintf(\"%s/artifactory-perceiver:%s\", baseImageURL, version),\n\t\t\t\"quay-perceiver\": fmt.Sprintf(\"%s/quay-perceiver:%s\", baseImageURL, version),\n\t\t\t\"scanner\": fmt.Sprintf(\"%s/perceptor-scanner:%s\", baseImageURL, version),\n\t\t\t\"perceptor-imagefacade\": fmt.Sprintf(\"%s/perceptor-imagefacade:%s\", 
baseImageURL, version),\n\t\t\t\"skyfire\": \"gcr.io/saas-hub-stg/blackducksoftware/pyfire:master\",\n\t\t\t\"prometheus\": \"docker.io/prom/prometheus:v2.1.0\"}\n\t}\n\n\tfor componentName, componentImage := range images {\n\t\timage := appsutil.GenerateImageTag(componentImage, opssightSpec.ImageRegistries, opssightSpec.RegistryConfiguration)\n\t\timages[componentName] = image\n\t}\n\n\tconfigMap := &MainOpssightConfigMap{\n\t\tLogLevel: opssightSpec.LogLevel,\n\t\tBlackDuck: &BlackDuckConfig{\n\t\t\tConnectionsEnvironmentVariableName: opssightSpec.Blackduck.ConnectionsEnvironmentVariableName,\n\t\t\tTLSVerification: opssightSpec.Blackduck.TLSVerification,\n\t\t},\n\t\tImageFacade: &ImageFacadeConfig{\n\t\t\tCreateImagesOnly: false,\n\t\t\tHost: \"localhost\",\n\t\t\tPort: 3004,\n\t\t\tImagePullerType: opssightSpec.ScannerPod.ImageFacade.ImagePullerType,\n\t\t},\n\t\tPerceiver: &PerceiverConfig{\n\t\t\tCertificate: opssightSpec.Perceiver.Certificate,\n\t\t\tCertificateKey: opssightSpec.Perceiver.CertificateKey,\n\t\t\tImage: &ImagePerceiverConfig{},\n\t\t\tPod: &PodPerceiverConfig{\n\t\t\t\tNamespaceFilter: opssightSpec.Perceiver.PodPerceiver.NamespaceFilter,\n\t\t\t},\n\t\t\tArtifactory: &ArtifactoryPerceiverConfig{\n\t\t\t\tDumper: opssightSpec.Perceiver.EnableArtifactoryPerceiverDumper,\n\t\t\t},\n\t\t\tAnnotationIntervalSeconds: opssightSpec.Perceiver.AnnotationIntervalSeconds,\n\t\t\tDumpIntervalMinutes: opssightSpec.Perceiver.DumpIntervalMinutes,\n\t\t\tPort: 3002,\n\t\t},\n\t\tPerceptor: &PerceptorConfig{\n\t\t\tTimings: &PerceptorTimingsConfig{\n\t\t\t\tCheckForStalledScansPauseHours: opssightSpec.Perceptor.CheckForStalledScansPauseHours,\n\t\t\t\tClientTimeoutMilliseconds: opssightSpec.Perceptor.ClientTimeoutMilliseconds,\n\t\t\t\tModelMetricsPauseSeconds: opssightSpec.Perceptor.ModelMetricsPauseSeconds,\n\t\t\t\tStalledScanClientTimeoutHours: opssightSpec.Perceptor.StalledScanClientTimeoutHours,\n\t\t\t\tUnknownImagePauseMilliseconds: 
opssightSpec.Perceptor.UnknownImagePauseMilliseconds,\n\t\t\t},\n\t\t\tHost: util.GetResourceName(name, util.OpsSightName, names[\"perceptor\"]),\n\t\t\tPort: 3001,\n\t\t\tUseMockMode: false,\n\t\t},\n\t\tScanner: &ScannerConfig{\n\t\t\tBlackDuckClientTimeoutSeconds: opssightSpec.ScannerPod.Scanner.ClientTimeoutSeconds,\n\t\t\tImageDirectory: opssightSpec.ScannerPod.ImageDirectory,\n\t\t\tPort: 3003,\n\t\t},\n\t\tSkyfire: &SkyfireConfig{\n\t\t\tBlackDuckClientTimeoutSeconds: opssightSpec.Skyfire.HubClientTimeoutSeconds,\n\t\t\tBlackDuckDumpPauseSeconds: opssightSpec.Skyfire.HubDumpPauseSeconds,\n\t\t\tKubeDumpIntervalSeconds: opssightSpec.Skyfire.KubeDumpIntervalSeconds,\n\t\t\tPerceptorDumpIntervalSeconds: opssightSpec.Skyfire.PerceptorDumpIntervalSeconds,\n\t\t\tPort: 3005,\n\t\t\tPrometheusPort: 3006,\n\t\t\tUseInClusterConfig: true,\n\t\t},\n\t}\n\treturn &SpecConfig{\n\t\tconfig: config,\n\t\tkubeClient: kubeClient,\n\t\topssightClient: opssightClient,\n\t\thubClient: hubClient,\n\t\topssight: opssight,\n\t\tconfigMap: configMap,\n\t\tisBlackDuckClusterScope: isBlackDuckClusterScope,\n\t\tdryRun: dryRun,\n\t\tnames: names,\n\t\timages: images,\n\t}\n}", "func newCanaryConfig(provider config.Provider) (*Config, error) {\n\traw := provider.Get(ConfigurationKey)\n\tvar cfg Config\n\tif err := raw.Populate(&cfg); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load canary configuration with error: %v\", err)\n\t}\n\tif err := cfg.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &cfg, nil\n}", "func expandGoogleChannelConfig(c *Client, f *GoogleChannelConfig) (map[string]interface{}, error) {\n\tm := make(map[string]interface{})\n\tres := f\n\t_ = res\n\tif v, err := dcl.DeriveField(\"projects/%s/locations/%s/googleChannelConfig\", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location)); err != nil {\n\t\treturn nil, fmt.Errorf(\"error expanding Name into name: %w\", err)\n\t} else if !dcl.IsEmptyValueIndirect(v) {\n\t\tm[\"name\"] = 
v\n\t}\n\tif v := f.CryptoKeyName; dcl.ValueShouldBeSent(v) {\n\t\tm[\"cryptoKeyName\"] = v\n\t}\n\tif v, err := dcl.EmptyValue(); err != nil {\n\t\treturn nil, fmt.Errorf(\"error expanding Project into project: %w\", err)\n\t} else if !dcl.IsEmptyValueIndirect(v) {\n\t\tm[\"project\"] = v\n\t}\n\tif v, err := dcl.EmptyValue(); err != nil {\n\t\treturn nil, fmt.Errorf(\"error expanding Location into location: %w\", err)\n\t} else if !dcl.IsEmptyValueIndirect(v) {\n\t\tm[\"location\"] = v\n\t}\n\n\treturn m, nil\n}", "func (*PUSCH_Config) XXX_OneofWrappers() []interface{} {\n\treturn []interface{}{\n\t\t(*PUSCH_Config_ReleaseDmrs_UplinkForPUSCH_MappingTypeA)(nil),\n\t\t(*PUSCH_Config_SetupDmrs_UplinkForPUSCH_MappingTypeA)(nil),\n\t\t(*PUSCH_Config_ReleaseDmrs_UplinkForPUSCH_MappingTypeB)(nil),\n\t\t(*PUSCH_Config_SetupDmrs_UplinkForPUSCH_MappingTypeB)(nil),\n\t\t(*PUSCH_Config_ReleasePusch_TimeDomainAllocationList)(nil),\n\t\t(*PUSCH_Config_SetupPusch_TimeDomainAllocationList)(nil),\n\t\t(*PUSCH_Config_ReleaseUci_OnPUSCH)(nil),\n\t\t(*PUSCH_Config_SetupUci_OnPUSCH)(nil),\n\t}\n}", "func (proxy *StandAloneProxyConfig) DeepCopy() *StandAloneProxyConfig {\n\tif proxy == nil {\n\t\treturn nil\n\t}\n\tcloned := new(StandAloneProxyConfig)\n\tcloned.proxyCredentials = make(map[string]*ProxyUser)\n\tcloned.managementServers = make(map[url.URL]*ManagementServer)\n\tcloned.managedArrays = make(map[string]*StorageArray)\n\tfor key, value := range proxy.managedArrays {\n\t\tarray := *value\n\t\tcloned.managedArrays[key] = &array\n\t}\n\tfor key, value := range proxy.managementServers {\n\t\tcloned.managementServers[key] = value.DeepCopy()\n\t}\n\tfor key, value := range proxy.proxyCredentials {\n\t\tcreds := *value\n\t\tcloned.proxyCredentials[key] = &creds\n\t}\n\treturn cloned\n}", "func NewAndroidWorkProfileGeneralDeviceConfiguration()(*AndroidWorkProfileGeneralDeviceConfiguration) {\n m := &AndroidWorkProfileGeneralDeviceConfiguration{\n DeviceConfiguration: 
*NewDeviceConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.androidWorkProfileGeneralDeviceConfiguration\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func (o *SyntheticsPrivateLocationCreationResponse) GetConfig() interface{} {\n\tif o == nil || o.Config == nil {\n\t\tvar ret interface{}\n\t\treturn ret\n\t}\n\treturn o.Config\n}", "func (m *ExternalConnection) GetConfiguration()(Configurationable) {\n return m.configuration\n}", "func (c *VaultConfig) Merge(o *VaultConfig) *VaultConfig {\n\tif c == nil {\n\t\tif o == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn o.Copy()\n\t}\n\n\tif o == nil {\n\t\treturn c.Copy()\n\t}\n\n\tr := c.Copy()\n\n\tif o.Address != nil {\n\t\tr.Address = o.Address\n\t}\n\n\tif o.Enabled != nil {\n\t\tr.Enabled = o.Enabled\n\t}\n\n\tif o.Namespace != nil {\n\t\tr.Namespace = o.Namespace\n\t}\n\n\tif o.RenewToken != nil {\n\t\tr.RenewToken = o.RenewToken\n\t}\n\n\tif o.Retry != nil {\n\t\tr.Retry = r.Retry.Merge(o.Retry)\n\t}\n\n\tif o.SSL != nil {\n\t\tr.SSL = r.SSL.Merge(o.SSL)\n\t}\n\n\tif o.Token != nil {\n\t\tr.Token = o.Token\n\t}\n\n\tif o.VaultAgentTokenFile != nil {\n\t\tr.VaultAgentTokenFile = o.VaultAgentTokenFile\n\t}\n\n\tif o.Transport != nil {\n\t\tr.Transport = r.Transport.Merge(o.Transport)\n\t}\n\n\tif o.UnwrapToken != nil {\n\t\tr.UnwrapToken = o.UnwrapToken\n\t}\n\n\tif o.DefaultLeaseDuration != nil {\n\t\tr.DefaultLeaseDuration = o.DefaultLeaseDuration\n\t}\n\n\tif o.LeaseRenewalThreshold != nil {\n\t\tr.LeaseRenewalThreshold = o.LeaseRenewalThreshold\n\t}\n\n\tif o.K8SAuthRoleName != nil {\n\t\tr.K8SAuthRoleName = o.K8SAuthRoleName\n\t}\n\n\tif o.K8SServiceAccountToken != nil {\n\t\tr.K8SServiceAccountToken = o.K8SServiceAccountToken\n\t}\n\n\tif o.K8SServiceAccountTokenPath != nil {\n\t\tr.K8SServiceAccountTokenPath = o.K8SServiceAccountTokenPath\n\t}\n\n\tif o.K8SServiceMountPath != nil {\n\t\tr.K8SServiceMountPath = o.K8SServiceMountPath\n\t}\n\n\treturn r\n}", "func (a *AllApiService) 
ConfigurationCloneConfiguration(ctx _context.Context, body ConfigurationCloneConfiguration) (ConfigurationCloneConfigurationResult, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ConfigurationCloneConfigurationResult\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/configuration/cloneConfiguration\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif 
localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v ConfigurationCloneConfigurationResult\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (old ContainerConfig) Copy() ContainerConfig {\n\t// Copy all fields\n\tres := old\n\n\t// Make deep copy of slices\n\t// none yet - placeholder\n\n\treturn res\n}", "func (config 
*internalConfiguration) setConfiguration(newConf *CoreConfiguration) {\n\n\tnewConf.apiPlatformClientID = config.APIPlatformClientID\n\tnewConf.apiPlatformHost = config.APIPlatformHost\n\tnewConf.idcsHost = config.IDCSHost\n\tnewConf.apiPlatformClientSecret = config.APIPlatformClientSecret\n\tnewConf.apiPlatformUser = config.APIPlatformUser\n\tnewConf.apiPlatformUserPassword = config.APIPlatformUserPassword\n\tnewConf.apiPlatformScope = config.APIPlatformScope\n}", "func (cc *ConnConfig) Copy() *ConnConfig {\n\tnewConfig := new(ConnConfig)\n\t*newConfig = *cc\n\tnewConfig.Config = *newConfig.Config.Copy()\n\treturn newConfig\n}", "func (c *Config) Copy() *Config {\n\tconfigCopy := &Config{\n\t\tFilter: c.Filter,\n\t\tIncludeMsgTypes: util.CopyStringSlice(c.IncludeMsgTypes),\n\t\tSubject: c.Subject,\n\t}\n\tif c.Email != nil {\n\t\tconfigCopy.Email = &EmailNotifierConfig{\n\t\t\tEmails: util.CopyStringSlice(c.Email.Emails),\n\t\t}\n\t}\n\tif c.Chat != nil {\n\t\tconfigCopy.Chat = &ChatNotifierConfig{\n\t\t\tRoomID: c.Chat.RoomID,\n\t\t}\n\t}\n\tif c.PubSub != nil {\n\t\tconfigCopy.PubSub = &PubSubNotifierConfig{\n\t\t\tTopic: c.PubSub.Topic,\n\t\t}\n\t}\n\tif c.Monorail != nil {\n\t\tconfigCopy.Monorail = &MonorailNotifierConfig{\n\t\t\tProject: c.Monorail.Project,\n\t\t\tOwner: c.Monorail.Owner,\n\t\t\tCC: util.CopyStringSlice(c.Monorail.CC),\n\t\t\tComponents: util.CopyStringSlice(c.Monorail.Components),\n\t\t\tLabels: util.CopyStringSlice(c.Monorail.Labels),\n\t\t}\n\t}\n\treturn configCopy\n}", "func (*OtherConfigV1540) XXX_OneofWrappers() []interface{} {\n\treturn []interface{}{\n\t\t(*OtherConfigV1540_ReleaseOverheatingAssistanceConfig)(nil),\n\t\t(*OtherConfigV1540_SetupOverheatingAssistanceConfig)(nil),\n\t}\n}", "func CloneConfig(host string, verifyTLS bool, apiKey string, project string, config string, name string) (models.ConfigInfo, Error) {\n\tpostBody := map[string]interface{}{\"name\": name}\n\tbody, err := json.Marshal(postBody)\n\tif err != nil 
{\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Invalid config info\"}\n\t}\n\n\tvar params []queryParam\n\tparams = append(params, queryParam{Key: \"project\", Value: project})\n\tparams = append(params, queryParam{Key: \"config\", Value: config})\n\n\turl, err := generateURL(host, \"/v3/configs/config/clone\", params)\n\tif err != nil {\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Unable to generate url\"}\n\t}\n\n\tstatusCode, _, response, err := PostRequest(url, verifyTLS, apiKeyHeader(apiKey), body)\n\tif err != nil {\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Unable to clone config\", Code: statusCode}\n\t}\n\n\tvar result map[string]interface{}\n\terr = json.Unmarshal(response, &result)\n\tif err != nil {\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\n\tconfigInfo, ok := result[\"config\"].(map[string]interface{})\n\tif !ok {\n\t\treturn models.ConfigInfo{}, Error{Err: fmt.Errorf(\"Unexpected type parsing config info, expected map[string]interface{}, got %T\", result[\"config\"]), Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\tinfo := models.ParseConfigInfo(configInfo)\n\treturn info, Error{}\n}", "func (in *AdmissionWebhookConfigurationSpec) DeepCopy() *AdmissionWebhookConfigurationSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AdmissionWebhookConfigurationSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (*OtherConfig) XXX_OneofWrappers() []interface{} {\n\treturn []interface{}{\n\t\t(*OtherConfig_Release)(nil),\n\t\t(*OtherConfig_Setup)(nil),\n\t}\n}", "func NewGetConfigurationOK() *GetConfigurationOK {\n\treturn &GetConfigurationOK{}\n}", "func (o *Config) Copy(s Config) {\n\to.Enable = s.Enable\n\to.RouterId = s.RouterId\n\to.AsNumber = s.AsNumber\n\to.BfdProfile = s.BfdProfile\n\to.RejectDefaultRoute = s.RejectDefaultRoute\n\to.InstallRoute = s.InstallRoute\n\to.AggregateMed = 
s.AggregateMed\n\to.DefaultLocalPreference = s.DefaultLocalPreference\n\to.AsFormat = s.AsFormat\n\to.AlwaysCompareMed = s.AlwaysCompareMed\n\to.DeterministicMedComparison = s.DeterministicMedComparison\n\to.EcmpMultiAs = s.EcmpMultiAs\n\to.EnforceFirstAs = s.EnforceFirstAs\n\to.EnableGracefulRestart = s.EnableGracefulRestart\n\to.StaleRouteTime = s.StaleRouteTime\n\to.LocalRestartTime = s.LocalRestartTime\n\to.MaxPeerRestartTime = s.MaxPeerRestartTime\n\to.ReflectorClusterId = s.ReflectorClusterId\n\to.ConfederationMemberAs = s.ConfederationMemberAs\n\to.AllowRedistributeDefaultRoute = s.AllowRedistributeDefaultRoute\n}", "func NewAndroidCustomConfiguration()(*AndroidCustomConfiguration) {\n m := &AndroidCustomConfiguration{\n DeviceConfiguration: *NewDeviceConfiguration(),\n }\n odataTypeValue := \"#microsoft.graph.androidCustomConfiguration\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func (c *ContentKeyPolicyWidevineConfiguration) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"@odata.type\":\n\t\t\terr = unpopulate(val, \"ODataType\", &c.ODataType)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"widevineTemplate\":\n\t\t\terr = unpopulate(val, \"WidevineTemplate\", &c.WidevineTemplate)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (c *TLSConfig) Clone() *TLSConfig {\n\tnamed := map[string]KeyCert{}\n\tfor key, val := range c.Named {\n\t\tnamed[key] = val\n\t}\n\n\troots := append([]string{}, c.RootCAs...)\n\tacmeHosts := append([]string{}, c.ACMEHosts...)\n\n\treturn &TLSConfig{named, roots, c.Default, c.ACMEDirectoryURL, acmeHosts, c.ACMECacheDir}\n}", "func (c *Config) CommonConfig() config.Common {\n\treturn 
c.Common\n}", "func (in *PingPongSpec) DeepCopy() *PingPongSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PingPongSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func FlattenOIDC(config apps.AppConfiguration) map[string]interface{} {\n\ttfOut := map[string]interface{}{}\n\tif config.RedirectURI != nil {\n\t\ttfOut[\"redirect_uri\"] = *config.RedirectURI\n\t}\n\tif config.LoginURL != nil {\n\t\ttfOut[\"login_url\"] = *config.LoginURL\n\t}\n\t// Terraform typeMap wants all strings so we convert int32 to string here\n\tif config.RefreshTokenExpirationMinutes != nil {\n\t\ttfOut[\"refresh_token_expiration_minutes\"] = strconv.FormatInt(int64(*config.RefreshTokenExpirationMinutes), 10)\n\t}\n\tif config.OidcApplicationType != nil {\n\t\ttfOut[\"oidc_application_type\"] = strconv.FormatInt(int64(*config.OidcApplicationType), 10)\n\t}\n\tif config.TokenEndpointAuthMethod != nil {\n\t\ttfOut[\"token_endpoint_auth_method\"] = strconv.FormatInt(int64(*config.TokenEndpointAuthMethod), 10)\n\t}\n\tif config.AccessTokenExpirationMinutes != nil {\n\t\ttfOut[\"access_token_expiration_minutes\"] = strconv.FormatInt(int64(*config.AccessTokenExpirationMinutes), 10)\n\t}\n\treturn tfOut\n}", "func merge(existing, kind *Config) error {\n\t// verify assumptions about kubeadm / kind kubeconfigs\n\tif err := checkKubeadmExpectations(kind); err != nil {\n\t\treturn err\n\t}\n\n\t// insert or append cluster entry\n\tshouldAppend := true\n\tfor i := range existing.Clusters {\n\t\tif existing.Clusters[i].Name == kind.Clusters[0].Name {\n\t\t\texisting.Clusters[i] = kind.Clusters[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif shouldAppend {\n\t\texisting.Clusters = append(existing.Clusters, kind.Clusters[0])\n\t}\n\n\t// insert or append user entry\n\tshouldAppend = true\n\tfor i := range existing.Users {\n\t\tif existing.Users[i].Name == kind.Users[0].Name {\n\t\t\texisting.Users[i] = kind.Users[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif shouldAppend 
{\n\t\texisting.Users = append(existing.Users, kind.Users[0])\n\t}\n\n\t// insert or append context entry\n\tshouldAppend = true\n\tfor i := range existing.Contexts {\n\t\tif existing.Contexts[i].Name == kind.Contexts[0].Name {\n\t\t\texisting.Contexts[i] = kind.Contexts[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif shouldAppend {\n\t\texisting.Contexts = append(existing.Contexts, kind.Contexts[0])\n\t}\n\n\t// set the current context\n\texisting.CurrentContext = kind.CurrentContext\n\n\t// TODO: We should not need this, but it allows broken clients that depend\n\t// on apiVersion and kind to work. Notably the upstream javascript client.\n\t// See: https://github.com/kubernetes-sigs/kind/issues/1242\n\tif len(existing.OtherFields) == 0 {\n\t\t// TODO: Should we be deep-copying? for now we don't need to\n\t\t// and doing so would be a pain (re and de-serialize maybe?) :shrug:\n\t\texisting.OtherFields = kind.OtherFields\n\t}\n\n\treturn nil\n}", "func (p Provider) OAuth2Config(c *Client) (cfg oauth2.Config) {\n\tcfg = oauth2.Config{\n\t\tClientID: c.ID,\n\t\tClientSecret: c.Secret,\n\t\tEndpoint: p.Endpoint(),\n\t}\n\n\tif len(c.RedirectURIs) > 0 {\n\t\tcfg.RedirectURL = c.RedirectURIs[0]\n\t}\n\n\treturn cfg\n}", "func (a *MaintenanceWindowApiService) GetMaintenanceWindowConfig(ctx _context.Context, uid string) ApiGetMaintenanceWindowConfigRequest {\n\treturn ApiGetMaintenanceWindowConfigRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tuid: uid,\n\t}\n}", "func (cc *CompactConfig) Clone() CompactConfig {\n\tvar clone CompactConfig\n\tm := make(map[string]CompactSerializer, len(cc.serializers))\n\tfor k, v := range cc.serializers {\n\t\tm[k] = v\n\t}\n\tclone.serializers = m\n\treturn clone\n}", "func newConfig() *config {\n\treturn &config{\n\t\tAddr: \":80\",\n\t\tCacheSize: 1000,\n\t\tLogLevel: \"info\",\n\t\tRequestTimeout: 3000,\n\t\tTargetAddr: \"https://places.aviasales.ru\",\n\t}\n}", "func (c *Config) ToStruct(pointer interface{}, mapping 
...map[string]string) error {\n\tif j := c.getJson(); j != nil {\n\t\treturn j.ToStruct(pointer, mapping...)\n\t}\n\treturn errors.New(\"configuration not found\")\n}", "func (c *client) WorkspaceConfiguration(ctx context.Context, params *ConfigurationParams) ([]interface{}, error) {\n\tvar result []interface{}\n\terr := c.Conn.Call(ctx, MethodWorkspaceConfiguration, params, &result)\n\n\treturn result, err\n}", "func NewConfiguration() *Configuration {\n\tcfg := &Configuration{\n\t\tBasePath: \"https://api.payrobot.io\",\n\t\tDefaultHeader: make(map[string]string),\n\t\tUserAgent: \"OpenAPI-Generator/1.0.0/go\",\n\t\tDebug: false,\n\t\tServers: []ServerConfiguration{\n\t\t\t{\n\t\t\t\tUrl: \"https://api.payrobot.io\",\n\t\t\t\tDescription: \"Production server (uses live data)\",\n\t\t\t},\n\t\t},\n\t}\n\treturn cfg\n}", "func (e ExternalService) Configuration() (cfg interface{}, _ error) {\n\tswitch strings.ToLower(e.Kind) {\n\tcase \"awscodecommit\":\n\t\tcfg = &schema.AWSCodeCommitConnection{}\n\tcase \"bitbucketserver\":\n\t\tcfg = &schema.BitbucketServerConnection{}\n\tcase \"github\":\n\t\tcfg = &schema.GitHubConnection{}\n\tcase \"gitlab\":\n\t\tcfg = &schema.GitLabConnection{}\n\tcase \"gitolite\":\n\t\tcfg = &schema.GitoliteConnection{}\n\tcase \"phabricator\":\n\t\tcfg = &schema.PhabricatorConnection{}\n\tcase \"other\":\n\t\tcfg = &schema.OtherExternalServiceConnection{}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown external service kind %q\", e.Kind)\n\t}\n\treturn cfg, jsonc.Unmarshal(e.Config, cfg)\n}", "func newProxyConfig(config PodConfig) interface{} {\n\tswitch config.ProxyType {\n\tcase NoopProxyType:\n\t\treturn nil\n\tcase CCProxyType:\n\t\tvar ccConfig CCProxyConfig\n\t\terr := mapstructure.Decode(config.ProxyConfig, &ccConfig)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn ccConfig\n\tdefault:\n\t\treturn nil\n\t}\n}", "func (c *Config) ToStructDeep(pointer interface{}, mapping ...map[string]string) error {\n\tif j := 
c.getJson(); j != nil {\n\t\treturn j.ToStructDeep(pointer, mapping...)\n\t}\n\treturn errors.New(\"configuration not found\")\n}", "func NewConfig(d *schema.ResourceData) (*Settings, error) {\n\t// winRM\n\twinRMUsername := d.Get(\"winrm_username\").(string)\n\twinRMPassword := d.Get(\"winrm_password\").(string)\n\twinRMHost := d.Get(\"winrm_hostname\").(string)\n\twinRMPort := d.Get(\"winrm_port\").(int)\n\twinRMProto := d.Get(\"winrm_proto\").(string)\n\twinRMInsecure := d.Get(\"winrm_insecure\").(bool)\n\tkrbRealm := d.Get(\"krb_realm\").(string)\n\tkrbConfig := d.Get(\"krb_conf\").(string)\n\tkrbSpn := d.Get(\"krb_spn\").(string)\n\twinRMUseNTLM := d.Get(\"winrm_use_ntlm\").(bool)\n\twinRMPassCredentials := d.Get(\"winrm_pass_credentials\").(bool)\n\tdomainController := d.Get(\"domain_controller\").(string)\n\n\tcfg := &Settings{\n\t\tDomainName: krbRealm,\n\t\tDomainController: domainController,\n\t\tWinRMHost: winRMHost,\n\t\tWinRMPort: winRMPort,\n\t\tWinRMProto: winRMProto,\n\t\tWinRMUsername: winRMUsername,\n\t\tWinRMPassword: winRMPassword,\n\t\tWinRMInsecure: winRMInsecure,\n\t\tKrbRealm: krbRealm,\n\t\tKrbConfig: krbConfig,\n\t\tKrbSpn: krbSpn,\n\t\tWinRMUseNTLM: winRMUseNTLM,\n\t\tWinRMPassCredentials: winRMPassCredentials,\n\t}\n\n\treturn cfg, nil\n}", "func NewClientConfiguration(pfmServicesEnabled bool, isAutomaticBatchUpdateEnabled bool, isDevelopmentModeEnabled bool, isNonEuroAccountsSupported bool, isAutoCategorizationEnabled bool, mandatorLicense MandatorLicense, preferredConsentType PreferredConsentType, userNotificationCallbackUrl NullableString, userSynchronizationCallbackUrl NullableString, refreshTokensValidityPeriod int32, userAccessTokensValidityPeriod int32, clientAccessTokensValidityPeriod int32, maxUserLoginAttempts int32, transactionImportLimitation int32, isUserAutoVerificationEnabled bool, isMandatorAdmin bool, isWebScrapingEnabled bool, isXs2aEnabled bool, pinStorageAvailableInWebForm bool, paymentsEnabled bool, 
isStandalonePaymentsEnabled bool, availableBankGroups []string, products []Product, applicationName NullableString, finTSProductRegistrationNumber NullableString, storeSecretsAvailableInWebForm bool, supportSubjectDefault NullableString, supportEmail NullableString, aisWebFormMode WebFormMode, pisWebFormMode WebFormMode, pisStandaloneWebFormMode WebFormMode, betaBanksEnabled bool, categoryRestrictions []Category, autoDismountWebForm bool, corsAllowedOrigins []string, ) *ClientConfiguration {\n\tthis := ClientConfiguration{}\n\tthis.PfmServicesEnabled = pfmServicesEnabled\n\tthis.IsAutomaticBatchUpdateEnabled = isAutomaticBatchUpdateEnabled\n\tthis.IsDevelopmentModeEnabled = isDevelopmentModeEnabled\n\tthis.IsNonEuroAccountsSupported = isNonEuroAccountsSupported\n\tthis.IsAutoCategorizationEnabled = isAutoCategorizationEnabled\n\tthis.MandatorLicense = mandatorLicense\n\tthis.PreferredConsentType = preferredConsentType\n\tthis.UserNotificationCallbackUrl = userNotificationCallbackUrl\n\tthis.UserSynchronizationCallbackUrl = userSynchronizationCallbackUrl\n\tthis.RefreshTokensValidityPeriod = refreshTokensValidityPeriod\n\tthis.UserAccessTokensValidityPeriod = userAccessTokensValidityPeriod\n\tthis.ClientAccessTokensValidityPeriod = clientAccessTokensValidityPeriod\n\tthis.MaxUserLoginAttempts = maxUserLoginAttempts\n\tthis.TransactionImportLimitation = transactionImportLimitation\n\tthis.IsUserAutoVerificationEnabled = isUserAutoVerificationEnabled\n\tthis.IsMandatorAdmin = isMandatorAdmin\n\tthis.IsWebScrapingEnabled = isWebScrapingEnabled\n\tthis.IsXs2aEnabled = isXs2aEnabled\n\tthis.PinStorageAvailableInWebForm = pinStorageAvailableInWebForm\n\tthis.PaymentsEnabled = paymentsEnabled\n\tthis.IsStandalonePaymentsEnabled = isStandalonePaymentsEnabled\n\tthis.AvailableBankGroups = availableBankGroups\n\tthis.Products = products\n\tthis.ApplicationName = applicationName\n\tthis.FinTSProductRegistrationNumber = 
finTSProductRegistrationNumber\n\tthis.StoreSecretsAvailableInWebForm = storeSecretsAvailableInWebForm\n\tthis.SupportSubjectDefault = supportSubjectDefault\n\tthis.SupportEmail = supportEmail\n\tthis.AisWebFormMode = aisWebFormMode\n\tthis.PisWebFormMode = pisWebFormMode\n\tthis.PisStandaloneWebFormMode = pisStandaloneWebFormMode\n\tthis.BetaBanksEnabled = betaBanksEnabled\n\tthis.CategoryRestrictions = categoryRestrictions\n\tthis.AutoDismountWebForm = autoDismountWebForm\n\tthis.CorsAllowedOrigins = corsAllowedOrigins\n\treturn &this\n}", "func (client *XenClient) SMGetOtherConfig(self string) (result map[string]string, err error) {\n\tobj, err := client.APICall(\"SM.get_other_config\", self)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinterim := reflect.ValueOf(obj)\n\tresult = map[string]string{}\n\tfor _, key := range interim.MapKeys() {\n\t\tobj := interim.MapIndex(key)\n\t\tresult[key.String()] = obj.String()\n\t}\n\n\treturn\n}", "func (s *Server) SyncWithConfiguration(cfg remote.ServerConfigurationResponse) error {\n\tc := Configuration{\n\t\tCrashDetectionEnabled: config.Get().System.CrashDetection.CrashDetectionEnabled,\n\t}\n\tif err := json.Unmarshal(cfg.Settings, &c); err != nil {\n\t\treturn errors.WithStackIf(err)\n\t}\n\n\ts.cfg.mu.Lock()\n\tdefer s.cfg.mu.Unlock()\n\n\t// Lock the new configuration. 
Since we have the deferred Unlock above we need\n\t// to make sure that the NEW configuration object is already locked since that\n\t// defer is running on the memory address for \"s.cfg.mu\" which we're explicitly\n\t// changing on the next line.\n\tc.mu.Lock()\n\n\t//goland:noinspection GoVetCopyLock\n\ts.cfg = c\n\n\ts.Lock()\n\ts.procConfig = cfg.ProcessConfiguration\n\ts.Unlock()\n\n\treturn nil\n}", "func (*ChannelConfiguration) XXX_OneofWrappers() []interface{} {\n\treturn []interface{}{\n\t\t(*ChannelConfiguration_LoraModulationConfig)(nil),\n\t\t(*ChannelConfiguration_FskModulationConfig)(nil),\n\t}\n}", "func cloneConfig(c config) config {\n\tnewc := make(config)\n\tfor k, v := range c {\n\t\tswitch v := v.(type) {\n\t\tcase string:\n\t\t\tnewc[k] = v\n\t\tcase map[string]interface{}:\n\t\t\tm := make(map[string]string)\n\t\t\tfor k2, v2 := range v {\n\t\t\t\tm[k2] = v2.(string)\n\t\t\t}\n\t\t\tnewc[k] = m\n\t\tcase []interface{}:\n\t\t\ts := make([]string, 0, len(v))\n\t\t\tfor _, v2 := range v {\n\t\t\t\ts = append(s, v2.(string))\n\t\t\t}\n\t\t\tnewc[k] = s\n\t\t}\n\t}\n\treturn newc\n}", "func (r OAuthFlow) Clone() (*OAuthFlow, error) {\n\trbytes, err := yaml.Marshal(r)\n\tif err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tvalue := OAuthFlow{}\n\tif err := yaml.Unmarshal(rbytes, &value); err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\treturn &value, nil\n}", "func MergeHealthcheckConfig(a, b Healthcheck) Healthcheck {\n\tresult := b\n\tif result.Interval == \"\" {\n\t\tresult.Interval = a.Interval\n\t}\n\tif result.Timeout == \"\" {\n\t\tresult.Timeout = a.Timeout\n\t}\n\tif len(result.Rules) == 0 {\n\t\tresult.Rules = a.Rules\n\t}\n\treturn result\n}", "func (m *WellKnown) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAuthorizationEndpoint(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIDTokenSigningAlgValuesSupported(formats); err != nil {\n\t\tres = 
append(res, err)\n\t}\n\n\tif err := m.validateIssuer(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateJwksURI(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateResponseTypesSupported(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSubjectTypesSupported(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTokenEndpoint(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (*ConfigKind) XXX_OneofWrappers() []interface{} {\n\treturn []interface{}{\n\t\t(*ConfigKind_Local)(nil),\n\t\t(*ConfigKind_Global)(nil),\n\t}\n}" ]
[ "0.6973411", "0.6471807", "0.5365221", "0.5365221", "0.49971724", "0.47366124", "0.46528473", "0.45895526", "0.45788094", "0.45408705", "0.45383167", "0.45270938", "0.45160753", "0.4492056", "0.4454131", "0.44524926", "0.44451955", "0.4425715", "0.44178215", "0.43918446", "0.43864527", "0.43643537", "0.4342315", "0.43318263", "0.4290668", "0.42903772", "0.4289385", "0.4275275", "0.4275261", "0.42639187", "0.4256372", "0.42452127", "0.42411977", "0.42408302", "0.42353132", "0.42290127", "0.42204276", "0.4201049", "0.41852707", "0.41831976", "0.4181257", "0.41725135", "0.41622898", "0.41596857", "0.4156879", "0.41542867", "0.41497582", "0.41466048", "0.41445208", "0.41329673", "0.41315916", "0.4130837", "0.41204214", "0.41199937", "0.411513", "0.4112149", "0.41120064", "0.411021", "0.41057816", "0.41039383", "0.40980202", "0.4093921", "0.40921798", "0.40886697", "0.4084548", "0.4080121", "0.4070901", "0.40707415", "0.40707284", "0.40649694", "0.40644607", "0.40587872", "0.40536478", "0.40457532", "0.40402982", "0.4038953", "0.4033925", "0.4030929", "0.40277627", "0.40276614", "0.4027042", "0.40255916", "0.40224814", "0.40151295", "0.40049255", "0.39940605", "0.39913604", "0.39913002", "0.3985458", "0.39800432", "0.39759657", "0.39714268", "0.3968217", "0.3967207", "0.39634365", "0.395636", "0.39554504", "0.39551944", "0.3953143", "0.395243" ]
0.7166091
0
FullName returns the full name of the command. For commands with parents this ensures that the parent commands are part of the command path.
func (cmd *Command) FullName() string { namePath := []string{} if cmd.parent != nil { namePath = append(namePath, cmd.parent.FullName()) } return strings.Join(append(namePath, cmd.Name), " ") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c Command) FullName() string {\n\tif c.commandNamePath == nil {\n\t\treturn c.Name\n\t}\n\treturn strings.Join(c.commandNamePath, \" \")\n}", "func cmdPath(cmd *cobra.Command) string {\n\tif cmd.Parent() == nil || cmd.Parent().Name() == \"ethereal\" {\n\t\treturn cmd.Name()\n\t}\n\treturn fmt.Sprintf(\"%s:%s\", cmdPath(cmd.Parent()), cmd.Name())\n}", "func (s *BashScript) FullPath() string {\n\treturn path.Join(s.BasePath, s.Path)\n}", "func (tb *TaskBuilder) GetFullName(in string) string {\n\tvar n strings.Builder\n\n\tn.WriteString(fmt.Sprintf(`\"%v\".\"%v\".\"%v\"`, tb.db, tb.schema, in))\n\n\treturn n.String()\n}", "func Command(cmd *cobra.Command) string {\n\tnames := make([]string, 0)\n\tcurrent := cmd\n\tfor {\n\t\tnames = append(names, current.Name())\n\t\tcurrent = current.Parent()\n\t\tif current == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treverse := make([]string, len(names))\n\tfor i, entry := range names {\n\t\treverse[len(names)-i-1] = entry\n\t}\n\n\treturn \"_\" + strings.Join(reverse, \"__\")\n}", "func (c *common) GetFullName() string { return c.FullName }", "func GetFullName(parentName, name string) string {\n\treturn parentName + \" \" + name\n}", "func GetFullName(parentName, name string) string {\n\treturn parentName + \" \" + name\n}", "func (o *Operator) GetFullName() string {\n\tif o != nil {\n\t\treturn fmt.Sprintf(\"%s %s\", o.GetFirstName(), o.GetLastName())\n\t}\n\treturn \"\"\n}", "func (pi ProcessInfo) GetFullName() string {\n\tif len(pi.Group) > 0 {\n\t\treturn fmt.Sprintf(\"%s:%s\", pi.Group, pi.Name)\n\t}\n\treturn pi.Name\n}", "func commandName(filename string) string {\n\treturn filepath.Base(strings.ReplaceAll(filename, filepath.Ext(filename), \"\"))\n}", "func RootCmdName() string {\n\treturn path.Base(os.Args[0])\n}", "func (p *PushEventRepository) GetFullName() string {\n\tif p == nil || p.FullName == nil {\n\t\treturn \"\"\n\t}\n\treturn *p.FullName\n}", "func (f *Factory) Command() string {\n\tif len(os.Args) == 0 
{\n\t\treturn \"\"\n\t}\n\tbase := filepath.Base(os.Args[0])\n\targs := append([]string{base}, os.Args[1:]...)\n\treturn strings.Join(args, \" \")\n}", "func (o *BasicBot) GetFullName() interface{} {\n\tif o == nil {\n\t\tvar ret interface{}\n\t\treturn ret\n\t}\n\treturn o.FullName\n}", "func (qualifiedName *QualifiedName) GetFullQualifiedName() string {\n\toutput := []string{}\n\n\tif len(qualifiedName.GetNamespace()) > 0 {\n\t\toutput = append(output, \"/\", qualifiedName.GetNamespace(), \"/\")\n\t}\n\tif len(qualifiedName.GetPackageName()) > 0 {\n\t\toutput = append(output, qualifiedName.GetPackageName(), \"/\")\n\t}\n\toutput = append(output, qualifiedName.GetEntity())\n\n\treturn strings.Join(output, \"\")\n}", "func (s *Module) FullName() string {\n\tif rev := s.Current(); rev != \"\" {\n\t\treturn s.Name + \"@\" + rev\n\t}\n\treturn s.Name\n}", "func (r *Repository) GetFullName() string {\n\tif r == nil || r.FullName == nil {\n\t\treturn \"\"\n\t}\n\treturn *r.FullName\n}", "func (cmd *CLI) Name() string {\n\tvar name string\n\tif cmd.parent != nil {\n\t\tname = strings.Join([]string{cmd.parent.Name(), cmd.name}, \" \")\n\t} else {\n\t\tname = cmd.name\n\t}\n\treturn name\n}", "func (pod *PodDetails) GetFullName() string {\n\treturn pod.Namespace + \"/\" + pod.Name\n}", "func FullName() string {\n\treturn C.GoString(C.getFullName())\n}", "func (r *Repository) GetSafeFullName() string {\n\treturn strings.Replace(r.FullName, \"/\", \"_\", -1)\n}", "func (r *RelativePath) FullPath() string {\n\treturn \"/\" + strings.Join(r.stack, \"/\")\n}", "func (o LookupPoolResultOutput) FullPath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupPoolResult) string { return v.FullPath }).(pulumi.StringOutput)\n}", "func (rr RunResult) Command() string {\n\tvar sb strings.Builder\n\tsb.WriteString(strings.TrimPrefix(rr.Args[0], \"../../\"))\n\tfor _, a := range rr.Args[1:] {\n\t\tif strings.Contains(a, \" \") {\n\t\t\tsb.WriteString(fmt.Sprintf(` \"%s\"`, 
a))\n\t\t\tcontinue\n\t\t}\n\t\tsb.WriteString(fmt.Sprintf(\" %s\", a))\n\t}\n\treturn sb.String()\n}", "func Self() string {\n\tname := os.Args[0]\n\tif filepath.Base(name) == name {\n\t\tif lp, err := exec.LookPath(name); err == nil {\n\t\t\tname = lp\n\t\t}\n\t}\n\treturn name\n}", "func (app *App) CommandName() string {\n\tif app.invoked == nil {\n\t\treturn \"\"\n\t}\n\treturn app.invoked.Name()\n}", "func (ext *Extension) FullName() string {\n\treturn fmt.Sprintf(\"%s.%s\", ext.ContainingType, ext.Name)\n}", "func (o *AssetHolderName) GetFullName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.FullName\n}", "func (p *TempFileProc) FullPath() string {\n\treturn filepath.Join(p.Dir, p.Name)\n}", "func (g *UICommand) Name() string {\n\treturn g.fs.Name()\n}", "func (c *Cfg) fullName(prefix string) string {\n\tvar buf [64]byte\n\tvar walk func(*Cfg)\n\tb := append(buf[:0], prefix...)\n\twalk = func(c *Cfg) {\n\t\tif c != nil && c.Name != \"\" {\n\t\t\twalk(c.parent)\n\t\t\tb = append(append(b, ' '), Name(c)...)\n\t\t}\n\t}\n\tif walk(c.parent); c.Name != \"\" {\n\t\tif b = append(b, ' '); strings.IndexByte(c.Name, nameSep) == -1 {\n\t\t\tb = append(b, c.Name...)\n\t\t} else {\n\t\t\tb = append(append(append(b, '{'), c.Name...), '}')\n\t\t}\n\t}\n\treturn strings.TrimSpace(string(b))\n}", "func (b *Being) GetFullName() *Name {\n\treturn b.Name\n}", "func (cmd *CLI) Parent() Command {\n\treturn cmd.parent\n}", "func (c ContainerImage) FullName(hasSecret bool) string {\n\tif hasSecret && c.Private {\n\t\treturn fmt.Sprintf(\"%s-secret:%s\", c.Repository, c.Tag)\n\t}\n\treturn fmt.Sprintf(\"%s:%s\", c.Repository, c.Tag)\n}", "func (data *Data) GetFullName() (n *Name) {\n\tdigest := data.ComputeDigest(false)\n\tnameV := data.GetName().GetValue()\n\tnameV = append(nameV, byte(TT_ImplicitSha256DigestComponent), byte(len(digest)))\n\tnameV = append(nameV, digest...)\n\tn, _ = NewName(nameV)\n\treturn n\n}", "func (n *Node) FullName() 
string {\n\treturn n.nameOffset(0)\n}", "func (node *Node) FullPath() string {\n\tvar path string\n\tif node.Parent != nil {\n\t\tpath = fmt.Sprintf(\"%s/%s\", node.Parent.FullPath(), node.Path)\n\t}\n\treturn path\n}", "func (t ObjectType) FullName() string {\n\tif t.Schema == \"\" {\n\t\treturn t.Name\n\t}\n\treturn t.Schema + \".\" + t.Name\n}", "func (c *Command) Name() string {\n\tidx := strings.Index(c.Usage, \" \")\n\tif idx == -1 {\n\t\treturn c.Usage\n\t}\n\treturn c.Usage[:idx]\n}", "func FullName(name string) string {\n\tswitch name {\n\tcase oci.Docker:\n\t\tif IsDockerDesktop(name) {\n\t\t\treturn \"Docker Desktop\"\n\t\t}\n\t\treturn \"Docker\"\n\tdefault:\n\t\treturn cases.Title(language.Und).String(name)\n\t}\n}", "func (Functions) CommandName(obj interface{}) string {\n\treturn nameOptions{}.convert(nameOf(obj))\n}", "func (cmd *Command) Name() string {\n\treturn strings.SplitN(cmd.Names, \",\", 2)[0]\n}", "func (c *JoinCommand) CommandName() string {\n\treturn commandName(\"join\")\n}", "func (c *TestAndSetCommand) CommandName() string {\n\treturn commandName(\"testAndSet\")\n}", "func (pool *PackagePool) FullPath(path string) string {\n\treturn filepath.Join(pool.rootPath, path)\n}", "func (c *GetCommand) CommandName() string {\n\treturn commandName(\"get\")\n}", "func (c *Command) Name() string {\n\tname := c.UsageLine\n\ti := strings.Index(name, \" \")\n\tif i >= 0 {\n\t\tname = name[:i]\n\t}\n\treturn name\n}", "func (c *Command) Name() string {\n\tname := c.UsageLine\n\ti := strings.Index(name, \" \")\n\tif i >= 0 {\n\t\tname = name[:i]\n\t}\n\treturn name\n}", "func (c *Command) Name() string {\n\tname := c.UsageLine\n\ti := strings.Index(name, \" \")\n\tif i >= 0 {\n\t\tname = name[:i]\n\t}\n\treturn name\n}", "func (c *RemoveCommand) CommandName() string {\n\treturn commandName(\"remove\")\n}", "func GetCmdName() string {\n\t_, name := filepath.Split(os.Args[0])\n\treturn name\n}", "func (n *node) FullName() string {\n\treturn n.namespace 
+ \".\" + n.Name()\n}", "func (c *WatchCommand) CommandName() string {\n\treturn commandName(\"watch\")\n}", "func GetFullName(schema Schema) string {\n\tswitch sch := schema.(type) {\n\tcase *RecordSchema:\n\t\treturn getFullName(sch.GetName(), sch.Namespace)\n\tcase *EnumSchema:\n\t\treturn getFullName(sch.GetName(), sch.Namespace)\n\tcase *FixedSchema:\n\t\treturn getFullName(sch.GetName(), sch.Namespace)\n\tdefault:\n\t\treturn schema.GetName()\n\t}\n}", "func (c *Client) GetFullName() string {\n\tif c != nil {\n\t\treturn fmt.Sprintf(\"%s %s\", c.GetFirstName(), c.GetLastName())\n\t}\n\treturn \"\"\n}", "func (c UpdateFabric) CommandName() string {\n\treturn command_list.CommandUpdate\n}", "func TestGrandChildSameName(t *testing.T) {\n\tvar fooCmdArgs []string\n\trootCmd := &Command{Use: \"foo\", Args: NoArgs, Run: emptyRun}\n\tbarCmd := &Command{Use: \"bar\", Args: NoArgs, Run: emptyRun}\n\tfooCmd := &Command{\n\t\tUse: \"foo\",\n\t\tArgs: ExactArgs(2),\n\t\tRun: func(_ *Command, args []string) { fooCmdArgs = args },\n\t}\n\tbarCmd.AddCommand(fooCmd)\n\trootCmd.AddCommand(barCmd)\n\n\toutput, err := executeCommand(rootCmd, \"bar\", \"foo\", \"one\", \"two\")\n\tif output != \"\" {\n\t\tt.Errorf(\"Unexpected output: %v\", output)\n\t}\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\n\tgot := strings.Join(fooCmdArgs, \" \")\n\tif got != onetwo {\n\t\tt.Errorf(\"fooCmdArgs expected: %v, got: %v\", onetwo, got)\n\t}\n}", "func Full() string {\n\treturn fmt.Sprintf(\"%s-%s\", OLMVersion, GitCommit)\n}", "func (c *FromCommand) Name() string {\n\treturn c.cmd.name\n}", "func (c *DeleteCommand) CommandName() string {\n\treturn commandName(\"delete\")\n}", "func (d *Discord) CommandPrefix() string {\n\tif len(os.Args) > 1 {\n\t\treturn fmt.Sprintf(\"r.\")\n\t}\n\treturn fmt.Sprintf(\"rt.\")\n}", "func repoFullName(repo, tag string, priority bool) string {\n\tif priority {\n\t\treturn fmt.Sprintf(\"%s:%s-priority\", repo, tag)\n\t}\n\treturn 
fmt.Sprintf(\"%s:%s\", repo, tag)\n}", "func (dog *Dog) GetFullName() string {\n\treturn dog.Name\n}", "func FullName(typ, namespace, name string) string {\n\treturn fmt.Sprintf(\"%s/%s/%s\", typ, namespace, name)\n}", "func (p *Partition) GetFullPath() string {\n\treturn p.Device + p.Name\n}", "func (c Command) Parent() *Command {\n\treturn c.parent\n}", "func (a *Action) ShortRepoPath() string {\n\treturn path.Join(a.ShortRepoUserName(), a.ShortRepoName())\n}", "func (e exe) Command() string {\n\treturn e.command\n}", "func SelfPath() string {\n\tpath, err := exec.LookPath(os.Args[0])\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn \"\"\n\t\t}\n\t\tif execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) {\n\t\t\treturn \"\"\n\t\t}\n\t\tpanic(err)\n\t}\n\tpath, err = filepath.Abs(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn \"\"\n\t\t}\n\t\tpanic(err)\n\t}\n\treturn path\n}", "func (c AutoplanCommand) SubCommandName() string {\n\treturn \"\"\n}", "func (cli *CLI) Command() string {\n\tif len(cli.Exec) > 0 {\n\t\treturn cli.Exec[0]\n\t}\n\treturn \"\"\n}", "func (o *BasicBot) GetFullNameOk() (*interface{}, bool) {\n\tif o == nil || o.FullName == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.FullName, true\n}", "func FullCmdLine() string {\n\tonce.Do(cmdLineOpener)\n\treturn procCmdLine.Raw\n}", "func (c *SetCommand) CommandName() string {\n\treturn commandName(\"set\")\n}", "func (this *YamlFile) GetFullPath() string {\n\treturn this.fullpath\n}", "func (c SyncCommand) CommandName() string {\n\treturn \"etcd:sync\"\n}", "func (c CommentCommand) CommandName() command.Name {\n\treturn c.Name\n}", "func (p *Pkg) fullName() string {\n\treturn fmt.Sprintf(\"%v_%v\", p.name, p.version)\n}", "func (c *JoinCommand) CommandName() string {\n\treturn \"etcd:join\"\n}", "func getFullPath(params *input.Params) string {\n\treturn fmt.Sprintf(FULL_PATH, params.ProjectName)\n}", "func prefix(c *cobra.Command) []string 
{\n\tprefixes := []string{}\n\tif c.Parent() == nil {\n\t\treturn prefixes\n\t}\n\n\tp := prefix(c.Parent())\n\tif len(p) == 0 {\n\t\tprefixes = append(prefixes, c.Name())\n\t\treturn prefixes\n\t}\n\n\tprefixes = append(prefixes, p...)\n\tprefixes = append(prefixes, fmt.Sprintf(\"%s.%s\", p[len(p)-1], c.Name()))\n\n\treturn prefixes\n}", "func (c *CmdReal) GetPath() string {\n\treturn c.cmd.Path\n}", "func (c *JoinCommand) CommandName() string {\n\treturn \"raft:join\"\n}", "func (f File) FullPath() string {\n\treturn path.Clean(string(f))\n}", "func (l *Local) FullPath(path string) string {\n\t// append the given path to the base path\n\treturn filepath.Join(l.basePath, path)\n}", "func FromCommandPath(commandPath string) string {\n\treturn strings.Join(strings.Split(commandPath, commandNameSeparator)[1:], commandNameSeparator)\n}", "func (b *Box) GetFullName() string {\n\treturn b.CartonName + \".\" + b.DomainName\n}", "func GetStackFullName(stack *types.Stack) string {\n\t// Use underscore as the delimiter because it is not allowed in stack name\n\t// (DNS subdomain format).\n\treturn stack.PodName + \"_\" + stack.PodNamespace\n}", "func (t *TypeSpecDef) FullPath() string {\n\treturn t.PkgPath + \".\" + t.Name()\n}", "func TestChildSameName(t *testing.T) {\n\tvar fooCmdArgs []string\n\trootCmd := &Command{Use: \"foo\", Args: NoArgs, Run: emptyRun}\n\tfooCmd := &Command{\n\t\tUse: \"foo\",\n\t\tArgs: ExactArgs(2),\n\t\tRun: func(_ *Command, args []string) { fooCmdArgs = args },\n\t}\n\tbarCmd := &Command{Use: \"bar\", Args: NoArgs, Run: emptyRun}\n\trootCmd.AddCommand(fooCmd, barCmd)\n\n\toutput, err := executeCommand(rootCmd, \"foo\", \"one\", \"two\")\n\tif output != \"\" {\n\t\tt.Errorf(\"Unexpected output: %v\", output)\n\t}\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\n\tgot := strings.Join(fooCmdArgs, \" \")\n\tif got != onetwo {\n\t\tt.Errorf(\"fooCmdArgs expected: %v, got: %v\", onetwo, got)\n\t}\n}", "func RunCommandFull(args 
[]string, includeStderr bool) (string, error) {\n\tcmd := exec.Command(args[0], args[1:]...)\n\tvar err error\n\tvar bytes []byte\n\n\tif includeStderr {\n\t\tbytes, err = cmd.CombinedOutput()\n\t} else {\n\t\tbytes, err = cmd.Output()\n\t}\n\n\ttrimmed := strings.TrimSpace(string(bytes))\n\n\treturn trimmed, err\n}", "func RunCommandFull(args []string, includeStderr bool) (string, error) {\n\tcmd := exec.Command(args[0], args[1:]...)\n\tvar err error\n\tvar bytes []byte\n\n\tif includeStderr {\n\t\tbytes, err = cmd.CombinedOutput()\n\t} else {\n\t\tbytes, err = cmd.Output()\n\t}\n\n\ttrimmed := strings.TrimSpace(string(bytes))\n\n\treturn trimmed, err\n}", "func (c *DefaultJoinCommand) CommandName() string {\n\treturn \"raft:join\"\n}", "func (sym *COFFSymbol) FullName(st StringTable) (string, error) {\n\tif ok, offset := isSymNameOffset(sym.Name); ok {\n\t\treturn st.String(offset)\n\t}\n\treturn cstring(sym.Name[:]), nil\n}", "func (admin *Admin) FullName() string {\n\treturn strings.TrimSpace(admin.FirstName + \" \" + admin.LastName)\n}", "func cliCmdPath() (string, error) {\n\tswitch runtime.GOOS {\n\tcase \"darwin\", \"linux\":\n\t\treturn path.Join(cliArchiveDir, \"bin\", \"conjure\"), nil\n\tdefault:\n\t\treturn \"\", errors.Errorf(\"OS %s not supported\", runtime.GOOS)\n\t}\n}", "func (o *AssetHolderName) GetFullNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.FullName, true\n}", "func GetFullyQualifiedName(b Builder) string {\n\tvar buf bytes.Buffer\n\tgetFullyQualifiedName(b, &buf)\n\treturn buf.String()\n}", "func (a Article) FullPath() string {\n\treturn path.Join(a.BasePath(), a.Filename)\n}" ]
[ "0.78929174", "0.6329963", "0.6185743", "0.60289514", "0.58822584", "0.58744377", "0.58583766", "0.58583766", "0.5854749", "0.5830984", "0.57478005", "0.5727687", "0.5695473", "0.56776136", "0.5658741", "0.5638062", "0.5595415", "0.5580354", "0.55536264", "0.5552651", "0.5534829", "0.5470772", "0.54385924", "0.5430248", "0.5423085", "0.5416719", "0.5388281", "0.5344867", "0.53356427", "0.5326985", "0.5313758", "0.53105205", "0.5299086", "0.52743226", "0.52665764", "0.52545506", "0.5238966", "0.5233222", "0.52251905", "0.5216386", "0.52026844", "0.5198305", "0.51721096", "0.516696", "0.5159858", "0.5153539", "0.51405984", "0.51230764", "0.51230764", "0.51230764", "0.51173604", "0.511616", "0.51152086", "0.51128787", "0.51098746", "0.51085454", "0.5103064", "0.50885314", "0.5083828", "0.50746536", "0.50390095", "0.50198054", "0.50128233", "0.50057256", "0.4994008", "0.49902385", "0.4982221", "0.49798542", "0.4969174", "0.49670127", "0.49653813", "0.4958378", "0.49449098", "0.4941004", "0.49400213", "0.493428", "0.4922652", "0.49179402", "0.4901345", "0.4899992", "0.4892705", "0.4888958", "0.4879362", "0.48771384", "0.48642555", "0.4862499", "0.48498225", "0.48327446", "0.4828535", "0.482718", "0.482603", "0.4823239", "0.4823239", "0.48197675", "0.48154843", "0.48152903", "0.48071843", "0.48044506", "0.47989407", "0.47983292" ]
0.7697167
1
Run is the entry point to the command graph. The positional arguments are parsed according to the Flag and Command definitions and the matching Action functions are run.
func (cmd *Command) Run(ctx context.Context, arguments []string) (deferErr error) { cmd.setupDefaults(arguments) parentContext := &Context{Context: ctx} if v, ok := ctx.Value(contextContextKey).(*Context); ok { parentContext = v } // handle the completion flag separately from the flagset since // completion could be attempted after a flag, but before its value was put // on the command line. this causes the flagset to interpret the completion // flag name as the value of the flag before it which is undesirable // note that we can only do this because the shell autocomplete function // always appends the completion flag at the end of the command shellComplete, arguments := checkShellCompleteFlag(cmd, arguments) cCtx := NewContext(cmd, nil, parentContext) cCtx.shellComplete = shellComplete cCtx.Command = cmd ctx = context.WithValue(ctx, contextContextKey, cCtx) if cmd.parent == nil { cmd.setupCommandGraph(cCtx) } a := args(arguments) set, err := cmd.parseFlags(&a, cCtx) cCtx.flagSet = set if checkCompletions(cCtx) { return nil } if err != nil { tracef("setting deferErr from %[1]v", err) deferErr = err cCtx.Command.isInError = true if cmd.OnUsageError != nil { err = cmd.OnUsageError(cCtx, err, cmd.parent != nil) err = cCtx.Command.handleExitCoder(cCtx, err) return err } _, _ = fmt.Fprintf(cCtx.Command.Root().Writer, "%s %s\n\n", "Incorrect Usage:", err.Error()) if cCtx.Command.Suggest { if suggestion, err := cmd.suggestFlagFromError(err, ""); err == nil { fmt.Fprintf(cCtx.Command.Root().Writer, "%s", suggestion) } } if !cmd.HideHelp { if cmd.parent == nil { tracef("running ShowAppHelp") if err := ShowAppHelp(cCtx); err != nil { tracef("SILENTLY IGNORING ERROR running ShowAppHelp %[1]v", err) } } else { tracef("running ShowCommandHelp with %[1]q", cmd.Name) if err := ShowCommandHelp(cCtx.parent, cmd.Name); err != nil { tracef("SILENTLY IGNORING ERROR running ShowCommandHelp with %[1]q %[2]v", cmd.Name, err) } } } return err } if checkHelp(cCtx) { return 
helpCommandAction(cCtx) } if cmd.parent == nil && !cCtx.Command.HideVersion && checkVersion(cCtx) { ShowVersion(cCtx) return nil } if cmd.After != nil && !cCtx.shellComplete { defer func() { if err := cmd.After(cCtx); err != nil { err = cCtx.Command.handleExitCoder(cCtx, err) if deferErr != nil { deferErr = newMultiError(deferErr, err) } else { deferErr = err } } }() } if err := cCtx.checkRequiredFlags(cmd.Flags); err != nil { cCtx.Command.isInError = true _ = ShowSubcommandHelp(cCtx) return err } for _, grp := range cmd.MutuallyExclusiveFlags { if err := grp.check(cCtx); err != nil { _ = ShowSubcommandHelp(cCtx) return err } } if cmd.Before != nil && !cCtx.shellComplete { if err := cmd.Before(cCtx); err != nil { deferErr = cCtx.Command.handleExitCoder(cCtx, err) return deferErr } } if err := runFlagActions(cCtx, cmd.appliedFlags); err != nil { return err } var subCmd *Command args := cCtx.Args() if args.Present() { name := args.First() if cCtx.Command.SuggestCommandFunc != nil { name = cCtx.Command.SuggestCommandFunc(cmd.Commands, name) } subCmd = cmd.Command(name) if subCmd == nil { hasDefault := cCtx.Command.DefaultCommand != "" isFlagName := checkStringSliceIncludes(name, cCtx.FlagNames()) var ( isDefaultSubcommand = false defaultHasSubcommands = false ) if hasDefault { dc := cCtx.Command.Command(cCtx.Command.DefaultCommand) defaultHasSubcommands = len(dc.Commands) > 0 for _, dcSub := range dc.Commands { if checkStringSliceIncludes(name, dcSub.Names()) { isDefaultSubcommand = true break } } } if isFlagName || (hasDefault && (defaultHasSubcommands && isDefaultSubcommand)) { argsWithDefault := cCtx.Command.argsWithDefaultCommand(args) if !reflect.DeepEqual(args, argsWithDefault) { subCmd = cCtx.Command.Command(argsWithDefault.First()) } } } } else if cmd.parent == nil && cCtx.Command.DefaultCommand != "" { if dc := cCtx.Command.Command(cCtx.Command.DefaultCommand); dc != cmd { subCmd = dc } } if subCmd != nil { /* newcCtx := NewContext(cCtx.Command, nil, cCtx) 
newcCtx.Command = cmd */ return subCmd.Run(ctx, cCtx.Args().Slice()) } if cmd.Action == nil { cmd.Action = helpCommandAction } if err := cmd.Action(cCtx); err != nil { tracef("calling handleExitCoder with %[1]v", err) deferErr = cCtx.Command.handleExitCoder(cCtx, err) } tracef("returning deferErr") return deferErr }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Run() {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\targs := flag.Args()\n\tif len(args) < 1 {\n\t\tusage()\n\t}\n\n\tmutex.Lock()\n\tc, ok := commands[args[0]]\n\tmutex.Unlock()\n\tif !ok || !c.Runnable() {\n\t\tfmt.Fprintf(os.Stderr, \"%s: unknown subcommand %s\\nRun '%s help' for usage.\\n\", Name, args[0], Name)\n\t\tos.Exit(1)\n\t}\n\n\tfs := flag.NewFlagSet(c.Name(), flag.ExitOnError)\n\tfs.Usage = func() { Usage(c) }\n\tc.Register(fs)\n\tfs.Parse(args[1:])\n\terr := c.Run(fs.Args())\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s: %s: %v\\n\", Name, c.Name(), err)\n\t\tos.Exit(1)\n\t}\n}", "func (a App) Run(args []string) error {\n\t// Ditch our first arg, root node is always\n\t// selected by default\n\targs = args[1:]\n\n\tcursor, err := a.Root()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// DefineFlags our global/root level flags\n\tcursor.Command.DefineFlags(a.fs)\n\ti := 0\n\n\tcmdChain := []string{}\n\tcmdChain = append(cmdChain, cursor.Command.Name())\n\n\tfor _, arg := range args {\n\t\tif sc, ok := cursor.SubCommands[arg]; ok {\n\t\t\tcursor = sc\n\t\t\tcmdChain = append(cmdChain, cursor.Command.Name())\n\t\t\ti += 1\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// DefineFlags sub command flags if we have a sub command.\n\tif i > 0 {\n\t\tcursor.Command.DefineFlags(a.fs)\n\t}\n\n\ta.fs.Usage = usage(\n\t\tstrings.Join(cmdChain, \" \"),\n\t\tcursor.Command.Description(),\n\t\ta.fs,\n\t)\n\n\ta.fs.Parse(args[i:])\n\n\treturn cursor.Command.Execute(a.fs.Args())\n}", "func (m *Main) Run(args ...string) error {\n\tname, args := cmd.ParseCommandName(args)\n\n\t// Extract name from args.\n\tswitch name {\n\tcase \"\", \"help\":\n\t\tif err := help.NewCommand().Run(args...); err != nil {\n\t\t\treturn fmt.Errorf(\"help failed: %s\", err)\n\t\t}\n\tcase \"compact-shard\":\n\t\tc := compact.NewCommand()\n\t\tif err := c.Run(args); err != nil {\n\t\t\treturn fmt.Errorf(\"compact-shard failed: %s\", err)\n\t\t}\n\tcase \"export\":\n\t\tc := 
export.NewCommand(&ossServer{logger: zap.NewNop()})\n\t\tif err := c.Run(args); err != nil {\n\t\t\treturn fmt.Errorf(\"export failed: %s\", err)\n\t\t}\n\tcase \"import\":\n\t\tcmd := importer.NewCommand(&ossServer{logger: zap.NewNop()})\n\t\tif err := cmd.Run(args); err != nil {\n\t\t\treturn fmt.Errorf(\"import failed: %s\", err)\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(`unknown command \"%s\"`+\"\\n\"+`Run 'freets-tools help' for usage`+\"\\n\\n\", name)\n\t}\n\n\treturn nil\n}", "func (c *Cfg) Run(args ...string) {\n\tif args == nil {\n\t\targs = os.Args[1:]\n\t}\n\tc, cmd, args, err := c.Parse(args)\n\tif err == nil {\n\t\tif err = cmd.Main(args); err == nil {\n\t\t\tExit(0)\n\t\t\treturn\n\t\t}\n\t}\n\tif err == ErrHelp {\n\t\tw := newWriter(c)\n\t\tdefer w.done(os.Stderr, 0)\n\t\tw.help()\n\t} else {\n\t\tswitch e := err.(type) {\n\t\tcase UsageError:\n\t\t\tw := newWriter(c)\n\t\t\tdefer w.done(os.Stderr, 2)\n\t\t\tw.error(string(e))\n\t\tcase ExitCode:\n\t\t\tExit(int(e))\n\t\tdefault:\n\t\t\tverb := \"%v\"\n\t\t\tif Debug {\n\t\t\t\tverb = \"%+v\"\n\t\t\t}\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: \"+verb+\"\\n\", err)\n\t\t\tExit(1)\n\t\t}\n\t}\n}", "func Run() {\n\tif len(os.Args) < 2 {\n\t\tusageExit(errors.New(\"missing command name\"))\n\t}\n\tselected := os.Args[1]\n\tif selected == \"-h\" || selected == \"--help\" || selected == \"-help\" {\n\t\tusageExit(nil)\n\t}\n\tif strings.HasPrefix(selected, \"-\") {\n\t\tusageExit(errors.New(\"command name required as first argument\"))\n\t}\n\tcmd, ok := commands[selected]\n\tif !ok {\n\t\tusageExit(errors.New(\"unknown command '\" + selected + \"'\"))\n\t}\n\n\t// flags.Parse prints help in case of -help. 
I'm not a fan of this.\n\tcmd.flags.SetOutput(ioutil.Discard)\n\terr := cmd.flags.Parse(os.Args[2:])\n\tcmd.flags.SetOutput(Output)\n\tif err == flag.ErrHelp {\n\t\tcmdUsageExit(cmd.flags, nil)\n\t}\n\tif err != nil {\n\t\tcmdUsageExit(cmd.flags, err)\n\t}\n\terr = cmd.f()\n\tif err != nil {\n\t\tif isArgError(err) {\n\t\t\tcmdUsageExit(cmd.flags, err)\n\t\t}\n\t\tfmt.Fprintf(Output, \"Error running %s command: %+v\", cmd.flags.Name(), err)\n\t\tos.Exit(1)\n\t}\n\tos.Exit(0)\n}", "func (gc *command) Run(args []string) (err error) {\n\n\tgc.commandData.Target, gc.commandData.UserCommand = common.GetTargetAndClusterCommand(args)\n\n\tif common.HasOption(gc.commandData.UserCommand.Parameters, []string{\"-v\", \"--version\"}) {\n\t\tfmt.Printf(\"Version: %d.%d.%d\\n\", domain.VersionType.Major, domain.VersionType.Minor, domain.VersionType.Build)\n\t\treturn\n\t}\n\n\t// if no user command and args contains -h or --help\n\tif gc.commandData.UserCommand.Command == \"\" {\n\t\tprintHelp()\n\t\treturn\n\t}\n\n\tgeodeConnection := &GeodeConnection{}\n\n\terr = geodeConnection.GetConnectionData(&gc.commandData)\n\tif err != nil {\n\t\tprintHelp()\n\t\treturn\n\t}\n\n\t// From this point common code can handle the processing of the command\n\terr = gc.comm.ProcessCommand(&gc.commandData)\n\n\treturn\n}", "func (c *Command) Run(ctx *Context) {\n\tc.initialize()\n\n\tif c.ShowHelp == nil {\n\t\tc.ShowHelp = showHelp\n\t}\n\n\t// parse cli arguments\n\tcl := &commandline{\n\t\tflags: c.Flags,\n\t\tcommands: c.Commands,\n\t}\n\tvar err error\n\tif c.SkipFlagParsing {\n\t\tcl.args = ctx.args[1:]\n\t} else {\n\t\terr = cl.parse(ctx.args[1:])\n\t}\n\n\t// build context\n\tnewCtx := &Context{\n\t\tname: ctx.name + \" \" + c.Name,\n\t\tapp: ctx.app,\n\t\tcommand: c,\n\t\tflags: c.Flags,\n\t\tcommands: c.Commands,\n\t\targs: cl.args,\n\t\tparent: ctx,\n\t}\n\n\tif err != nil {\n\t\tnewCtx.ShowError(err)\n\t}\n\n\t// show --help\n\tif newCtx.GetBool(\"help\") 
{\n\t\tnewCtx.ShowHelpAndExit(0)\n\t}\n\n\t// command not found\n\tif cl.command == nil && len(c.Commands) > 0 && len(cl.args) > 0 {\n\t\tcmd := cl.args[0]\n\t\tif c.OnCommandNotFound != nil {\n\t\t\tc.OnCommandNotFound(newCtx, cmd)\n\t\t} else {\n\t\t\tnewCtx.ShowError(fmt.Errorf(\"no such command: %s\", cmd))\n\t\t}\n\t\treturn\n\t}\n\n\t// run command\n\tif cl.command != nil {\n\t\tcl.command.Run(newCtx)\n\t\treturn\n\t}\n\n\tif c.Action != nil {\n\t\tdefer newCtx.handlePanic()\n\t\tc.Action(newCtx)\n\t} else {\n\t\tnewCtx.ShowHelpAndExit(0)\n\t}\n}", "func (c Command) Run(args ...string) error {\n\treturn c.builder().Run(args...)\n}", "func (a *App) Run(arguments []string) {\n\ta.initialize()\n\n\t// parse cli arguments\n\tcl := &commandline{\n\t\tflags: a.Flags,\n\t\tcommands: a.Commands,\n\t}\n\terr := cl.parse(arguments[1:])\n\n\t// build context\n\tnewCtx := &Context{\n\t\tname: a.Name,\n\t\tapp: a,\n\t\tflags: a.Flags,\n\t\tcommands: a.Commands,\n\t\targs: cl.args,\n\t}\n\n\tif err != nil {\n\t\tnewCtx.ShowError(err)\n\t}\n\n\t// show --help\n\tif newCtx.GetBool(\"help\") {\n\t\tnewCtx.ShowHelpAndExit(0)\n\t}\n\t// show --version\n\tif newCtx.GetBool(\"version\") {\n\t\ta.ShowVersion(a)\n\t\tos.Exit(0)\n\t}\n\n\t// command not found\n\tif cl.command == nil && len(a.Commands) > 0 && len(cl.args) > 0 {\n\t\tcmd := cl.args[0]\n\t\tif a.OnCommandNotFound != nil {\n\t\t\ta.OnCommandNotFound(newCtx, cmd)\n\t\t} else {\n\t\t\tnewCtx.ShowError(fmt.Errorf(\"no such command: %s\", cmd))\n\t\t}\n\t\treturn\n\t}\n\n\t// run command\n\tif cl.command != nil {\n\t\tcl.command.Run(newCtx)\n\t\treturn\n\t}\n\n\tif a.Action != nil {\n\t\tdefer newCtx.handlePanic()\n\t\ta.Action(newCtx)\n\t} else {\n\t\tnewCtx.ShowHelpAndExit(0)\n\t}\n}", "func (c *PushCommand) Run(args []string) int {\n\n\treturn 0\n}", "func (p *Parser) Run(args []string) {\n\tif args == nil {\n\t\targs = os.Args\n\t}\n\n\t// split binFile and args\n\tbinFile, waitArgs := args[0], args[1:]\n\n\t// register 
help render\n\tp.SetHelpRender(func() {\n\t\tif p.Desc != \"\" {\n\t\t\tcolor.Infoln(p.Desc)\n\t\t}\n\n\t\tcolor.Comment.Println(\"Usage:\")\n\t\tcolor.Cyan.Println(\" \", binFile, \"[--Options...] [CliArgs...]\\n\")\n\n\t\tp.PrintHelpPanel()\n\t})\n\n\t// do parsing\n\tif err := p.Parse(waitArgs); err != nil {\n\t\tif err == flag.ErrHelp {\n\t\t\treturn // ignore help error\n\t\t}\n\n\t\tcolor.Errorf(\"Parse error: %s\\n\", err.Error())\n\t}\n\n\tif p.handle != nil {\n\t\tif err := p.handle(p); err != nil {\n\t\t\tcolor.Errorln(err)\n\t\t}\n\t}\n}", "func (r Root) Run(_ *cobra.Command, args []string) {\n\tfields := strings.Split(args[0], \" \")\n\n\tparsed, err := r.Parser.Parse(\n\t\texpression.NewMinute(fields[0]),\n\t\texpression.NewHour(fields[1]),\n\t\texpression.NewDayOfMonth(fields[2]),\n\t\texpression.NewMonth(fields[3]),\n\t\texpression.NewDayOfWeek(fields[4]),\n\t)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tfor _, v := range parsed {\n\t\tfmt.Printf(\"%-14s%s\\n\", v.Label, v.Value)\n\t}\n\n\tfmt.Printf(\"%-14s%s\\n\", \"command\", fields[5])\n}", "func (cli *CLI) Run(args []string) int {\n\t// Define option flag parse\n\tflags := flag.NewFlagSet(Name, flag.ContinueOnError)\n\tflags.SetOutput(cli.errStream)\n\n\tflags.BoolVar(&cli.nonum, \"nonum\", false, \"hide line numbers\")\n\tflags.StringVar(&cli.delim, \"delim\", \":\", \"a delimiter that separates elements of an argument\")\n\n\tflVersion := flags.Bool(\"version\", false, \"Print version information and quit.\")\n\n\t// Parse commandline flag\n\tif err := flags.Parse(args[1:]); err != nil {\n\t\treturn ExitCodeError\n\t}\n\n\t// Show version\n\tif *flVersion {\n\t\tfmt.Fprintf(cli.errStream, \"%s version %s\\n\", Name, Version)\n\t\treturn ExitCodeOK\n\t}\n\n\tif e := cli.split(flags.Args()); e != nil {\n\t\tfmt.Fprintf(cli.errStream, \"Error splitting %s: %s\\n\", flag.Args(), e)\n\t\treturn ExitCodeError\n\t}\n\n\treturn ExitCodeOK\n}", "func Run(args []string) error {\n\tif len(args) == 
0 {\n\t\treturn ErrUsage\n\t}\n\tswitch args[0] {\n\tcase \"init\":\n\t\treturn Init()\n\tcase \"new\":\n\t\tif len(args) != 2 {\n\t\t\treturn ErrUsage\n\t\t}\n\t\treturn New(args[1])\n\tcase \"up\":\n\t\treturn Up()\n\tcase \"down\":\n\t\treturn Down()\n\t}\n\treturn ErrUsage\n}", "func (c *SystemCommand) Run(args []string) int {\n\tvar debug bool\n\tf := flag.NewFlagSet(\"system\", flag.ContinueOnError)\n\tf.Usage = func() { c.UI.Output(c.Help()) }\n\tf.BoolVar(&debug, \"debug\", false, \"Debug mode enabled\")\n\tif err := f.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\tsetupLogging(debug)\n\treturn c.doSystemInformations()\n}", "func (f *FlagBase[T, C, V]) RunAction(ctx context.Context, cmd *Command) error {\n\tif f.Action != nil {\n\t\treturn f.Action(ctx, cmd, f.Get(cmd))\n\t}\n\n\treturn nil\n}", "func (cli *CLI) Run(args []string) int {\n\tf := flag.NewFlagSet(Name, flag.ContinueOnError)\n\tf.SetOutput(cli.outStream)\n\n\tf.Usage = func() {\n\t\tfmt.Fprintf(cli.outStream, usageText)\n\t\tf.PrintDefaults()\n\t\tfmt.Fprint(cli.outStream, exampleText)\n\t}\n\n\tvar opt Options\n\n\tf.StringVar(&opt.Config, []string{\"c\", \"-config\"}, \"\", \"the path to the configuration file\")\n\tf.StringVar(&opt.Endpoint, []string{\"e\", \"-endpoint\"}, \"\", \"specify github api endpoint\")\n\tf.StringVar(&opt.Token, []string{\"t\", \"-token\"}, \"\", \"github personal token for using API\")\n\tf.StringVar(&opt.Belongs, []string{\"b\", \"-belongs\"}, \"\", \"organization/team on github\")\n\tf.BoolVar(&opt.Syslog, []string{\"s\", \"-syslog\"}, false, \"use syslog for log output\")\n\tf.BoolVar(&opt.Version, []string{\"v\", \"-version\"}, false, \"print the version and exit\")\n\n\tif err := f.Parse(args[1:]); err != nil {\n\t\treturn ExitCodeError\n\t}\n\tparsedArgs := f.Args()\n\n\tif opt.Version {\n\t\tfmt.Fprintf(cli.outStream, \"%s version %s\\n\", Name, Version)\n\t\treturn ExitCodeOK\n\t}\n\n\tif len(parsedArgs) == 0 {\n\t\tf.Usage()\n\t\treturn 
ExitCodeOK\n\t}\n\n\tif parsedArgs[0] != \"keys\" && parsedArgs[0] != \"pam\" {\n\t\tfmt.Fprintf(cli.errStream, \"invalid argument: %s\\n\", parsedArgs[0])\n\t\treturn ExitCodeError\n\t}\n\n\tc := NewConfig(&opt)\n\toct := NewOctopass(c, cli, nil)\n\tif err := oct.Run(parsedArgs); err != nil {\n\t\tfmt.Fprintf(cli.errStream, \"%s\\n\", err)\n\t\treturn ExitCodeError\n\t}\n\n\treturn ExitCodeOK\n}", "func (c *CLI) Run(args []string) (int, error) {\n\tif err := c.Flags.Parse(args); err != nil {\n\t\tif errors.Is(err, flag.ErrHelp) {\n\t\t\tc.cli.Args = c.Flags.Args()\n\t\t\treturn c.cli.Run()\n\t\t}\n\n\t\tc.UI.Error(err.Error())\n\t\treturn 1, nil\n\t}\n\n\tif n := c.Flags.NArg() - 1; n > 1 {\n\t\tc.UI.Error(fmt.Sprintf(\"expected a hostname, got %d args: %v\", n, c.Flags.Args()))\n\t\treturn 1, nil\n\t} else if n == 0 {\n\t\tc.UI.Error(c.cli.HelpFunc(c.cli.Commands))\n\t\treturn 1, nil\n\t}\n\n\tc.cli.Args = c.Flags.Args()\n\treturn c.cli.Run()\n}", "func (c *Subcommand) Run(flags *flag.FlagSet) error {\n\tif c.runFn != nil {\n\t\treturn c.runFn(flags)\n\t}\n\treturn nil\n}", "func (s *Shell) Run(ctx context.Context, args []string) error {\n\tcmds := map[string]*Command{}\n\tfor _, cmd := range s.Commands {\n\t\tcmdNames := append(cmd.Aliases, cmd.Name)\n\t\tfor i := range cmdNames {\n\t\t\tcmds[cmdNames[i]] = cmd\n\t\t}\n\t}\n\n\tif len(args) == 1 && args[0] == \"\" {\n\t\ts.WriteUsage(os.Stdout)\n\t\treturn nil\n\t}\n\n\tcmd := args[0]\n\tvar cleanedArgs []string\n\tif len(args) > 1 {\n\t\tcleanedArgs = args[1:]\n\t}\n\n\tc, ok := cmds[cmd]\n\tif ok {\n\t\treturn c.Do(ctx, s, cleanedArgs)\n\t}\n\treturn s.hasNoSuchCommand(ctx, cmd)\n}", "func (c command) run() error {\n\tc.FlagSet.Parse(flag.Args()[1:])\n\treturn c.f(c.FlagSet)\n}", "func (c *cli) Run(args []string) int {\n\t// Parse CLI args and flags\n\tflags, err := c.parseArgs(args)\n\n\tif err != nil {\n\t\tfmt.Fprintf(c.stderr, \"%v\\n\", err)\n\t\treturn 1\n\t}\n\n\t// Exit immediately if user asked for a 
help or an app version\n\tif flags.isHelp || flags.isVersion {\n\t\treturn 0\n\t}\n\n\t// Setup logrus\n\tc.configureLogger(flags.isVerbose)\n\n\t// Load config\n\tconfigChan, err := c.prepareConfigChan(flags.configPath)\n\n\tif err != nil {\n\t\tfmt.Fprintf(c.stderr, \"failed to load config: %v\\n\", err)\n\t\treturn 1\n\t}\n\n\t// Run the server (this is blocking call)\n\terr = c.runServer(configChan)\n\n\tif err != nil {\n\t\tfmt.Fprintf(c.stderr, \"server error: %v\\n\", err)\n\t\treturn 1\n\t}\n\n\treturn 0\n}", "func (f *Factory) Run() error {\n\tif f.Command == nil || f.Command == f {\n\t\tfmt.Printf(\"Supported commands:\\n %v\", f.Name)\n\t\tfor k := range f.Commands {\n\t\t\tfmt.Printf(\" %v\", k)\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t} else {\n\t\tfmt.Printf(\"Command %v: %v\\n\", f.CommandName, f.Command.GetDescription())\n\t}\n\tflag.Usage()\n\tos.Exit(1)\n\treturn nil\n}", "func Run(state *core.BuildState, label core.AnnotatedOutputLabel, args []string, remote, env, inTmp bool, dir, overrideCmd string) {\n\tprepareRun()\n\n\trun(context.Background(), state, label, args, false, false, remote, env, false, inTmp, dir, overrideCmd)\n}", "func (c *NodeInfoCommand) Run(args []string) int {\n\tc.ui.Info(c.Synopsis())\n\n\tflags := flag.NewFlagSet(c.name, flag.ContinueOnError)\n\tif err := flags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\tpeerInfo, err := c.eth.NodeInfo()\n\tif err != nil {\n\t\tc.ui.Error(fmt.Sprintf(\"fail to call eth.NodeInfo() %+v\", err))\n\t\treturn 1\n\t}\n\n\tc.ui.Info(fmt.Sprintf(\"nodeinfo: %v\", peerInfo))\n\n\treturn 0\n}", "func (cli *CLI) Run() {\n\tcli.validateArgs()\n\n\tconst NODE_ID = \"1\"\n\n\tcreateBlockChainCmd := flag.NewFlagSet(\"createblockchain\", flag.ExitOnError)\n\tprintChainCmd := flag.NewFlagSet(\"printchain\", flag.ExitOnError)\n\tgetBalanceCmd := flag.NewFlagSet(\"getBalance\", flag.ExitOnError)\n\tsendCmd := flag.NewFlagSet(\"send\", flag.ExitOnError)\n\n\tcreateBlockChainAddress := 
createBlockChainCmd.String(\"address\",\t \"\", \"The address to send genesis block reward to\")\n\tgetBalanceAddress := getBalanceCmd.String(\"address\", \"\", \"The address to get balance for\")\n\tsendFrom := sendCmd.String(\"from\", \"\", \"The address to get coins from\")\n\tsendTo := sendCmd.String(\"to\", \"\", \"The address to send coins to\")\n\tsendAmount := sendCmd.Int(\"amount\", 0, \"The amount to send\")\n\n\n\tswitch os.Args[1] {\n\tcase \"createblockchain\":\n\t\terr := createBlockChainCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"printchain\":\n\t\terr := printChainCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"getbalance\":\n\t\terr := getBalanceCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"send\":\n\t\terr := sendCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tdefault:\n\t\tcli.printUsage()\n\t\tos.Exit(1)\n\t}\n\n\tif createBlockChainCmd.Parsed() {\n\t\tif *createBlockChainAddress == \"\" {\n\t\t\tcreateBlockChainCmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tcli.createBlockChain(*createBlockChainAddress, NODE_ID)\n\t}\n\n\tif getBalanceCmd.Parsed() {\n\t\tif *getBalanceAddress == \"\" {\n\t\t\tgetBalanceCmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tcli.getBalance(*getBalanceAddress, NODE_ID)\n\t}\n\n\tif sendCmd.Parsed() {\n\t\tif *sendFrom == \"\" || *sendTo == \"\" || *sendAmount == 0 {\n\t\t\tgetBalanceCmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tcli.send(*sendFrom, *sendTo, *sendAmount, NODE_ID)\n\t}\n\n\tif printChainCmd.Parsed() {\n\t\t//TODO: need to pass NodeId\n\t\tcli.printChain(NODE_ID)\n\t}\n}", "func Run(args []string, env map[string]string) error {\n\tapp, cmd, err := DryRun(args, env)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cmd != nil {\n\t\tfmt.Printf(\"\\nINFO: Starting the Coherence %s process using:\\n\", app)\n\t\tfmt.Printf(\"INFO: %s %s\\n\\n\", cmd.Path, strings.Join(cmd.Args, \" 
\"))\n\t\tfmt.Println(\"INFO: Environment:\")\n\t\tfor _, e := range cmd.Env {\n\t\t\tfmt.Printf(\"INFO: %s\\n\", e)\n\t\t}\n\t\treturn cmd.Run()\n\t}\n\treturn nil\n}", "func (a *App) Run() {\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tinitFlag()\n\n\tcmd := cobra.Command{\n\t\tUse: FormatBaseName(a.basename),\n\t\tLong: a.description,\n\t\tSilenceUsage: true,\n\t\tSilenceErrors: true,\n\t}\n\tcmd.SetUsageTemplate(usageTemplate)\n\tcmd.Flags().SortFlags = false\n\tif len(a.commands) > 0 {\n\t\tfor _, command := range a.commands {\n\t\t\tcmd.AddCommand(command.cobraCommand())\n\t\t}\n\t\tcmd.SetHelpCommand(helpCommand(a.name))\n\t}\n\tif a.runFunc != nil {\n\t\tcmd.Run = a.runCommand\n\t}\n\n\tcmd.Flags().AddGoFlagSet(flag.CommandLine)\n\tif a.options != nil {\n\t\tif _, ok := a.options.(ConfigurableOptions); ok {\n\t\t\taddConfigFlag(a.basename, cmd.Flags())\n\t\t}\n\t\ta.options.AddFlags(cmd.Flags())\n\t}\n\n\tif !a.noVersion {\n\t\tversion.AddFlags(cmd.Flags())\n\t}\n\taddHelpFlag(a.name, cmd.Flags())\n\n\tif err := cmd.Execute(); err != nil {\n\t\tfmt.Printf(\"%v %v\\n\", color.RedString(\"Error:\"), err)\n\t\tos.Exit(1)\n\t}\n}", "func Run() error {\n\tcommand := &commander.Command{\n\t\tUsageLine: os.Args[0],\n\t\tShort: \"go_todo\",\n\t}\n\n\tcommand.Subcommands = []*commander.Command{\n\t\ttodoList(todoFilename),\n\t\ttodoSave(todoFilename),\n\t\ttodoStatus(todoFilename),\n\t\ttodoDelete(todoFilename),\n\t}\n\n\terr := command.Dispatch(os.Args[1:])\n\tif err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn err\n}", "func (c *CLI) Run(args []string) int {\n\tvar (\n\t\tdebug bool\n\t\tversion bool\n\t)\n\tflags := flag.NewFlagSet(args[0], flag.ContinueOnError)\n\tflags.Usage = func() {\n\t\tfmt.Fprint(c.errStream, helpText)\n\t}\n\tflags.BoolVar(&debug, \"debug\", false, \"\")\n\tflags.BoolVar(&debug, \"d\", false, \"\")\n\tflags.BoolVar(&version, \"version\", false, \"\")\n\tflags.BoolVar(&version, \"v\", false, \"\")\n\n\t// 
Parse flag\n\tif err := flags.Parse(args[1:]); err != nil {\n\t\treturn ExitCodeParseFlagsError\n\t}\n\n\tif debug {\n\t\tos.Setenv(EnvDebug, \"1\")\n\t\tDebugf(\"Run as DEBUG mode\")\n\t}\n\n\tif version {\n\t\tfmt.Fprintf(c.outStream, fmt.Sprintf(\"%s\\n\", Version))\n\t\treturn ExitCodeOK\n\t}\n\n\tparsedArgs := flags.Args()\n\tif len(parsedArgs) == 0 {\n\t\tPrintErrorf(\"Invalid argument: you must set keyword.\")\n\t\treturn ExitCodeBadArgs\n\t}\n\n\tkeywords := parsedArgs\n\tDebugf(\"keyword: %s\", keywords)\n\n\tsearcher, err := NewClient(keywords)\n\tif err != nil {\n\t\treturn ExitCodeError\n\t}\n\n\tstatus := searcher.search()\n\tif status != ExitCodeOK {\n\t\treturn ExitCodeError\n\t}\n\n\tsearcher.output(c.outStream)\n\n\treturn ExitCodeOK\n}", "func (plugin OpenPlugin) Run(cliConnection plugin.CliConnection, args []string) {\n\terr := checkArgs(cliConnection, args)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\tif args[0] == \"open\" {\n\t\tplugin.runAppOpen(cliConnection, args)\n\t} else if args[0] == \"service-open\" {\n\t\tplugin.runServiceOpen(cliConnection, args)\n\t}\n}", "func (cli *Cli) Run() {\n\t// define two cli input modes\n\tsendCmdSet := flag.NewFlagSet(\"send\", flag.ExitOnError)\n\tprintCmdSet := flag.NewFlagSet(\"print\", flag.ExitOnError)\n\tgetbalanceCmdSet := flag.NewFlagSet(\"getbalance\", flag.ExitOnError)\n\n\tsendFrom := sendCmdSet.String(\"from\", \"\", \"Sender of coins.\")\n\tsendTo := sendCmdSet.String(\"to\", \"\", \"Receiver of coins.\")\n\tsendAmt := sendCmdSet.String(\"amount\", \"\", \"Amount to send.\")\n\n\tgetbalanceCmd := getbalanceCmdSet.String(\"address\", \"\", \"Get the balance of address.\")\n\n\tif len(os.Args) < 2 {\n\t\tfmt.Println(\"subcommand required.\")\n\t\tos.Exit(1)\n\t}\n\n\tswitch os.Args[1] {\n\tcase \"send\":\n\t\tsendCmdSet.Parse(os.Args[2:])\n\tcase \"print\":\n\t\tprintCmdSet.Parse(os.Args[2:])\n\tcase 
\"getbalance\":\n\t\tgetbalanceCmdSet.Parse(os.Args[2:])\n\tdefault:\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\t}\n\n\tif sendCmdSet.Parsed() {\n\t\tif *sendFrom == \"\" || *sendTo == \"\" || *sendAmt == \"\" {\n\t\t\tsendCmdSet.PrintDefaults()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tamt, _ := strconv.ParseInt(*sendAmt, 10, 64)\n\t\tcli.Send(*sendFrom, *sendTo, int(amt))\n\n\t}\n\n\tif printCmdSet.Parsed() {\n\t\tcli.bc.Print()\n\t}\n\n\tif getbalanceCmdSet.Parsed() {\n\t\tif *getbalanceCmd == \"\" {\n\t\t\tgetbalanceCmdSet.PrintDefaults()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tcli.GetBalance(*getbalanceCmd)\n\t}\n}", "func (b *RunStep) Run(state quantum.StateBag) error {\n\trunner := state.Get(\"runner\").(quantum.Runner)\n\tconn := state.Get(\"conn\").(quantum.AgentConn)\n\toutCh := conn.Logs()\n\tsigCh := conn.Signals()\n\n\tif b.Command == \"\" {\n\t\tcommandRaw, ok := state.GetOk(\"command\")\n\t\tif ok {\n\t\t\tb.Command = commandRaw.(string)\n\t\t}\n\t}\n\n\tlog.Printf(\"Running command: %v\", b.Command)\n\n\terr := runner.Run(b.Command, outCh, sigCh)\n\tif err != nil {\n\t\treturn errors.New(\"Cmd: \" + b.Command + \" failed: \" + err.Error())\n\t}\n\treturn nil\n}", "func (cli *CLI) Run() {\n\n\t// validate command line arguments are entered\n\t// or print instructions and exit gracefully\n\tif len(os.Args) < 2 {\n\n\t\t// no command was entered, print usage and\n\t\t// safely shutdown go routines (so we don't\n\t\t// corrupt our database)\n\t\tcli.printUsage()\n\t\truntime.Goexit()\n\t}\n\n\t// initialize command line flags\n\tgetBalanceCmd := flag.NewFlagSet(\"getbal\", flag.ExitOnError)\n\tcreateBlockchainCmd := flag.NewFlagSet(\"create\", flag.ExitOnError)\n\tsendCmd := flag.NewFlagSet(\"send\", flag.ExitOnError)\n\tprintBlocksCmd := flag.NewFlagSet(\"print\", flag.ExitOnError)\n\tcreateWalletCmd := flag.NewFlagSet(\"createwallet\", flag.ExitOnError)\n\tlistAddressesCmd := flag.NewFlagSet(\"listaddresses\", flag.ExitOnError)\n\tgetBalanceAddress := 
getBalanceCmd.String(\"address\", \"\", \"The address to get balance for\")\n\tcreateBlockchainAddress := createBlockchainCmd.String(\"address\", \"\", \"The address to send genesis block reward to\")\n\tsendFrom := sendCmd.String(\"from\", \"\", \"Source wallet address\")\n\tsendTo := sendCmd.String(\"to\", \"\", \"Destination wallet address\")\n\tsendAmount := sendCmd.Int(\"amount\", 0, \"Amount to send\")\n\n\t// parse first command line argument\n\tswitch os.Args[1] {\n\tcase \"print\":\n\t\terr := printBlocksCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"Unable to parse print command: %s\", err.Error())\n\t\t} else {\n\t\t\tcli.printBlocks()\n\t\t}\n\tcase \"getbal\":\n\t\terr := getBalanceCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"Unable to parse %s command: %s\", os.Args[1], err.Error())\n\t\t}\n\tcase \"create\":\n\t\terr := createBlockchainCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"Unable to parse %s command: %s\", os.Args[1], err.Error())\n\t\t}\n\tcase \"send\":\n\t\terr := sendCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"Unable to parse %s command: %s\", os.Args[1], err.Error())\n\t\t}\n\tcase \"createwallet\":\n\t\terr := createWalletCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"Unable to parse createwallet command: %s\", err.Error())\n\t\t} else {\n\t\t\tcli.createWallet()\n\t\t}\n\tcase \"listaddresses\":\n\t\terr := listAddressesCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"Unable to parse listaddresses command: %s\", err.Error())\n\t\t} else {\n\t\t\tcli.listAddresses()\n\t\t}\n\tdefault:\n\t\t// print usage instructions and exit gracefully\n\t\tcli.printUsage()\n\t\truntime.Goexit()\n\t}\n\n\t// continue parsing getBalanceCmd\n\tif getBalanceCmd.Parsed() {\n\t\tif *getBalanceAddress == \"\" {\n\t\t\tgetBalanceCmd.Usage()\n\t\t\truntime.Goexit()\n\t\t}\n\t\tcli.getBalance(*getBalanceAddress)\n\t}\n\n\t// continue parsing 
createBlockchainCmd\n\tif createBlockchainCmd.Parsed() {\n\t\tif *createBlockchainAddress == \"\" {\n\t\t\tcreateBlockchainCmd.Usage()\n\t\t\truntime.Goexit()\n\t\t}\n\t\tcli.createBlockChain(*createBlockchainAddress)\n\t}\n\n\t// continue parsing sendCmd\n\tif sendCmd.Parsed() {\n\t\tif *sendFrom == \"\" || *sendTo == \"\" || *sendAmount <= 0 {\n\t\t\tsendCmd.Usage()\n\t\t\truntime.Goexit()\n\t\t}\n\n\t\tcli.send(*sendFrom, *sendTo, *sendAmount)\n\t}\n}", "func (c *DisplayCommand) Run(args []string) int {\n\tvar debug bool\n\tf := flag.NewFlagSet(\"display\", flag.ContinueOnError)\n\tf.Usage = func() { c.UI.Output(c.Help()) }\n\tf.BoolVar(&debug, \"debug\", false, \"Debug mode enabled\")\n\tif err := f.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\tsetupLogging(debug)\n\treturn c.doDisplaySystemInformations()\n}", "func (cli *CLI) Run() {\n\tcli.validateArgs()\n\n\t//possible commands\n\twallet := cmds.ConfigureCreateWalletCmd()\n\tcreate, createData := cmds.ConfigureCreateChainCmd()\n\tgetBalance, getBalanceData := cmds.ConfigureBalanceCmd()\n\tsend, from, to, amount := cmds.ConfigureSendCmd()\n\tprintChain := cmds.ConfigurePrintCmd()\n\n\tif len(os.Args) >= 1 {\n\t\tswitch os.Args[1] {\n\t\tcase cmds.CreateWalletCmdId:\n\t\t\t_ = wallet.Parse(os.Args[2:])\n\t\tcase cmds.PrintCmdId:\n\t\t\t_ = printChain.Parse(os.Args[2:])\n\t\tcase cmds.CreateChainCmdId:\n\t\t\t_ = create.Parse(os.Args[2:])\n\t\tcase cmds.BalanceCmdId:\n\t\t\t_ = getBalance.Parse(os.Args[2:])\n\t\tcase cmds.SendCmdId:\n\t\t\t_ = send.Parse(os.Args[2:])\n\t\tdefault:\n\t\t\tcli.printUsage()\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\tcli.printUsage()\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tif wallet.Parsed() {\n\t\tcli.createWallet()\n\t}\n\n\tif printChain.Parsed() {\n\t\tcli.printChain()\n\t}\n\n\tif create.Parsed() {\n\t\terr := cli.createBlockchain(*createData)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tif getBalance.Parsed() {\n\t\tif *getBalanceData == \"\" 
{\n\t\t\tgetBalance.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr := cli.getBalance(*getBalanceData)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tif send.Parsed() {\n\t\tif *from == \"\" || *to == \"\" || *amount <= 0 {\n\t\t\tsend.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr := cli.send(*from, *to, *amount)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}", "func (h *Handler) Run(_ *cli.Context) error {\n\tcmd, err := newCommand(h)\n\tutils.FatalOnErr(err)\n\n\tutils.FatalOnErr(cmd.Run())\n\n\treturn nil\n}", "func (c *Command) Run(args []string) {\n\tservice := servd.New()\n\tc.loadAndValidateConfig()\n\n\tswitch args[0] {\n\tcase inquiry:\n\t\tservice.Inquiry()\n\tcase payment:\n\t\tservice.Payment()\n\tcase checkStatus:\n\t\tservice.CheckStatus()\n\tdefault:\n\t\tlog.Println(\"please specify the available command (inquiry, payment, checkstatus)\")\n\t}\n}", "func (c *CommandEngine) Run() {\n\n\tcn := containers.NewEngine()\n\tvar commands = []*cobra.Command{\n\t\t// this for run server by comands\n\t\t{\n\t\t\tUse: \"serve\",\n\t\t\tShort: \"Noteable Listening HTTP server\",\n\t\t\tLong: \"Noteable-app Listening HTTP server\",\n\t\t\t//Args: cobra.MinimumNArgs(1),\n\t\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\t\tactions.ServerListen(cn.Make(new(routes.Route)).GetRouter())\n\t\t\t},\n\t\t},\n\t}\n\tfor _, command := range commands {\n\t\tc.rootCmd.AddCommand(command)\n\t}\n\tc.rootCmd.Execute()\n}", "func (action *Action) Run(service string, cli kt.CliInterface, options *options.DaemonOptions) error {\n\tch := SetUpCloseHandler(cli, options, \"run\")\n\trun(service, cli, options)\n\t<-ch\n\treturn nil\n}", "func (f CommanderFunc) Run(ctx context.Context, command string, args ...string) error {\n\treturn f(ctx, command, args...)\n}", "func (c Command) Run(ctx *Context) (err error) {\n\tif !c.SkipFlagParsing {\n\t\tif len(c.Subcommands) > 0 {\n\t\t\treturn c.startApp(ctx)\n\t\t}\n\t}\n\n\tif !c.HideHelp && (HelpFlag != BoolFlag{}) 
{\n\t\t// append help to flags\n\t\tc.Flags = append(\n\t\t\tc.Flags,\n\t\t\tHelpFlag,\n\t\t)\n\t}\n\n\tif ctx.App.UseShortOptionHandling {\n\t\tc.UseShortOptionHandling = true\n\t}\n\n\tset, err := c.parseFlags(ctx.Args().Tail(), ctx.shellComplete)\n\n\tcontext := NewContext(ctx.App, set, ctx)\n\tcontext.Command = c\n\tif checkCommandCompletions(context, c.Name) {\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\tif c.OnUsageError != nil {\n\t\t\terr := c.OnUsageError(context, err, false)\n\t\t\tcontext.App.handleExitCoder(context, err)\n\t\t\treturn err\n\t\t}\n\t\t_, _ = fmt.Fprintln(context.App.Writer, \"Incorrect Usage:\", err.Error())\n\t\t_, _ = fmt.Fprintln(context.App.Writer)\n\t\t_ = ShowCommandHelp(context, c.Name)\n\t\treturn err\n\t}\n\n\tif checkCommandHelp(context, c.Name) {\n\t\treturn nil\n\t}\n\n\tcerr := checkRequiredFlags(c.Flags, context)\n\tif cerr != nil {\n\t\t_ = ShowCommandHelp(context, c.Name)\n\t\treturn cerr\n\t}\n\n\tif c.After != nil {\n\t\tdefer func() {\n\t\t\tafterErr := c.After(context)\n\t\t\tif afterErr != nil {\n\t\t\t\tcontext.App.handleExitCoder(context, err)\n\t\t\t\tif err != nil {\n\t\t\t\t\terr = NewMultiError(err, afterErr)\n\t\t\t\t} else {\n\t\t\t\t\terr = afterErr\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tif c.Before != nil {\n\t\terr = c.Before(context)\n\t\tif err != nil {\n\t\t\tcontext.App.handleExitCoder(context, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif c.Action == nil {\n\t\tc.Action = helpSubcommand.Action\n\t}\n\n\terr = HandleAction(c.Action, context)\n\n\tif err != nil {\n\t\tcontext.App.handleExitCoder(context, err)\n\t}\n\treturn err\n}", "func (c *RunCommand) Run(args []string) int {\n\tvar (\n\t\tlangFlag string\n\t\tvalidaterFlag string\n\t\tverboseFlag bool\n\t\troundFlag int\n\t)\n\n\tflags := c.Meta.NewFlagSet(\"run\", c.Help())\n\tflags.StringVar(&langFlag, \"l\", \"\", \"Specify Language\")\n\tflags.StringVar(&langFlag, \"language\", \"\", \"Specify Language\")\n\tflags.StringVar(&validaterFlag, \"V\", 
\"\", \"Specify Validater\")\n\tflags.StringVar(&validaterFlag, \"validater\", \"\", \"Specify Validater\")\n\tflags.BoolVar(&verboseFlag, \"vb\", false, \"increase amount of output\")\n\tflags.BoolVar(&verboseFlag, \"verbose\", false, \"increase amount of output\")\n\tflags.IntVar(&roundFlag, \"p\", 0, \"Rounded to the decimal point p digits\")\n\tflags.IntVar(&roundFlag, \"place\", 0, \"Rounded to the decimal point place digits\")\n\n\tif err := flags.Parse(args); err != nil {\n\t\tmsg := fmt.Sprintf(\"Invalid option: %s\", strings.Join(args, \" \"))\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\targs = flags.Args()\n\n\tif len(args) < 2 {\n\t\tmsg := fmt.Sprintf(\"Invalid arguments: %s\", strings.Join(args, \" \"))\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\tif _, err := os.Stat(args[0]); err != nil {\n\t\tc.UI.Error(\"does not exist (No such directory)\")\n\t\treturn ExitCodeFailed\n\t}\n\n\tif langFlag == \"\" {\n\t\tlangFlag = strings.Replace(path.Ext(args[1]), \".\", \"\", -1)\n\t}\n\tlang, ok := Lang[langFlag]\n\tif !ok {\n\t\tmsg := fmt.Sprintf(\"Invalid language: %s\", langFlag)\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\tif roundFlag < 0 || roundFlag > 15 {\n\t\tmsg := fmt.Sprintf(\"Invalid round: %d\", roundFlag)\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\tif validaterFlag == \"float\" {\n\t\tValidaters[\"float\"] = &FloatValidater{Place: roundFlag}\n\t}\n\n\tif validaterFlag == \"\" {\n\t\tvalidaterFlag = \"diff\"\n\t}\n\tv, ok := Validaters[validaterFlag]\n\tif !ok {\n\t\tmsg := fmt.Sprintf(\"Invalid validater: %s\", validaterFlag)\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\tinfoBuf, err := ioutil.ReadFile(args[0] + \"/\" + \"info.json\")\n\tif err != nil {\n\t\tc.UI.Error(fmt.Sprintf(\"failed to read info file: %v\", err))\n\t\treturn ExitCodeFailed\n\t}\n\n\tinfo := Info{}\n\tif err := json.Unmarshal(infoBuf, &info); err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 
ExitCodeFailed\n\t}\n\n\tvar w, e io.Writer\n\tif verboseFlag {\n\t\tw, e = os.Stdout, os.Stderr\n\t}\n\n\tcode, result, clearFunc, err := NewCode(args[1], lang, &info, w, e)\n\tif err != nil {\n\t\tc.UI.Output(err.Error())\n\t\treturn ExitCodeFailed\n\t}\n\tc.UI.Output(result.String())\n\tdefer clearFunc()\n\n\tvar rCode *Code\n\tif info.JudgeType > 0 {\n\t\trCode, clearFunc, err = NewReactiveCode(&info, args[0], w, e)\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn ExitCodeFailed\n\t\t}\n\t\tdefer clearFunc()\n\t}\n\n\tinputFiles, err := filepath.Glob(strings.Join([]string{args[0], \"test_in\", \"*\"}, \"/\"))\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"input testcase error: %v\", err)\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\toutputFiles, err := filepath.Glob(strings.Join([]string{args[0], \"test_out\", \"*\"}, \"/\"))\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"output testcase error: %v\", err)\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\tfor i := 0; i < len(inputFiles); i++ {\n\t\terr := func() error {\n\t\t\tinput, err := os.Open(inputFiles[i])\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"input test file error: %v\", err)\n\t\t\t}\n\t\t\tdefer input.Close()\n\n\t\t\toutput, err := ioutil.ReadFile(outputFiles[i])\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"output test file error: %v\", err)\n\t\t\t}\n\n\t\t\tvar result string\n\t\t\tif info.JudgeType > 0 {\n\t\t\t\tresult, err = rCode.Reactive(code, inputFiles[i], outputFiles[i], input, w, e)\n\t\t\t} else {\n\t\t\t\tvar buf bytes.Buffer\n\t\t\t\tresult, err = code.Run(v, output, input, &buf, e)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_, testFile := path.Split(inputFiles[i])\n\t\t\tc.UI.Output(fmt.Sprintf(\"%s\\t%s\", result, testFile))\n\t\t\treturn nil\n\t\t}()\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn ExitCodeFailed\n\t\t}\n\t}\n\treturn ExitCodeOK\n}", "func (c *Command) Run(s 
*runtime.Scheme, log logging.Logger) error {\n\tcfg, err := ctrl.GetConfig()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Cannot get config\")\n\t}\n\tlog.Debug(\"Starting\", \"sync-period\", c.Sync.String())\n\n\tmgr, err := ctrl.NewManager(cfg, ctrl.Options{\n\t\tScheme: s,\n\t\tLeaderElection: c.LeaderElection,\n\t\tLeaderElectionID: \"crossplane-leader-election-core\",\n\t\tSyncPeriod: &c.Sync,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Cannot create manager\")\n\t}\n\n\tif err := apiextensions.Setup(mgr, log); err != nil {\n\t\treturn errors.Wrap(err, \"Cannot setup API extension controllers\")\n\t}\n\n\tpkgCache := xpkg.NewImageCache(c.CacheDir, afero.NewOsFs())\n\n\tif err := pkg.Setup(mgr, log, pkgCache, c.Namespace); err != nil {\n\t\treturn errors.Wrap(err, \"Cannot add packages controllers to manager\")\n\t}\n\n\treturn errors.Wrap(mgr.Start(ctrl.SetupSignalHandler()), \"Cannot start controller manager\")\n}", "func Run() {\n\tcmd.Execute()\n}", "func Run() error {\n\treturn command.Execute()\n}", "func (g *App) Run(args []string) error {\n\tif g.root == nil {\n\t\tpanic(\"need Bind or use NewWith\")\n\t}\n\n\t_, _, err := g.exec(args, true)\n\treturn err\n}", "func Run(argv []string) (err error) {\n\tenvFlag := cli.StringFlag{\n\t\tName: \"env\",\n\t\tValue: \"dev\",\n\t\tUsage: \"Specify Space environment\",\n\t}\n\n\tdownloadCommand := cli.Command{\n\t\tName: \"pull\",\n\t\tAliases: []string{\"download\"},\n\t\tUsage: \"Download file from Space\",\n\t\tArgsUsage: \"Space object's name\",\n\t\tFlags: []cli.Flag{\n\t\t\t&envFlag,\n\t\t\t&cli.StringFlag{\n\t\t\t\tName: \"output\",\n\t\t\t\tAliases: []string{\"o\"},\n\t\t\t\tUsage: \"Output file, otherwise use object's name\",\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t},\n\t\tAction: downloadAction,\n\t}\n\n\tlistInternalCommand := cli.Command{\n\t\tName: \"list-internal\",\n\t\tUsage: \"List available buckets or objects in Space. 
Not a good idea for production bucket.\",\n\t\tArgsUsage: \"If given, list all objects in {bucket}/{prefix}, otherwise list all buckets\",\n\t\tHideHelp: true,\n\t\tHidden: true,\n\t\tFlags: []cli.Flag{\n\t\t\t&envFlag,\n\t\t},\n\t\tAction: listInternalAction,\n\t}\n\n\tlistCommand := cli.Command{\n\t\tName: \"list\",\n\t\tUsage: \"List available objects in Space.\",\n\t\tArgsUsage: \"Prefix\",\n\t\tFlags: []cli.Flag{\n\t\t\t&envFlag,\n\t\t},\n\t\tAction: listAction,\n\t}\n\n\tpushCommand := cli.Command{\n\t\tName: \"push\",\n\t\tAliases: []string{\"upload\"},\n\t\tUsage: \"Upload file/folder to Space\",\n\t\tArgsUsage: \"File or folder path to upload\",\n\t\tFlags: []cli.Flag{\n\t\t\t&envFlag,\n\t\t\t&cli.BoolFlag{\n\t\t\t\tName: \"recursive\",\n\t\t\t\tAliases: []string{\"r\"},\n\t\t\t\tUsage: \"Upload a folder recursively\",\n\t\t\t\tValue: false,\n\t\t\t},\n\t\t\t&cli.StringFlag{\n\t\t\t\tName: \"prefix\",\n\t\t\t\tAliases: []string{\"p\"},\n\t\t\t\tUsage: \"Object name's prefix.\",\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t\t&cli.StringFlag{\n\t\t\t\tName: \"tags\",\n\t\t\t\tAliases: []string{\"t\"},\n\t\t\t\tUsage: \"Add tags, e.g. 
\\\"version: 0.0, type: app\\\"\",\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t},\n\t\tAction: pushAction,\n\t}\n\n\tremoveCommand := cli.Command{\n\t\tName: \"remove\",\n\t\tAliases: []string{\"rm\"},\n\t\tUsage: \"Remove file(s) in Space\",\n\t\tArgsUsage: \"Files to be removed\",\n\t\tFlags: []cli.Flag{\n\t\t\t&envFlag,\n\t\t},\n\t\tAction: removeAction,\n\t}\n\n\tapp := &cli.App{\n\t\tName: \"space\",\n\t\tUsage: \"Work with Space and assets\",\n\t\tCommands: []*cli.Command{\n\t\t\t&downloadCommand,\n\t\t\t&listInternalCommand,\n\t\t\t&listCommand,\n\t\t\t&pushCommand,\n\t\t\t&removeCommand,\n\t\t},\n\t}\n\n\terr = app.Run(argv)\n\treturn err\n}", "func (cmd HelpCommand) Run(ctx context.Context, wf *aw.Workflow) {\n\tsubcommands := []struct {\n\t\tname string\n\t\tdesc string\n\t}{\n\t\t{\n\t\t\tname: \"pulls\",\n\t\t\tdesc: \"\",\n\t\t},\n\t\t{\n\t\t\tname: \"issues\",\n\t\t\tdesc: \"\",\n\t\t},\n\t}\n\n\tfor _, cmd := range subcommands {\n\t\twf.NewItem(cmd.name).\n\t\t\tSubtitle(cmd.desc).\n\t\t\tAutocomplete(\"my \" + cmd.name + \" \").\n\t\t\tValid(false)\n\t}\n\n\tif cmd.HasQuery() {\n\t\twf.Filter(cmd.Query())\n\t}\n\n\t// Show a warning in Alfred if there are no commands.\n\twf.WarnEmpty(\"No commands found.\", \"\")\n}", "func (Interface *LineInterface) Run() {\n\tInterface.ValidateArguments()\n\tgetBalanceCommand := flag.NewFlagSet(\"getbalance\", flag.ExitOnError)\n\tcreateBlockChainCommand := flag.NewFlagSet(\"createblockchain\", flag.ExitOnError)\n\tsendCommand := flag.NewFlagSet(\"send\", flag.ExitOnError)\n\tprintChainCommand := flag.NewFlagSet(\"printchain\", flag.ExitOnError)\n\tcreateWalletCommand := flag.NewFlagSet(\"createwallet\", flag.ExitOnError)\n\tlistAddressesCommand := flag.NewFlagSet(\"listaddresses\", flag.ExitOnError)\n\n\tgetBalanceAddress := getBalanceCommand.String(\"address\", \"\", \"The Address to find Balance.\")\n\tcreateBlockChainAddress := createBlockChainCommand.String(\"address\", \"\", \"The Address to send Reward 
to.\")\n\tsendFrom := sendCommand.String(\"from\", \"\", \"Source Wallet Address\")\n\tsendTo := sendCommand.String(\"to\", \"\", \"Destination Wallet Address\")\n\tsendAmount := sendCommand.Int(\"amount\", 0, \"Amount To Send\")\n\tswitch os.Args[1] {\n\tcase \"getbalance\":\n\t\terr := getBalanceCommand.Parse(os.Args[2:])\n\t\tblockchain.Handle(err)\n\tcase \"createblockchain\":\n\t\terr := createBlockChainCommand.Parse(os.Args[2:])\n\t\tblockchain.Handle(err)\n\tcase \"listaddresses\":\n\t\terr := listAddressesCommand.Parse(os.Args[2:])\n\t\tblockchain.Handle(err)\n\tcase \"createwallet\":\n\t\terr := createWalletCommand.Parse(os.Args[2:])\n\t\tblockchain.Handle(err)\n\tcase \"printchain\":\n\t\terr := printChainCommand.Parse(os.Args[2:])\n\t\tblockchain.Handle(err)\n\tcase \"send\":\n\t\terr := sendCommand.Parse(os.Args[2:])\n\t\tblockchain.Handle(err)\n\tdefault:\n\t\tInterface.PrintUsage()\n\t\truntime.Goexit()\n\t}\n\tif getBalanceCommand.Parsed() {\n\t\tif *getBalanceAddress == \"\" {\n\t\t\tgetBalanceCommand.Usage()\n\t\t\truntime.Goexit()\n\t\t}\n\t\tInterface.GetBalance(*getBalanceAddress)\n\t}\n\tif createBlockChainCommand.Parsed() {\n\t\tif *createBlockChainAddress == \"\" {\n\t\t\tcreateBlockChainCommand.Usage()\n\t\t\truntime.Goexit()\n\t\t}\n\t\tInterface.CreateBlockChain(*createBlockChainAddress)\n\t}\n\tif printChainCommand.Parsed() {\n\t\tInterface.PrintChain()\n\t}\n\tif createWalletCommand.Parsed() {\n\t\tInterface.CreateWallet()\n\t}\n\tif listAddressesCommand.Parsed() {\n\t\tInterface.ListAddresses()\n\t}\n\tif sendCommand.Parsed() {\n\t\tif *sendFrom == \"\" || *sendTo == \"\" || *sendAmount <= 0 {\n\t\t\tsendCommand.Usage()\n\t\t\truntime.Goexit()\n\t\t}\n\t\tInterface.Send(*sendFrom, *sendTo, *sendAmount)\n\t}\n}", "func Run() int {\n\tflag.Parse()\n\tctx := context.Background()\n\tres := subcommands.Execute(ctx)\n\treturn int(res)\n}", "func Run(logger log.Logger, streams cmd.IOStreams, args []string) error {\n\t// NOTE: we handle the 
quiet flag here so we can fully silence cobra\n\tif checkQuiet(args) {\n\t\t// if we are in quiet mode, we want to suppress all status output\n\t\t// only streams.Out should be written to (program output)\n\t\tlogger = log.NoopLogger{}\n\t\tstreams.ErrOut = io.Discard\n\t}\n\t// actually run the command\n\tc := kind.NewCommand(logger, streams)\n\tc.SetArgs(args)\n\tif err := c.Execute(); err != nil {\n\t\tlogError(logger, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *Command) Run(args []string) int {\n\tname, opts, peers, err := c.readConfig()\n\tif err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\tc.instance, err = huton.NewInstance(name, opts...)\n\tif err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\t_, err = c.instance.Join(peers)\n\tif err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\treturn c.handleSignals()\n}", "func (c *ServeCommand) Run(args []string) int {\n\n\tcmdFlags := flag.NewFlagSet(\"serve\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { c.UI.Output(c.Help()) }\n\tenv.ConfigFlags(cmdFlags)\n\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\tfmt.Println(\"Could not parse config: \", err)\n\t\treturn 1\n\t}\n\n\tenvironment, err := env.GetEnvironment()\n\tif err != nil {\n\t\tfmt.Println(\"Could not parse config \", err)\n\t\treturn 1\n\t}\n\n\tmysql, err = env.GetConnection(environment)\n\tif err != nil {\n\t\tfmt.Println(\"Could not Get DB Connection: \", err)\n\t\treturn 1\n\t}\n\n\tfs := http.FileServer(http.Dir(\"../public\"))\n\thttp.Handle(\"/public/\", http.StripPrefix(\"/public/\", fs))\n\thttp.HandleFunc(\"/\", homeEntry)\n\thttp.HandleFunc(\"/search/do\", doSearchHandler)\n\thttp.HandleFunc(\"/rank/do\", doRankHandler)\n\terr = http.ListenAndServe(environment.HTTP, nil)\n\tif err != nil {\n\t\tfmt.Println(\"Server Failed to Start, \", err.Error())\n\t\treturn 1\n\t}\n\treturn 0\n}", "func Run(rpcCfg RPCConfig) error {\n\tconfig := DefaultConfig()\n\n\t// Parse command line 
flags.\n\tparser := flags.NewParser(&config, flags.Default)\n\tparser.SubcommandsOptional = true\n\n\t_, err := parser.Parse()\n\tif e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Parse ini file.\n\tloopDir := lncfg.CleanAndExpandPath(config.LoopDir)\n\tconfigFile := lncfg.CleanAndExpandPath(config.ConfigFile)\n\n\t// If our loop directory is set and the config file parameter is not\n\t// set, we assume that they want to point to a config file in their\n\t// loop dir. However, if the config file has a non-default value, then\n\t// we leave the config parameter as its custom value.\n\tif loopDir != loopDirBase && configFile == defaultConfigFile {\n\t\tconfigFile = filepath.Join(\n\t\t\tloopDir, defaultConfigFilename,\n\t\t)\n\t}\n\n\tif err := flags.IniParse(configFile, &config); err != nil {\n\t\t// If it's a parsing related error, then we'll return\n\t\t// immediately, otherwise we can proceed as possibly the config\n\t\t// file doesn't exist which is OK.\n\t\tif _, ok := err.(*flags.IniError); ok {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Parse command line flags again to restore flags overwritten by ini\n\t// parse.\n\t_, err = parser.Parse()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Show the version and exit if the version flag was specified.\n\tappName := filepath.Base(os.Args[0])\n\tappName = strings.TrimSuffix(appName, filepath.Ext(appName))\n\tif config.ShowVersion {\n\t\tfmt.Println(appName, \"version\", loop.Version())\n\t\tos.Exit(0)\n\t}\n\n\t// Special show command to list supported subsystems and exit.\n\tif config.DebugLevel == \"show\" {\n\t\tfmt.Printf(\"Supported subsystems: %v\\n\",\n\t\t\tlogWriter.SupportedSubsystems())\n\t\tos.Exit(0)\n\t}\n\n\t// Validate our config before we proceed.\n\tif err := Validate(&config); err != nil {\n\t\treturn err\n\t}\n\n\t// Initialize logging at the default logging level.\n\terr = 
logWriter.InitLogRotator(\n\t\tfilepath.Join(config.LogDir, defaultLogFilename),\n\t\tconfig.MaxLogFileSize, config.MaxLogFiles,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = build.ParseAndSetDebugLevels(config.DebugLevel, logWriter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Print the version before executing either primary directive.\n\tlog.Infof(\"Version: %v\", loop.Version())\n\n\tlisCfg := newListenerCfg(&config, rpcCfg)\n\n\t// Execute command.\n\tif parser.Active == nil {\n\t\tsignal.Intercept()\n\n\t\tdaemon := New(&config, lisCfg)\n\t\tif err := daemon.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tselect {\n\t\tcase <-signal.ShutdownChannel():\n\t\t\tlog.Infof(\"Received SIGINT (Ctrl+C).\")\n\t\t\tdaemon.Stop()\n\n\t\t\t// The above stop will return immediately. But we'll be\n\t\t\t// notified on the error channel once the process is\n\t\t\t// complete.\n\t\t\treturn <-daemon.ErrChan\n\n\t\tcase err := <-daemon.ErrChan:\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif parser.Active.Name == \"view\" {\n\t\treturn view(&config, lisCfg)\n\t}\n\n\treturn fmt.Errorf(\"unimplemented command %v\", parser.Active.Name)\n}", "func (cmd *Command) Run(args ...string) (err error) {\n\tfs := flag.NewFlagSet(\"dumptsmwal\", flag.ExitOnError)\n\tfs.SetOutput(cmd.Stdout)\n\tfs.BoolVar(&cmd.showDuplicates, \"show-duplicates\", false, \"prints keys with out-of-order or duplicate values\")\n\tfs.Usage = cmd.printUsage\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t} else if fs.NArg() == 0 {\n\t\tfmt.Printf(\"path required\\n\\n\")\n\t\tfs.Usage()\n\t\treturn nil\n\t}\n\n\t// Process each TSM WAL file.\n\tfor _, path := range fs.Args() {\n\t\tif err := cmd.process(path); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (a *RESTAgent) Run(args []string) int {\n\tvar address string\n\tcmdFlags := flag.NewFlagSet(\"agent\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { a.UI.Error(a.Help()) }\n\tcmdFlags.StringVar(&address, 
\"address\", \"\", \"\")\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\t//Check address\n\tif address == \"\" {\n\t\ta.UI.Error(\"Address must be specified\")\n\t\ta.UI.Error(\"\")\n\t\ta.UI.Error(a.Help())\n\t\treturn 1\n\t}\n\n\texitChannel := make(chan int)\n\tsignalChannel := make(chan os.Signal, 1)\n\tsignal.Notify(signalChannel, os.Interrupt)\n\n\tgo func() {\n\t\tfor _ = range signalChannel {\n\t\t\texitChannel <- 0\n\t\t}\n\t}()\n\n\tgo func(addr string, kvs kvstore.KVStore) {\n\t\tagent := agent.NewAgent(addr, kvs)\n\t\tagent.Start()\n\t\texitChannel <- 1\n\t}(address, a.KVStore)\n\n\texitStatus := <-exitChannel\n\tfmt.Printf(\"exiting with status %d\\n\", exitStatus)\n\treturn exitStatus\n}", "func Run(ch chan bool, cmd *exec.Cmd, dep Target) Target {\n\tannotate := func() error {\n\t\t<-ch\n\t\tInfof(\"run %v\", cmd.Args)\n\t\terr := cmd.Run()\n\t\tch <- true\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"run %v: %v\", cmd.Args, err)\n\t\t}\n\t\treturn err\n\t}\n\ttarget := newTarget(annotate, dep)\n\treturn &target // TODO\n}", "func (cli *CLI) Run() (int, error) {\n\tif cli.isHelp {\n\t\tcli.Usage()\n\t\treturn 0, nil\n\t} else if cli.isVersion {\n\t\tfmt.Println(cli.Version)\n\t\treturn 0, nil\n\t} else if cli.debug {\n\t\tlog.SetFlags(log.LstdFlags | log.Lshortfile | log.Lmicroseconds)\n\t}\n\n\treturn cli.run()\n}", "func (cmd *FigletCommand) Run(args ...string) {\n\tchannel, err := cmd.Channel(cmd.line)\n\tif err != nil {\n\t\tcmd.bot.LogError(\"FigletCommand.Run()\", err)\n\t\treturn\n\t}\n\n\tif len(args) <= 0 {\n\t\tcmd.bot.Log.WithField(\"args\", args).Debug(\"FigletCommand.Run(): No args\")\n\t\treturn\n\t}\n\n\tphrase := args[0]\n\tif phrase == \"\" {\n\t\tcmd.bot.Log.Debug(\"FigletCommand.Run(): No phrase\")\n\t\treturn\n\t}\n\n\tif output, err := exec.Command(figletPath, phrase).Output(); err != nil {\n\t\tcmd.bot.LogError(\"FigletCommand.Run()\", err)\n\t} else {\n\t\tfor _, line := range 
strings.Split(string(output), \"\\n\") {\n\t\t\tcmd.bot.Msg(cmd.conn, channel, line)\n\t\t}\n\t}\n}", "func (cxt *Context) Run(args []string) int {\n\tvar (\n\t\tout string\n\t\tverFlag bool\n\t)\n\n\t// Define option flag parse\n\tflags := flag.NewFlagSet(cxt.CommandName, flag.ContinueOnError)\n\tflags.SetOutput(cxt.Cli.ErrorWriter)\n\n\tflags.StringVar(&out, \"out\", \"\", \"output file\")\n\tflags.BoolVar(&verFlag, \"version\", false, \"Print version information and quit.\")\n\n\t// Parse commandline flag\n\tif err := flags.Parse(args[1:]); err != nil {\n\t\treturn ExitCodeError\n\t}\n\n\t// Show version\n\tif verFlag {\n\t\tcxt.Cli.OutputErrln(cxt.CommandName, \"version\", cxt.Version)\n\t\treturn ExitCodeOK\n\t}\n\n\t// Parse commandline sub-arguments\n\tinfiles := flags.Args()\n\n\t//OutputFile\n\tif len(out) > 0 {\n\t\tfile, err := os.Create(out) //maybe file path\n\t\tif err != nil {\n\t\t\tcxt.Cli.OutputErrln(err.Error())\n\t\t\treturn ExitCodeError\n\t\t}\n\t\tdefer file.Close()\n\t\tcxt.Cli.Writer = file\n\t}\n\n\t//Create Context\n\tgcatCxt := gcat.Context {Cli: cxt.Cli}\n\tgcatCxt.Cli.ModeInteract()\n\n\t//Input File\n\tif len(infiles) == 0 {\n\t\tif err := gcatCxt.Catenate(); err != nil {\n\t\t\tcxt.Cli.OutputErrln(err.Error())\n\t\t\treturn ExitCodeError\n\t\t}\n\t} else {\n\t\tfor _, infile := range infiles {\n\t\t\tfile, err := os.Open(infile) //maybe file path\n\t\t\tif err != nil {\n\t\t\t\tcxt.Cli.OutputErrln(err.Error())\n\t\t\t\treturn ExitCodeError\n\t\t\t}\n\t\t\tdefer file.Close()\n\t\t\tcxt.Cli.Reader = file\n\t\t\tif err := gcatCxt.Catenate(); err != nil {\n\t\t\t\tcxt.Cli.OutputErrln(err.Error())\n\t\t\t\treturn ExitCodeError\n\t\t\t}\n\t\t\tfile.Close()\n\t\t}\n\t}\n\n\treturn ExitCodeOK\n}", "func (o *Cmd) Run(rl lib.ReactorLog, msg lib.Msg) error {\n\n\tvar args []string\n\n\tif o.argsjson {\n\t\tfor _, parse := range o.args {\n\t\t\tif strings.Contains(parse, \"$.\") {\n\t\t\t\tnewParse := parse\n\t\t\t\tfor _, argValue := range 
strings.Split(parse, \"$.\") {\n\t\t\t\t\tif argValue == \"\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\top, _ := jq.Parse(\".\" + argValue) // create an Op\n\t\t\t\t\tvalue, _ := op.Apply(msg.Body())\n\t\t\t\t\tnewParse = strings.Replace(newParse, \"$.\"+argValue, strings.Trim(string(value), \"\\\"\"), -1)\n\t\t\t\t}\n\t\t\t\targs = append(args, newParse)\n\t\t\t} else {\n\t\t\t\targs = append(args, parse)\n\t\t\t}\n\t\t}\n\t} else {\n\t\targs = o.args\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), maximumCmdTimeLive)\n\tdefer cancel()\n\n\tvar c *exec.Cmd\n\tif len(args) > 0 {\n\t\tc = exec.CommandContext(ctx, o.cmd, args...)\n\t} else {\n\t\tc = exec.CommandContext(ctx, o.cmd)\n\t}\n\n\tc.Stdout = rl\n\tc.Stderr = rl\n\n\trunlog := fmt.Sprintf(\"RUN: %s %s\", o.cmd, strings.Join(args, \" \"))\n\tlog.Println(runlog)\n\trl.WriteStrings(runlog)\n\tif err := c.Run(); err != nil {\n\t\t// This will fail after timeout.\n\t\trl.WriteStrings(fmt.Sprintf(\"ERROR: %s\", err))\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Command) Run(ctx context.Context) error {\n\t// TODO: criteria\n\terr := c.Exec(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif c.Next != nil {\n\t\treturn c.Next.Run(ctx)\n\t}\n\n\treturn nil\n}", "func (c *HelloCommand) Run(args []string) int {\n\tif len(args) == 0 {\n\t\tfmt.Fprintln(c.ErrStream, \"Please specify name\")\n\t\treturn 1\n\t}\n\n\tfmt.Fprintf(c.OutStream, \"Hello %s\\n\", args[0])\n\treturn 0\n}", "func (c *BasicPlugin) Run(cliConnection plugin.CliConnection, args []string) {\n\t// Ensure that we called the command basic-plugin-command\n\tcfCLI = cliConnection\n\tif args[0] == \"firehose-analyzer\" {\n\t\tstartAnalyzer()\n\t}\n}", "func Run(cmd *exec.Cmd, name string, settings ...SettingsFunc) {\n\tr := DefaultRunner()\n\tr.Run(cmd, name, settings...)\n}", "func (c *Cmd) Run() error {\n\treturn c.Cmd.Run()\n}", "func (c *StartCommand) Run(args []string) int {\n\tvar vars StringMap\n\tvar projectName 
string\n\tcmdFlags := newFlagSet(\"start\", flag.ContinueOnError)\n\tcmdFlags.Var(&vars, \"v\", \"\")\n\tcmdFlags.Var(&vars, \"var\", \"\")\n\tcmdFlags.StringVar(&projectName, \"p\", \"\", \"\")\n\tcmdFlags.StringVar(&projectName, \"project\", \"\", \"\")\n\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\tc.UI.Error(err.Error())\n\t\tc.UI.Output(\"See 'sloppy start --help'.\")\n\t\treturn 1\n\t}\n\n\tif code := c.UI.ErrorNoFlagAfterArg(cmdFlags.Args()); code == 1 {\n\t\treturn code\n\t}\n\n\tif cmdFlags.NArg() < 1 {\n\t\treturn c.UI.ErrorNotEnoughArgs(\"start\", \"\", 1)\n\t}\n\n\tfile, err := os.Open(cmdFlags.Arg(0))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tc.UI.Error(fmt.Sprintf(\"file '%s' not found.\", cmdFlags.Arg(0)))\n\t\t} else if os.IsPermission(err) {\n\t\t\tc.UI.Error(fmt.Sprintf(\"no read permission '%s'.\", cmdFlags.Arg(0)))\n\t\t} else {\n\t\t\tc.UI.Error(err.Error())\n\t\t}\n\t\treturn 1\n\t}\n\tdefer file.Close()\n\n\tvar inputSource io.Reader\n\tinputSource = file\n\n\text := filepath.Ext(file.Name())\n\tif ext == \".yaml\" || ext == \".yml\" {\n\t\tnewSource, err := tryDockerCompose(file.Name(), projectName)\n\t\tif err != nil {\n\t\t\tc.UI.Error(fmt.Sprintf(\"Converting docker-compose failed: %s\", err))\n\t\t\treturn 1\n\t\t}\n\n\t\tif newSource != nil {\n\t\t\tinputSource = newSource\n\t\t}\n\t}\n\n\tdecoder := newDecoder(inputSource, vars)\n\tvar input = new(api.Project)\n\n\tswitch ext {\n\tcase \".json\":\n\t\tif err := decoder.DecodeJSON(input); err != nil {\n\t\t\tc.UI.Error(fmt.Sprintf(\"failed to parse JSON file %s\\n%s\", file.Name(), err.Error()))\n\t\t\treturn 1\n\t\t}\n\tcase \".yaml\", \".yml\":\n\t\tif err := decoder.DecodeYAML(input); err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\tdefault:\n\t\tc.UI.Error(\"file extension not supported, must be json or yaml.\")\n\t\treturn 1\n\t}\n\n\tproject, _, err := c.Projects.Create(input)\n\tif err != nil {\n\t\tc.UI.ErrorAPI(err)\n\t\treturn 
1\n\t}\n\n\tc.UI.Table(\"show\", project.Services)\n\treturn 0\n}", "func (m *Manager) Run(command []string) error {\n\topts := buildah.RunOptions{}\n\treturn m.b.Run(command, opts)\n}", "func Run(cmd *cobra.Command, args []string) error {\n\n\tif version {\n\t\tfmt.Fprintf(os.Stdout, \"v%v\\n\", VERSION)\n\t\treturn nil\n\t}\n\n\t// Input from stdin\n\tif !terminal.IsTerminal(int(syscall.Stdin)) {\n\t\tdata, err := ioutil.ReadAll(os.Stdin)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\toutput(data)\n\n\t\treturn nil\n\t}\n\n\tswitch {\n\tcase len(args) == 0:\n\t\treturn errors.New(\"file path not found\")\n\tcase len(args) > 1:\n\t\treturn errors.New(\"too many arguments\")\n\t}\n\n\t// Input from the file\n\tfile := file.File{}\n\tif err := file.Open(args[0]); err != nil {\n\t\treturn err\n\t}\n\n\toutput(file.Data)\n\n\treturn nil\n}", "func Run(args []string) (err error) {\n\tcli.VersionFlag = &cli.BoolFlag{\n\t\tName: \"version\",\n\t\tAliases: []string{\"v\"},\n\t\tUsage: \"Shows current cli version\",\n\t}\n\n\tapp.EnableBashCompletion = true\n\terr = app.Run(args)\n\treturn\n}", "func Run(ctx *cli.Context) {\n\tport = ctx.Int(\"port\")\n\n\tif len(ctx.Args()) == 1 {\n\t\tcwd = ctx.Args()[0]\n\t}\n\tif abspath, err := filepath.Abs(cwd); err == nil {\n\t\tenv.Set(internal.BaseDir, abspath)\n\t} else {\n\t\tlog.Fatalf(\"Failed to retrieve the directory: %v\", err)\n\t}\n\n\tpkg, err := build.ImportDir(cwd, build.AllowBinary)\n\tif err != nil || pkg.Name != \"main\" {\n\t\tlog.Fatalf(\"No buildable Go source files found\")\n\t}\n\tapp := new(app)\n\tapp.dir = cwd\n\tapp.binary = filepath.Join(os.TempDir(), \"rex-bin\")\n\tif runtime.GOOS == \"windows\" {\n\t\tapp.binary += \".exe\"\n\t}\n\tapp.task = ctx.String(\"task\")\n\tapp.Start()\n}", "func Run() {\n\tx := &xCmd{Ctx: app.AppCtx()}\n\tx.Flags = opt.New(\"{file}\")\n\tx.NewFlag(\"D\", \"debug\", &x.Debug)\n\tx.NewFlag(\"F\", \"sep: blank character(s) (or string under -1)\", 
&x.seps)\n\tx.NewFlag(\"1\", \"words separated by 1 run of the blank string given to -F\", &x.one)\n\targs, err := x.Parse(x.Args)\n\tif err != nil {\n\t\tapp.Warn(\"%s\", err)\n\t\tx.Usage()\n\t\tapp.Exits(\"usage\")\n\t}\n\tif len(args) != 0 {\n\t\tin := app.Files(args...)\n\t\tapp.SetIO(in, 0)\n\t}\n\tin := app.Lines(app.In())\t// to make sure we don't break a word in recvs.\n\tx.words(in, app.Out())\n\tapp.Exits(cerror(in))\n}", "func (c *CLI) Run(args []string) int {\n\tconf, err := c.loadConfig()\n\tif err != nil {\n\t\tfmt.Fprintf(c.ErrStream, \"args parse error: %v\\n\", err)\n\t\treturn ExitCodeParseError\n\t}\n\n\tp := c.preparePrompt(conf)\n\tp.Run()\n\n\t// TODO: This is dead code. Invoke os.Exit by the prompt.Run\n\treturn ExitCodeOK\n}", "func (cmd *MoveCommand) Run() {\n\tnewSrcPwd := cmdPath(cmd.client.Pwd, cmd.Source)\n\tnewTargetPwd := cmdPath(cmd.client.Pwd, cmd.Target)\n\n\tt := cmd.client.GetType(newSrcPwd)\n\tif t != client.NODE && t != client.LEAF {\n\t\tfmt.Fprintln(cmd.stderr, \"Not a valid source path: \"+newSrcPwd)\n\t\treturn\n\t}\n\n\trunCommandWithTraverseTwoPaths(cmd.client, newSrcPwd, newTargetPwd, cmd.moveSecret)\n\treturn\n}", "func Run() error {\n\t/*\n\t\tlogs.InitLogs()\n\t\tdefer logs.FlushLogs()\n\t*/\n\n\tcmd := cmd.NewJXCommand(cmdutil.NewFactory(), os.Stdin, os.Stdout, os.Stderr)\n\treturn cmd.Execute()\n}", "func (s *Stream) Run(cmd string) error {\n\t// First parse the entire command string\n\tcm, er := parser.ParseString(cmd)\n\tif er != nil {\n\t\treturn er\n\t}\n\t//spew.Dump(cm)\n\t// So now we run these commands on the file\n\tfi, er := interpreter.New(s.file)\n\tif er != nil {\n\t\treturn er\n\t}\n\treturn fi.Run(cm)\n}", "func (c *Command) Run(t *testing.T) {\n\targs := strings.Split(c.Args, \" \")\n\tif output, err := exec.Command(c.Exec, args...).CombinedOutput(); err != nil {\n\t\tt.Fatalf(\"Error executing: '%s' '%s' -err: '%v'\", c.Exec, c.Args, strings.TrimSpace(string(output)))\n\t}\n}", "func Run() 
{\n\troot.Execute()\n}", "func Run() error {\n\tdefer FuncEndingAlways(FuncStarting())\n\tparams := GetCliParams(os.Args[1:])\n\tDebugf(\"CLI Params:\\n%s\", params)\n\tif params.HelpPrinted {\n\t\treturn nil\n\t}\n\tif params.HasError() {\n\t\treturn &params\n\t}\n\tdat, err := ReadFile(params.InputFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tDebugf(\"Input File Contents:\\n%s\", dat)\n\tinput, err := ParseInput(dat)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = input.ApplyParams(params); err != nil {\n\t\treturn err\n\t}\n\tDebugf(\"Parsed Input:\\n%s\", input)\n\tanswer, err := Solve(input)\n\tif err != nil {\n\t\treturn err\n\t}\n\tStdout(\"Answer: %s\", answer)\n\treturn nil\n}", "func (z *Zest) Run() error {\n\treturn z.cli.Run(os.Args)\n}", "func (cmd *Command) Run(args ...string) error {\n\tfs := flag.NewFlagSet(\"buildtsi\", flag.ExitOnError)\n\tdataDir := fs.String(\"datadir\", \"\", \"data directory\")\n\twalDir := fs.String(\"waldir\", \"\", \"WAL directory\")\n\tfs.IntVar(&cmd.concurrency, \"concurrency\", runtime.GOMAXPROCS(0), \"Number of workers to dedicate to shard index building. Defaults to GOMAXPROCS\")\n\tfs.StringVar(&cmd.databaseFilter, \"database\", \"\", \"optional: database name\")\n\tfs.StringVar(&cmd.retentionFilter, \"retention\", \"\", \"optional: retention policy\")\n\tfs.StringVar(&cmd.shardFilter, \"shard\", \"\", \"optional: shard id\")\n\tfs.Int64Var(&cmd.maxLogFileSize, \"max-log-file-size\", tsdb.DefaultMaxIndexLogFileSize, \"optional: maximum log file size\")\n\tfs.Uint64Var(&cmd.maxCacheSize, \"max-cache-size\", tsdb.DefaultCacheMaxMemorySize, \"optional: maximum cache size\")\n\tfs.IntVar(&cmd.batchSize, \"batch-size\", defaultBatchSize, \"optional: set the size of the batches we write to the index. 
Setting this can have adverse affects on performance and heap requirements\")\n\tfs.BoolVar(&cmd.Verbose, \"v\", false, \"verbose\")\n\tfs.SetOutput(cmd.Stdout)\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t} else if fs.NArg() > 0 || *dataDir == \"\" || *walDir == \"\" {\n\t\tfs.Usage()\n\t\treturn nil\n\t}\n\tcmd.Logger = logger.New(cmd.Stderr)\n\n\treturn cmd.run(*dataDir, *walDir)\n}", "func (c *Cmd) Run() error {\n\treturn c.runInnerCommand()\n}", "func Run(root string) error {\n\tv1, err := readConfig()\n\tmust(err)\n\n\tvar context = runconf{\n\t\tFilterOut: v1.GetStringSlice(\"filterOut\"),\n\t\tLookFor: v1.GetStringSlice(\"lookFor\"),\n\t\trootPath: root,\n\t}\n\n\titerate(context)\n\treturn nil\n}", "func (cli *CLI) Run(args []string) int {\n\tvar (\n\t\tconfPath string\n\t\tdate string\n\t\tversion bool\n\t)\n\n\t// Define option flag parse\n\tflags := flag.NewFlagSet(Name, flag.ContinueOnError)\n\tflags.SetOutput(cli.ErrStream)\n\tflags.StringVar(&confPath, \"conf\", \"\", \"slack-summary config file path.\")\n\tflags.StringVar(&confPath, \"c\", \"\", \"slack-summary config file path.(Short)\")\n\tflags.StringVar(&date, \"date\", today(), \"Day of history get.\")\n\tflags.StringVar(&date, \"d\", today(), \"Day of history get.(Short)\")\n\tflags.BoolVar(&version, \"version\", false, \"Print version information and quit.\")\n\tflags.BoolVar(&version, \"v\", false, \"Print version information and quit.(Short)\")\n\n\t// Parse commandline flag\n\tif err := flags.Parse(args[1:]); err != nil {\n\t\treturn ExitCodeError\n\t}\n\n\t// Show version\n\tif version {\n\t\tfmt.Fprintf(cli.ErrStream, \"%s version %s\\n\", Name, Version)\n\t\treturn ExitCodeOK\n\t}\n\n\tif confPath == \"\" {\n\t\tfmt.Fprint(cli.ErrStream, \"conf file is required\\n\")\n\t\treturn ExitCodeError\n\t}\n\n\tconf, err := LoadConfToml(confPath)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn ExitCodeError\n\t}\n\n\tif err := doSummarySend(conf, date); err != nil 
{\n\t\tfmt.Println(err)\n\t\treturn ExitCodeError\n\t}\n\n\t_ = conf\n\t_ = date\n\n\treturn ExitCodeOK\n}", "func (app *App) run() error {\n\targs := app.flags.Args()\n\tif len(args) == 0 {\n\t\treturn fmt.Errorf(\"%s needs a command\", os.Args[0])\n\t}\n\tvar (\n\t\tcommand = args[0]\n\t\tcommands = app.commands()\n\t)\n\trun, ok := commands[command]\n\tif !ok {\n\t\treturn errors.New(\"unrecognized command: \" + command)\n\t}\n\treturn run(args[1:])\n}", "func (h *FuncHandler) Run(run func(cmd *cobra.Command, args []string)) func(cmd *cobra.Command, args []string) {\n\treturn func(cmd *cobra.Command, args []string) {\n\t\trun(cmd, args)\n\t}\n}", "func Run(fn interface{}, args ...string) error {\n\tfd, err := parseFuncDesc(fn)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := fd.parseArgs(args); err != nil {\n\t\treturn err\n\t}\n\treturn fd.call()\n}", "func (o *ExecuteOptions) Run() error {\n\treturn o.Cmd.Help()\n}", "func Run() {\n\tvar toc toc\n\t// Create a FlagSet and sets the usage\n\tfs := flag.NewFlagSet(filepath.Base(os.Args[0]), flag.ExitOnError)\n\n\t// Configure the options from the flags/config file\n\topts, err := config.ConfigureOptions(fs, os.Args[1:])\n\tif err != nil {\n\t\tconfig.UsageAndExit(err)\n\t}\n\n\tif opts.ShowHelp {\n\t\tconfig.HelpAndExit()\n\t}\n\n\ttoc.Options.Path = opts.Path\n\ttoc.Options.Bulleted = opts.Bulleted\n\ttoc.Options.Append = opts.Append\n\ttoc.Options.Skip = opts.Skip\n\ttoc.Options.Depth = opts.Depth\n\n\ttoc.logic()\n}", "func Run(cmd *exec.Cmd) error {\n\treturn DefaultRunner.Run(cmd)\n}", "func (r *Runner) Run(ctx context.Context, node syntax.Node) error {\n\tif !r.didReset {\n\t\tr.Reset()\n\t}\n\tr.err = nil\n\tr.filename = \"\"\n\tswitch x := node.(type) {\n\tcase *syntax.File:\n\t\tr.filename = x.Name\n\t\tr.stmts(ctx, x.StmtList)\n\tcase *syntax.Stmt:\n\t\tr.stmt(ctx, x)\n\tcase syntax.Command:\n\t\tr.cmd(ctx, x)\n\tdefault:\n\t\treturn fmt.Errorf(\"Node can only be File, Stmt, or Command: %T\", 
x)\n\t}\n\tif r.exit > 0 {\n\t\tr.setErr(ExitStatus(r.exit))\n\t}\n\treturn r.err\n}", "func (c *CLI) Run(args []string) int {\n\tparam := &param{}\n\terr := c.parseArgs(args[1:], param)\n\tif err != nil {\n\t\tfmt.Fprintf(c.ErrStream, \"args parse error: %v\", err)\n\t\treturn ExitCodeParseError\n\t}\n\n\tserver, err := NewServer(param.file)\n\tif err != nil {\n\t\tfmt.Fprintf(c.ErrStream, \"invalid args. failed to initialize server: %v\", err)\n\t\treturn ExitCodeInvalidArgsError\n\t}\n\n\tif err := server.PrepareServer(); err != nil {\n\t\tfmt.Fprintf(c.ErrStream, \"failed to setup server: %v\", err)\n\t\treturn ExitCodeSetupServerError\n\t}\n\n\tif err := server.Run(param.port); err != nil {\n\t\tfmt.Fprintf(c.ErrStream, \"failed from server: %v\", err)\n\t\treturn ExitCodeError\n\t}\n\treturn ExitCodeOK\n}", "func (s *Step) Run(ctx context.Context, _ *Project) (int, error) {\n\t// For now, just route output to stdout.\n\targs := parseArgs(s.Args)\n\tcmd := exec.CommandContext(ctx, s.Command, *args...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stdout\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatalf(\"Build command failed: %s\\n\", err.Error())\n\t\treturn 0, err\n\t}\n\n\treturn 0, nil\n}", "func (p *Pipeline) Run(args []string) int {\n\tif err := p.LoadConfig(); err != nil {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func (cli *CLI) Run(args []string) int {\n\n\tvar (\n\t\towner string\n\t\trepo string\n\t\ttoken string\n\n\t\tcommitish string\n\t\tname string\n\t\tbody string\n\t\tdraft bool\n\t\tprerelease bool\n\n\t\tparallel int\n\n\t\trecreate bool\n\t\treplace bool\n\t\tsoft bool\n\n\t\tstat bool\n\t\tversion bool\n\t\tdebug bool\n\n\t\tgeneratenotes bool\n\t)\n\n\tflags := flag.NewFlagSet(Name, flag.ContinueOnError)\n\tflags.SetOutput(cli.errStream)\n\tflags.Usage = func() {\n\t\tfmt.Fprint(cli.errStream, helpText)\n\t}\n\n\tflags.StringVar(&owner, \"username\", \"\", \"\")\n\tflags.StringVar(&owner, \"owner\", \"\", 
\"\")\n\tflags.StringVar(&owner, \"u\", \"\", \"\")\n\n\tflags.StringVar(&repo, \"repository\", \"\", \"\")\n\tflags.StringVar(&repo, \"r\", \"\", \"\")\n\n\tflags.StringVar(&token, \"token\", os.Getenv(EnvGitHubToken), \"\")\n\tflags.StringVar(&token, \"t\", os.Getenv(EnvGitHubToken), \"\")\n\n\tflags.StringVar(&commitish, \"commitish\", \"\", \"\")\n\tflags.StringVar(&commitish, \"c\", \"\", \"\")\n\n\tflags.StringVar(&name, \"name\", \"\", \"\")\n\tflags.StringVar(&name, \"n\", \"\", \"\")\n\n\tflags.StringVar(&body, \"body\", \"\", \"\")\n\tflags.StringVar(&body, \"b\", \"\", \"\")\n\n\tflags.BoolVar(&draft, \"draft\", false, \"\")\n\tflags.BoolVar(&prerelease, \"prerelease\", false, \"\")\n\n\tflags.IntVar(&parallel, \"parallel\", defaultParallel, \"\")\n\tflags.IntVar(&parallel, \"p\", defaultParallel, \"\")\n\n\tflags.BoolVar(&recreate, \"delete\", false, \"\")\n\tflags.BoolVar(&recreate, \"recreate\", false, \"\")\n\n\tflags.BoolVar(&replace, \"replace\", false, \"\")\n\n\tflags.BoolVar(&soft, \"soft\", false, \"\")\n\n\tflags.BoolVar(&version, \"version\", false, \"\")\n\tflags.BoolVar(&version, \"v\", false, \"\")\n\n\tflags.BoolVar(&debug, \"debug\", false, \"\")\n\n\tflags.BoolVar(&generatenotes, \"generatenotes\", false, \"\")\n\n\t// Deprecated\n\tflags.BoolVar(&stat, \"stat\", false, \"\")\n\n\t// Parse flags\n\tif err := flags.Parse(args[1:]); err != nil {\n\t\treturn ExitCodeParseFlagsError\n\t}\n\n\tif debug {\n\t\tos.Setenv(EnvDebug, \"1\")\n\t\tDebugf(\"Run as DEBUG mode\")\n\t}\n\n\t// Show version and check latest version release\n\tif version {\n\t\tfmt.Fprint(cli.outStream, OutputVersion())\n\t\treturn ExitCodeOK\n\t}\n\n\tparsedArgs := flags.Args()\n\tDebugf(\"parsed args : %s\", parsedArgs)\n\tvar tag, path string\n\tswitch len(parsedArgs) {\n\tcase 1:\n\t\ttag, path = parsedArgs[0], \"\"\n\tcase 2:\n\t\ttag, path = parsedArgs[0], parsedArgs[1]\n\tdefault:\n\t\tPrintRedf(cli.errStream,\n\t\t\t\"Invalid number of arguments: you must set a 
git TAG and optionally a PATH.\\n\")\n\t\treturn ExitCodeBadArgs\n\t}\n\n\t// Extract github repository owner username.\n\t// If it's not provided via command line flag, read it from .gitconfig\n\t// (github user or git user).\n\tif len(owner) == 0 {\n\t\torigin, err := gitconfig.OriginURL()\n\t\tif err == nil {\n\t\t\towner = retrieveOwnerName(origin)\n\t\t}\n\t\tif len(owner) == 0 {\n\t\t\towner, err = gitconfig.GithubUser()\n\t\t\tif err != nil {\n\t\t\t\towner, err = gitconfig.Username()\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tPrintRedf(cli.errStream,\n\t\t\t\t\t\"Failed to set up ghr: repository owner name not found\\n\")\n\t\t\t\tfmt.Fprintf(cli.errStream,\n\t\t\t\t\t\"Please set it via `-u` option.\\n\\n\"+\n\t\t\t\t\t\t\"You can set default owner name in `github.username` or `user.name`\\n\"+\n\t\t\t\t\t\t\"in `~/.gitconfig` file\\n\")\n\t\t\t\treturn ExitCodeOwnerNotFound\n\t\t\t}\n\t\t}\n\t}\n\tDebugf(\"Owner: %s\", owner)\n\n\t// Extract repository name from files.\n\t// If not provided, read it from .git/config file.\n\tif len(repo) == 0 {\n\t\tvar err error\n\t\trepo, err = gitconfig.Repository()\n\t\tif err != nil {\n\t\t\tPrintRedf(cli.errStream,\n\t\t\t\t\"Failed to set up ghr: repository name not found\\n\")\n\t\t\tfmt.Fprintf(cli.errStream,\n\t\t\t\t\"ghr reads it from `.git/config` file. 
Change directory to \\n\"+\n\t\t\t\t\t\"repository root directory or setup git repository.\\n\"+\n\t\t\t\t\t\"Or set it via `-r` option.\\n\")\n\t\t\treturn ExitCodeRepoNotFound\n\t\t}\n\t}\n\tDebugf(\"Repository: %s\", repo)\n\n\t// If GitHub API token is not provided via command line flag\n\t// or env var then read it from .gitconfig file.\n\tif len(token) == 0 {\n\t\tvar err error\n\t\ttoken, err = gitconfig.GithubToken()\n\t\tif err != nil {\n\t\t\tPrintRedf(cli.errStream, \"Failed to set up ghr: token not found\\n\")\n\t\t\tfmt.Fprintf(cli.errStream,\n\t\t\t\t\"To use ghr, you need a GitHub API token.\\n\"+\n\t\t\t\t\t\"Please set it via `%s` env var or `-t` option.\\n\\n\"+\n\t\t\t\t\t\"If you don't have one, visit official doc (goo.gl/jSnoI)\\n\"+\n\t\t\t\t\t\"and get it first.\\n\",\n\t\t\t\tEnvGitHubToken)\n\t\t\treturn ExitCodeTokenNotFound\n\t\t}\n\t}\n\tDebugf(\"Github API Token: %s\", maskString(token))\n\n\t// Set Base GitHub API URL. Base URL can also be provided via env var for use with GHE.\n\tbaseURLStr := defaultBaseURL\n\tif urlStr := os.Getenv(EnvGitHubAPI); len(urlStr) != 0 {\n\t\tbaseURLStr = urlStr\n\t}\n\tDebugf(\"Base GitHub API URL: %s\", baseURLStr)\n\n\tif parallel <= 0 {\n\t\tparallel = runtime.NumCPU()\n\t}\n\tDebugf(\"Parallel factor: %d\", parallel)\n\n\tlocalAssets, err := LocalAssets(path)\n\tif err != nil {\n\t\tPrintRedf(cli.errStream,\n\t\t\t\"Failed to find assets from %s: %s\\n\", path, err)\n\t\treturn ExitCodeError\n\t}\n\tDebugf(\"Number of file to upload: %d\", len(localAssets))\n\n\t// Create a GitHub client\n\tgitHubClient, err := NewGitHubClient(owner, repo, token, baseURLStr)\n\tif err != nil {\n\t\tPrintRedf(cli.errStream, \"Failed to construct GitHub client: %s\\n\", err)\n\t\treturn ExitCodeError\n\t}\n\n\tghr := GHR{\n\t\tGitHub: gitHubClient,\n\t\toutStream: cli.outStream,\n\t}\n\n\tDebugf(\"Name: %s\", name)\n\n\t// Prepare create release request\n\treq := &github.RepositoryRelease{\n\t\tName: 
github.String(name),\n\t\tTagName: github.String(tag),\n\t\tPrerelease: github.Bool(prerelease),\n\t\tDraft: github.Bool(draft),\n\t\tTargetCommitish: github.String(commitish),\n\t\tBody: github.String(body),\n\t\tGenerateReleaseNotes: github.Bool(generatenotes),\n\t}\n\n\tctx := context.TODO()\n\n\tif soft {\n\t\t_, err := ghr.GitHub.GetRelease(ctx, *req.TagName)\n\n\t\tif err == nil {\n\t\t\tfmt.Fprintf(cli.outStream, \"ghr aborted since tag `%s` already exists\\n\", *req.TagName)\n\t\t\treturn ExitCodeOK\n\t\t}\n\n\t\tif !errors.Is(err, ErrReleaseNotFound) {\n\t\t\tPrintRedf(cli.errStream, \"Failed to get GitHub release: %s\\n\", err)\n\t\t\treturn ExitCodeError\n\t\t}\n\t}\n\n\trelease, err := ghr.GitHub.GetDraftRelease(ctx, tag)\n\tif err != nil {\n\t\tPrintRedf(cli.errStream, \"Failed to get draft release: %s\\n\", err)\n\t\treturn ExitCodeError\n\t}\n\tif release == nil {\n\t\trelease, err = ghr.CreateRelease(ctx, req, recreate)\n\t\tif err != nil {\n\t\t\tPrintRedf(cli.errStream, \"Failed to create GitHub release page: %s\\n\", err)\n\t\t\treturn ExitCodeError\n\t\t}\n\t}\n\n\tif replace {\n\t\terr := ghr.DeleteAssets(ctx, *release.ID, localAssets, parallel)\n\t\tif err != nil {\n\t\t\tPrintRedf(cli.errStream, \"Failed to delete existing assets: %s\\n\", err)\n\t\t\treturn ExitCodeError\n\t\t}\n\t}\n\n\t// FIXME(tcnksm): More ideal way to change this\n\t// This is for Github enterprise\n\tif err := ghr.GitHub.SetUploadURL(*release.UploadURL); err != nil {\n\t\tfmt.Fprintf(cli.errStream, \"Failed to set upload URL %s: %s\\n\", *release.UploadURL, err)\n\t\treturn ExitCodeError\n\t}\n\n\terr = ghr.UploadAssets(ctx, *release.ID, localAssets, parallel)\n\tif err != nil {\n\t\tPrintRedf(cli.errStream, \"Failed to upload one of assets: %s\\n\", err)\n\t\treturn ExitCodeError\n\t}\n\n\tif !draft {\n\t\t_, err := ghr.GitHub.EditRelease(ctx, *release.ID, &github.RepositoryRelease{\n\t\t\tDraft: github.Bool(false),\n\t\t})\n\t\tif err != nil 
{\n\t\t\tPrintRedf(cli.errStream, \"Failed to publish release: %s\\n\", err)\n\t\t\treturn ExitCodeError\n\t\t}\n\t}\n\n\treturn ExitCodeOK\n}", "func (y *Handler) Run(ctx context.Context, direction string, params []string) error {\n\tvar err error\n\n\ty.text, y.isDictionary, err = buildTextWithDictionary(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = y.setLanguages(ctx, direction)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// concurrent translation and spelling check requests\n\tresults, err := y.translationAndSpelling(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresult.Show(results)\n\treturn nil\n}", "func (r *Runner) Run(ctx context.Context) error {\n\treturn errors.New(\"not implemented\")\n}", "func (g *UICommand) Run() error {\n\tlog.WithField(\"g\", g).Trace(\n\t\t\"Running cli command\",\n\t)\n\n\tuiServer := meshservice.NewUIServer(\n\t\tg.meshConfig.Agent.GRPCSocket,\n\t\tg.meshConfig.UI.HTTPBindAddr,\n\t\tg.meshConfig.UI.HTTPBindPort)\n\tuiServer.Serve()\n\n\treturn nil\n}", "func Run(dir, command string, flags ...string) error {\n\tcmd := exec.Command(command, flags...)\n\tcmd.Dir = dir\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to run command: %s %v\", command, err)\n\t}\n\treturn nil\n}", "func (cli *CLI) Run() {\n\tcli.validateArgs()\n\n\tnodeID := os.Getenv(\"NODE_ID\")\n\tif nodeID == \"\" {\n\t\tfmt.Printf(\"NODE_ID env. 
var is not set!\")\n\t\tos.Exit(1)\n\t}\n\n\tinventoryCmd := flag.NewFlagSet(\"inventory\", flag.ExitOnError)\n\tcreateBlockchainCmd := flag.NewFlagSet(\"createblockchain\", flag.ExitOnError)\n\tcreateWalletCmd := flag.NewFlagSet(\"createwallet\", flag.ExitOnError)\n\tcreateOrgCmd := flag.NewFlagSet(\"createorg\", flag.ExitOnError)\n\tlistOrgCmd := flag.NewFlagSet(\"listorg\", flag.ExitOnError)\n\tlistAddressesCmd := flag.NewFlagSet(\"listaddresses\", flag.ExitOnError)\n\tprintChainCmd := flag.NewFlagSet(\"printchain\", flag.ExitOnError)\n\treindexUTXOCmd := flag.NewFlagSet(\"reindexutxo\", flag.ExitOnError)\n\tsendCmd := flag.NewFlagSet(\"send\", flag.ExitOnError)\n\taddProductsCmd := flag.NewFlagSet(\"addproducts\", flag.ExitOnError)\n\tlistProductsCmd := flag.NewFlagSet(\"listproducts\", flag.ExitOnError)\n\tproduceProductsCmd := flag.NewFlagSet(\"produceproducts\", flag.ExitOnError)\n\tgetItemDetailsCmd := flag.NewFlagSet(\"getitemdetails\", flag.ExitOnError)\n\tstartNodeCmd := flag.NewFlagSet(\"startnode\", flag.ExitOnError)\n\n\tinventoryAddress := inventoryCmd.String(\"address\", \"\", \"The address to get inventory for\")\n\tcreateBlockchainName := createBlockchainCmd.String(\"name\", \"\", \"Name of the organisation\")\n\tcreateBlockchainPublicKey := createBlockchainCmd.String(\"publickey\", \"\", \"PublicKey of the organisation\")\n\tcreateBlockchainGSTIN := createBlockchainCmd.String(\"gstin\", \"\", \"GSTIN of the organisation\")\n\tcreateBlockchainPrefix := createBlockchainCmd.String(\"prefix\", \"\", \"Prefix of the organisation\")\n\tproduceProductsAddress := produceProductsCmd.String(\"address\", \"\", \"The address to send produced product to\")\n\tcreateOrgAdminAddr := createOrgCmd.String(\"address\", \"\", \"Address of the admin\")\n\tcreateOrgName := createOrgCmd.String(\"name\", \"\", \"Name of the organisation\")\n\tcreateOrgPublicKey := createOrgCmd.String(\"publickey\", \"\", \"PublicKey of the organisation\")\n\tcreateOrgGSTIN := 
createOrgCmd.String(\"gstin\", \"\", \"GSTIN of the organisation\")\n\tcreateOrgPrefix := createOrgCmd.String(\"prefix\", \"\", \"Prefix of the organisation\")\n\tcreateOrgRole := createOrgCmd.String(\"role\", \"\", \"Role of the organisation\")\n\tsendFrom := sendCmd.String(\"from\", \"\", \"Source wallet address\")\n\tsendTo := sendCmd.String(\"to\", \"\", \"Destination wallet address\")\n\tsendProduct := sendCmd.String(\"products\", \"\", \"Item to send\")\n\tsendMine := sendCmd.Bool(\"mine\", false, \"Mine immediately on the same node\")\n\taddProductAddress := addProductsCmd.String(\"address\", \"\", \"Product name\")\n\taddProductsName := addProductsCmd.String(\"names\", \"\", \"Product names\")\n\tlistProductsAddress := listProductsCmd.String(\"address\", \"\", \"Source Wallet Address\")\n\tcProducts := produceProductsCmd.String(\"codes\", \"\", \"Code of products to produce\")\n\tcItem := getItemDetailsCmd.String(\"product\", \"\", \"Item to find detail of\")\n\n\tswitch os.Args[1] {\n\tcase \"inventory\":\n\t\terr := inventoryCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"createblockchain\":\n\t\terr := createBlockchainCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"createwallet\":\n\t\terr := createWalletCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"createorg\":\n\t\terr := createOrgCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"listorg\":\n\t\terr := listOrgCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"listaddresses\":\n\t\terr := listAddressesCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"printchain\":\n\t\terr := printChainCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"send\":\n\t\terr := sendCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"reindexutxo\":\n\t\terr := 
reindexUTXOCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"addproducts\":\n\t\terr := addProductsCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"listproducts\":\n\t\terr := listProductsCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"produceproducts\":\n\t\terr := produceProductsCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"getitemdetails\":\n\t\terr := getItemDetailsCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tcase \"startnode\":\n\t\terr := startNodeCmd.Parse(os.Args[2:])\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\tdefault:\n\t\tcli.printUsage()\n\t\tos.Exit(1)\n\t}\n\n\tif inventoryCmd.Parsed() {\n\t\tif *inventoryAddress == \"\" {\n\t\t\tinventoryCmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tcli.getInventory(*inventoryAddress, nodeID)\n\t}\n\n\tif createBlockchainCmd.Parsed() {\n\t\tif *createBlockchainName == \"\" || *createBlockchainPublicKey == \"\" || *createBlockchainGSTIN == \"\" || *createBlockchainPrefix == \"\" {\n\t\t\tcreateBlockchainCmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tcli.createBlockchain(*createBlockchainName, *createBlockchainPublicKey, *createBlockchainGSTIN, *createBlockchainPrefix, nodeID)\n\t}\n\n\tif createWalletCmd.Parsed() {\n\t\tcli.createWallet(nodeID)\n\t}\n\n\tif createOrgCmd.Parsed() {\n\t\tif *createOrgAdminAddr == \"\" || *createOrgName == \"\" || *createOrgPublicKey == \"\" || *createOrgGSTIN == \"\" || *createOrgPrefix == \"\" || *createOrgRole == \"\" {\n\t\t\tcreateOrgCmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tcli.createOrg(*createOrgAdminAddr, *createOrgName, *createOrgPublicKey, *createOrgGSTIN, *createOrgPrefix, *createOrgRole, nodeID)\n\t}\n\n\tif listOrgCmd.Parsed() {\n\t\tcli.listOrganisations(nodeID)\n\t}\n\n\tif listAddressesCmd.Parsed() {\n\t\tcli.listAddresses(nodeID)\n\t}\n\n\tif printChainCmd.Parsed() 
{\n\t\tcli.printTransactionChain(nodeID)\n\t}\n\n\tif reindexUTXOCmd.Parsed() {\n\t\tcli.reindexUTXO(nodeID)\n\t}\n\n\tif sendCmd.Parsed() {\n\t\tif *sendFrom == \"\" || *sendTo == \"\" || *sendProduct == \"\" {\n\t\t\tsendCmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tcli.send(*sendFrom, *sendTo, *sendProduct, nodeID, *sendMine)\n\t}\n\n\tif addProductsCmd.Parsed() {\n\t\tif *addProductAddress == \"\" || *addProductsName == \"\" {\n\t\t\taddProductsCmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tcli.addProducts(*addProductAddress, *addProductsName, nodeID)\n\t}\n\n\tif listProductsCmd.Parsed() {\n\t\tif *listProductsAddress == \"\" {\n\t\t\tlistProductsCmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tcli.listProducts(*listProductsAddress, nodeID)\n\t}\n\n\tif produceProductsCmd.Parsed() {\n\t\tif *produceProductsAddress == \"\" || *cProducts == \"\" {\n\t\t\tproduceProductsCmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tcli.produceProducts(*produceProductsAddress, *cProducts, nodeID)\n\t}\n\n\tif getItemDetailsCmd.Parsed() {\n\t\tif *cItem == \"\" {\n\t\t\tgetItemDetailsCmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tcli.getItemDetails(*cItem, nodeID)\n\t}\n\n\tif startNodeCmd.Parsed() {\n\t\tcli.startNode(nodeID)\n\t}\n\n}" ]
[ "0.72419333", "0.7218792", "0.6972016", "0.69717157", "0.69462645", "0.69454765", "0.68706745", "0.68009824", "0.67415124", "0.6704121", "0.6692788", "0.66791135", "0.65891695", "0.6573109", "0.6557353", "0.6537066", "0.65309674", "0.6517296", "0.651638", "0.6514274", "0.65041846", "0.6487788", "0.6476409", "0.6460346", "0.6423775", "0.6417304", "0.6417251", "0.64022005", "0.6401635", "0.63990444", "0.6397913", "0.63962495", "0.6395776", "0.63877916", "0.6382428", "0.63786954", "0.636048", "0.6348907", "0.6348453", "0.63440543", "0.63405335", "0.63291687", "0.63252056", "0.63201517", "0.6316856", "0.6314574", "0.6307929", "0.62958056", "0.62930965", "0.6275965", "0.6274991", "0.6266862", "0.6264615", "0.6258852", "0.62449473", "0.62393165", "0.6230477", "0.6222519", "0.62186056", "0.6217057", "0.62054485", "0.6203614", "0.6197812", "0.61865973", "0.61707073", "0.61683965", "0.6133538", "0.6116797", "0.6108116", "0.61016357", "0.6083423", "0.6083323", "0.60799617", "0.6056398", "0.60463774", "0.6043048", "0.6029876", "0.6015457", "0.6013877", "0.6005311", "0.60046273", "0.6002437", "0.5998411", "0.59966606", "0.59741104", "0.5972019", "0.5962243", "0.5960452", "0.59604293", "0.5959489", "0.59575886", "0.5953145", "0.5948391", "0.5948323", "0.59431833", "0.5942956", "0.5942848", "0.5941105", "0.5939455", "0.5938154", "0.59350693" ]
0.0
-1
Names returns the names including short names and aliases.
func (cmd *Command) Names() []string { return append([]string{cmd.Name}, cmd.Aliases...) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c Command) Names() []string {\n\tnames := []string{c.Name}\n\n\tif c.ShortName != \"\" {\n\t\tnames = append(names, c.ShortName)\n\t}\n\n\treturn append(names, c.Aliases...)\n}", "func (a *Aliases) ShortNames() ShortNames {\n\ta.mx.RLock()\n\tdefer a.mx.RUnlock()\n\n\tm := make(ShortNames, len(a.Alias))\n\tfor alias, gvr := range a.Alias {\n\t\tif v, ok := m[gvr]; ok {\n\t\t\tm[gvr] = append(v, alias)\n\t\t} else {\n\t\t\tm[gvr] = []string{alias}\n\t\t}\n\t}\n\n\treturn m\n}", "func (a *Aliens) Names() []string {\n\tnames := make([]string, len(*a))\n\tfor i, alien := range *a {\n\t\tnames[i] = alien.Name\n\t}\n\treturn names\n}", "func (ss Suggestions) Names() []string {\n\tvar results []string\n\tfor _, s := range ss {\n\t\tresults = append(results, s.Name)\n\t}\n\treturn results\n}", "func (o GetAscriptsResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetAscriptsResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (flag *SliceFlag[T]) Names() []string {\n\treturn append([]string{flag.Name}, flag.Aliases...)\n}", "func (c *Command) Names() []string {\n\tnames := strings.Split(c.Name, \",\")\n\tfor i, name := range names {\n\t\tnames[i] = strings.TrimSpace(name)\n\t}\n\treturn names\n}", "func (a Authorizers) Names() []string {\n\tresult := make([]string, 0, len(a))\n\tfor k, _ := range a {\n\t\tresult = append(result, k)\n\t}\n\treturn result\n}", "func AllNames() []string {\n\tret := make([]string, 0, len(unitByName))\n\tfor n := range unitByName {\n\t\tif n == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tret = append(ret, n)\n\t}\n\tsort.Strings(ret)\n\treturn ret\n}", "func (o GetAlarmContactsResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetAlarmContactsResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (rs Repos) Names() []string {\n\tnames := make([]string, len(rs))\n\tfor i := range rs {\n\t\tnames[i] = rs[i].Name\n\t}\n\treturn names\n}", "func (p 
*BaseProvider) Names() []string {\n\tnames := make([]string, 0, len(p.defs))\n\n\tfor name := range p.defs {\n\t\tnames = append(names, name)\n\t}\n\n\tsort.Strings(names)\n\n\treturn names\n}", "func (es ExternalServices) DisplayNames() []string {\n\tnames := make([]string, len(es))\n\tfor i := range es {\n\t\tnames[i] = es[i].DisplayName\n\t}\n\treturn names\n}", "func (o GetSecretsResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetSecretsResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (c *locationClient) Names() []string {\n\tlocs, err := c.All(context.Background())\n\tif err != nil || len(locs) == 0 {\n\t\treturn nil\n\t}\n\tnames := make([]string, len(locs))\n\tfor i, loc := range locs {\n\t\tname := loc.Name\n\t\tif name == \"\" {\n\t\t\tname = strconv.FormatInt(loc.ID, 10)\n\t\t}\n\t\tnames[i] = name\n\t}\n\treturn names\n}", "func (o GetRegistryEnterpriseNamespacesResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetRegistryEnterpriseNamespacesResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (o GetOpenApiPricingModulesResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetOpenApiPricingModulesResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (o GetAvailabilityZonesResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetAvailabilityZonesResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (r *Registry) Names() []string {\n\tkeys := []string{}\n\tfor key := range r.registrants {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (o GetFoldersResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetFoldersResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func Names(v interface{}) []string {\n\treturn New(v).Names()\n}", "func (c *Client) AliasNames() ([]string, error) {\n\tclient := 
c.client\n\tinfo, err := client.Aliases().Index(\"_all\").Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tins := info.Indices\n\tvar aliasNames []string\n\tfor _, v := range ins {\n\t\tfor _, x := range v.Aliases {\n\t\t\taliasNames = append(aliasNames, x.AliasName)\n\t\t}\n\t}\n\n\tsort.Strings(aliasNames)\n\treturn aliasNames, nil\n}", "func (c *ContainerContext) Names() string {\n\tnames := formatter.StripNamePrefix(c.c.Names)\n\tif c.trunc {\n\t\tfor _, name := range names {\n\t\t\tif len(strings.Split(name, \"/\")) == 1 {\n\t\t\t\tnames = []string{name}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn strings.Join(names, \",\")\n}", "func (e *Extractor) Names() (out []string, err error) {\n\n\tif err := e.isValidStruct(); err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := reflect.ValueOf(e.StructAddr).Elem()\n\tfields := e.fields(s)\n\tfor _, field := range fields {\n\t\tout = append(out, field.name)\n\t}\n\n\treturn\n}", "func Names() []string {\n\t// TODO eliminate duplicates\n\tvar names []string\n\tfor _, f := range factories {\n\t\tnames = append(names, f.Names()...)\n\t}\n\treturn names\n}", "func (r *REST) ShortNames() []string {\n\treturn []string{\"pvc\"}\n}", "func Names() []string {\n\tout := []string{}\n\tfor name := range Registry {\n\t\tout = append(out, name)\n\t}\n\tsort.Strings(out)\n\treturn out\n}", "func (pl List) names() []string {\n\tret := make([]string, len(pl))\n\tfor i, p := range pl {\n\t\tret[i] = p.String()\n\t}\n\treturn ret\n}", "func (o GetTopicSubscriptionsResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetTopicSubscriptionsResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (t Tags) Names() []string {\n\tset := map[string]string{}\n\tfor _, s := range t {\n\t\tset[s.Name] = \"\"\n\t}\n\n\tres := make([]string, 0, len(set))\n\n\tfor k := range set {\n\t\tres = append(res, k)\n\t}\n\n\tsort.Strings(res)\n\n\treturn res\n}", "func Names(professions []Profession) []string 
{\n\tvar names []string\n\n\tfor _, p := range professions {\n\t\tnames = append(names, p.Name)\n\t}\n\n\treturn names\n}", "func Names() []string {\n\tvar names []string\n\tfor name := range directory {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\treturn names\n}", "func (s Scope) Names() []string {\n\tnames := make([]string, s.Count())\n\ti := 0\n\tfor e := s.Enumerator(); e.MoveNext(); {\n\t\tnames[i], _ = e.Current()\n\t\ti++\n\t}\n\treturn names\n}", "func (o GetNodeTypesResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetNodeTypesResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (a Fields) Names() []string {\n\tnames := make([]string, len(a))\n\tfor i, f := range a {\n\t\tnames[i] = f.Name\n\t}\n\treturn names\n}", "func (s *Structx) Names() []string {\n\tvar result []string\n\n\tfs := s.Fields()\n\tfor _, v := range fs {\n\t\tresult = append(result, v.Name())\n\t}\n\n\treturn result\n}", "func (o GetSecondaryIndexesResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetSecondaryIndexesResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (n Notes) Names() []string {\n\tnames := make([]string, 0)\n\tfor _, v := range n {\n\t\tnames = append(names, v.Name())\n\t}\n\treturn names\n}", "func (rs *Resources) Names() []string {\n\tnames := []string{}\n\tfor _, r := range rs {\n\t\tnames = append(names, r.list.Name())\n\t}\n\treturn names\n}", "func (o GetDatabasesResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetDatabasesResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (d *DescriptorService) ListNames(prefixes ...string) ([]string, error) {\n\tprefix := path.Join(prefixes...)\n\treturn d.Store.ListDir(prefix + \"/\")\n}", "func (o *Service) GetNames() []string {\n\tif o == nil || o.Names == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.Names\n}", "func (o 
GetInfrastructureConfigurationsResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetInfrastructureConfigurationsResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (o GetServiceTopicsResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetServiceTopicsResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (mfbp ProviderList) Names() []string {\n\tkeys := []string{}\n\tfor k := range mfbp {\n\t\tkeys = append(keys, k)\n\t}\n\n\tsort.Strings(keys)\n\n\treturn keys\n}", "func (o GetFlowlogsResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetFlowlogsResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func FriendlyNames(ts []Target) []string {\n\tstrs := []string{}\n\tfor _, t := range ts {\n\t\tstrs = append(strs, t.FriendlyName())\n\t}\n\treturn strs\n}", "func (o GetAlidnsDomainGroupsResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetAlidnsDomainGroupsResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func (p ByName) AltNames() []string {\n\tan := make([]string, len(p.altNames))\n\tcopy(an, p.altNames)\n\treturn an\n}", "func UnitNames() [7]string {\n\treturn unitNames\n}", "func (solution Solution) Names() []string {\n\tpackages := make([]string, 0, len(solution))\n\tfor name := range solution {\n\t\tpackages = append(packages, name)\n\t}\n\tsort.Strings(packages)\n\treturn packages\n}", "func (o GetInstanceAttachmentsResultOutput) Names() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetInstanceAttachmentsResult) []string { return v.Names }).(pulumi.StringArrayOutput)\n}", "func NameList(c echo.Context) error {\n\tnames := nameList(true)\n\treturn c.JSON(http.StatusOK, http.Resp{\n\t\tData: names,\n\t})\n}", "func (col Columns) Names() []string {\n\tnames := make([]string, col.Len())\n\tfor i, mn := range col.MultiNames() {\n\t\tname := strings.Join(mn, 
values.GetMultiColNameSeparator())\n\t\tnames[i] = name\n\t}\n\treturn names\n}", "func (j Jobs) Names() []string {\n\tn := make([]string, len(j))\n\tfor i, job := range j {\n\t\tn[i] = job.Name\n\t}\n\treturn n\n}", "func Names(v interface{}, prev ...string) [][]string {\n\tval := reflect.ValueOf(v)\n\treturn names(val, prev)\n}", "func (f *FlagBase[T, C, V]) Names() []string {\n\treturn FlagNames(f.Name, f.Aliases)\n}", "func getNames(elements []element) string {\n\tvar names string\n\tfor i, el := range elements {\n\t\tif i != 0 {\n\t\t\tnames += \" \"\n\t\t}\n\t\tnames += el.name\n\t}\n\treturn names\n}", "func (n *dnode) Names() []string {\n\tn.mutex.RLock()\n\tdefer n.mutex.RUnlock()\n\n\ttmp := make([]string, 0, len(n.children))\n\tfor k := range n.children {\n\t\ttmp = append(tmp, k)\n\t}\n\tsort.Strings(tmp)\n\treturn tmp\n}", "func Names(tmpl Template) (names []string) {\n\ttS := tmpl.Templates()\n\tsort.Slice(tS, func(i, j int) bool { return (tS[i].Name() < tS[j].Name()) })\n\tfor i := range tS {\n\t\tnames = append(names, tS[i].Name())\n\t}\n\treturn\n}", "func (c Completer) Names() []string {\n\tnames := make([]string, 0, c.scope.Size())\n\tc.scope.Range(func(k string, v values.Value) {\n\t\tnames = append(names, k)\n\t})\n\tsort.Strings(names)\n\treturn names\n}", "func ListHumanNames() []string {\n\tret := []string{}\n\tfor _, v := range humanNames {\n\t\tret = append(ret, v)\n\t}\n\treturn ret\n}", "func Names() []string {\n\tfiles, _ := assets.ReadDir(\".\")\n\tres := make([]string, 0, len(files))\n\tfor _, f := range files {\n\t\tres = append(res, f.Name())\n\t}\n\treturn res\n}", "func UConverterGetAvailableNames() (_swig_ret []string)", "func (s *FabricInterfaceSet) Names() []string {\n\tif s == nil {\n\t\treturn []string{}\n\t}\n\treturn s.byName.keys()\n}", "func (d *TagAny) NameS() []string {\n\treturn nvp.NameS(d)\n}", "func (store_m StoreModule) NameList() []string {\n\treturn 
[]string{\n\t\t\"product\",\n\t\t\"aboutus\",\n\t\t\"project\",\n\t\t\"certification\",\n\t\t\"culture\",\n\t\t\"nearshop\",\n\t\t\"navigation\",\n\t\t\"contactus\",\n\t}\n}", "func (mb *MutableBag) Names() []string {\n\ti := 0\n\tkeys := make([]string, len(mb.values))\n\tfor k := range mb.values {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\treturn append(keys, mb.parent.Names()...)\n}", "func PrintShortName(name pkix.Name) (out string) {\n\tprinted := false\n\tfor _, name := range name.Names {\n\t\tshort := oidShort(name.Type)\n\t\tif short != \"\" {\n\t\t\tif printed {\n\t\t\t\tout += \", \"\n\t\t\t}\n\t\t\tout += fmt.Sprintf(\"%s=%v\", short, name.Value)\n\t\t\tprinted = true\n\t\t}\n\t}\n\n\treturn\n}", "func GetAZNameList() (names []string) {\n\tazs, _ := GetAZs()\n\tazlist := make([]string, len(*azs))\n\n\tfor i, az := range *azs {\n\t\tazlist[i] = az.Name\n\t}\n\treturn azlist\n}", "func (*unifinames) Name() string { return \"unifi-names\" }", "func ListNames(db *bolt.DB) ([]string, error) {\n\treturn dbutil.ListNames(db, dbutil.TOTPBucket)\n}", "func (obj *ExitStatus) Names() []string {\n\treturn []string{\n\t\t\"OK\",\n\t\t\"ERROR\",\n\t}\n}", "func (m *WorkbookWorksheet) GetNames()([]WorkbookNamedItemable) {\n return m.names\n}", "func (c *DynamicTargetingKeysListCall) Names(names ...string) *DynamicTargetingKeysListCall {\n\tc.urlParams_.SetMulti(\"names\", append([]string{}, names...))\n\treturn c\n}", "func (a AliasedName) GetName() string { return a.Name }", "func (list *DeploymentList) Names() []string {\n\tvar deploymentNames []string\n\n\tfor _, deployment := range list.Items {\n\t\tdeploymentNames = append(deploymentNames, deployment.Name)\n\t}\n\n\treturn deploymentNames\n}", "func (reqSet *requestSet) Names() []string {\n\tvar n []string\n\tfor k := range *reqSet {\n\t\tn = append(n, k)\n\t}\n\treturn n\n}", "func (p PropertyList) GetNames() []string {\n\tkeys := []string{}\n\tfor _, k := range p {\n\t\tkeys = append(keys, k.Name)\n\t}\n\n\t// Without 
this, the ordering is (intentionally) pseudorandom and inconsistent.\n\tsort.Strings(keys)\n\treturn keys\n}", "func FullNames(repository scm.Repository) []string {\n\towner := repository.Namespace\n\tname := repository.Name\n\tfullName := repository.FullName\n\tif fullName == \"\" {\n\t\tfullName = scm.Join(owner, name)\n\t}\n\tfullNames := []string{fullName}\n\tlowerOwner := strings.ToLower(owner)\n\tif lowerOwner != owner {\n\t\tfullNames = append(fullNames, scm.Join(lowerOwner, name))\n\t}\n\treturn fullNames\n}", "func (p *Params) Names() []string {\n\treturn p.names\n}", "func (m *Workbook) GetNames()([]WorkbookNamedItemable) {\n return m.names\n}", "func (a *Action) OutputNames() (names []string) {\n\tfor k := range a.Output {\n\t\tnames = append(names, k)\n\t}\n\n\tsort.Strings(names)\n\n\treturn names\n}", "func (r *Recorder) names() []string {\n\tvar names []string\n\tfor _, h := range r.snapshot {\n\t\tnames = append(names, h.Name)\n\t}\n\treturn names\n}", "func (self *SAMFile) RefNames() []string {\n\th := self.header()\n\tif h == nil {\n\t\treturn nil\n\t}\n\treturn h.targetNames()\n}", "func ProviderNames(verbose bool) []string {\n\tnames := []string{}\n\n\tfor key, p := range Providers {\n\t\tif enableWhitelist {\n\t\t\t_, ok := whitelist[key]\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif enableBlacklist {\n\t\t\t_, ok := blacklist[key]\n\t\t\tif ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif verbose {\n\t\t\ttags := p.Tags()\n\n\t\t\tif len(tags) > 0 {\n\t\t\t\tsort.Strings(tags)\n\t\t\t\tnames = append(names, fmt.Sprintf(\"%s (%s)\", key, strings.Join(tags, \", \")))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tnames = append(names, key)\n\t}\n\n\tsort.Strings(names)\n\treturn names\n}", "func (ns *Namespace) ChildNames() []string {\n\tif len(ns.children) == 0 {\n\t\treturn nil\n\t}\n\tnms := []string{}\n\tfor k := range ns.children {\n\t\tnms = append(nms, k)\n\t}\n\tsort.Strings(nms)\n\treturn nms\n}", "func TagNames(verbose bool) 
[]string {\n\ttags := make(map[string][]string)\n\n\tfor name, p := range Providers {\n\t\tif enableWhitelist {\n\t\t\t_, ok := whitelist[name]\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif enableBlacklist {\n\t\t\t_, ok := blacklist[name]\n\t\t\tif ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfor _, t := range p.Tags() {\n\t\t\ttags[t] = append(tags[t], name)\n\t\t}\n\t}\n\n\ttagList := []string{}\n\n\tfor k, providers := range tags {\n\t\tif verbose {\n\t\t\tsort.Strings(providers)\n\t\t\ttagList = append(tagList, fmt.Sprintf(\"%s (%s)\", k, strings.Join(providers, \", \")))\n\t\t} else {\n\t\t\ttagList = append(tagList, k)\n\t\t}\n\t}\n\n\tsort.Strings(tagList)\n\n\treturn tagList\n}", "func ListNames() []string {\n\tkt.mu.Lock()\n\tdefer kt.mu.Unlock()\n\tvar names []string\n\tfor _, transport := range kt.transports {\n\t\tif !deprecatedTransports.Contains(transport.Name()) {\n\t\t\tnames = append(names, transport.Name())\n\t\t}\n\t}\n\tsort.Strings(names)\n\treturn names\n}", "func GetNames(result []interface{})[]string{\n\tvar names []string\n\tfor _, poi := range result {\n\t\t//fmt.Println(poi.(map[string]interface{})[\"name\"])\n\t\tnames = append(names, poi.(map[string]interface{})[\"name\"].(string))\n\t}\n\treturn names\n}", "func (pl List) SortedNames() []string {\n\tnames := pl.names()\n\tsort.Strings(names)\n\treturn names\n}", "func (s *Author) MiddleNames() []string {\n\treturn s.middleName\n}", "func (ns *Namespace) RelativesNames() []string {\n\ta := []string{}\n\tif ns.parent != nil {\n\t\ta = append(a, ns.parent.name)\n\t}\n\tfor k := range ns.children {\n\t\ta = append(a, k)\n\t}\n\n\treturn a\n}", "func (n Node) DemoNames() []string {\n\tvar names []string\n\n\tfor name := range n.Meta.Demos {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\treturn names\n}", "func (a Entity) Name() []string {\n\tnewName := make([]string, len(a.name))\n\tcopy(newName, a.name)\n\n\treturn newName\n}", "func (da *DataFrame) Names() 
[]string {\n\treturn da.names\n}", "func (o ServicePrincipalOutput) AlternativeNames() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *ServicePrincipal) pulumi.StringArrayOutput { return v.AlternativeNames }).(pulumi.StringArrayOutput)\n}", "func (o NamedRuleWithOperationsOutput) ResourceNames() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v NamedRuleWithOperations) []string { return v.ResourceNames }).(pulumi.StringArrayOutput)\n}", "func (c unionCAContent) Name() string {\n\tnames := []string{}\n\tfor _, curr := range c {\n\t\tnames = append(names, curr.Name())\n\t}\n\treturn strings.Join(names, \",\")\n}" ]
[ "0.717598", "0.7162568", "0.69929", "0.6861949", "0.66984594", "0.6692085", "0.65727407", "0.65398175", "0.65245485", "0.6514099", "0.6512489", "0.6505329", "0.64872956", "0.64470685", "0.64367557", "0.6419261", "0.639283", "0.6387922", "0.6352587", "0.634653", "0.6344535", "0.6308442", "0.630217", "0.6296705", "0.6287308", "0.62865067", "0.6254792", "0.62545824", "0.6229706", "0.61694044", "0.6149681", "0.6146984", "0.6144135", "0.6142294", "0.613655", "0.61033744", "0.6100299", "0.60984534", "0.60972756", "0.6094148", "0.60918885", "0.6054956", "0.6028196", "0.60281247", "0.60199416", "0.59922194", "0.59763515", "0.59717137", "0.5971204", "0.5951715", "0.59461427", "0.594394", "0.59358156", "0.5891859", "0.5879732", "0.5879309", "0.58640623", "0.5854803", "0.58467406", "0.5833368", "0.5828839", "0.5818656", "0.5789455", "0.57702994", "0.5757194", "0.5749725", "0.5744604", "0.5714323", "0.57093567", "0.56697744", "0.5660068", "0.56573313", "0.56352025", "0.5629349", "0.56288314", "0.5616718", "0.5609982", "0.5606629", "0.5585199", "0.55820495", "0.5577616", "0.55756015", "0.5554658", "0.5553002", "0.55438274", "0.55410224", "0.5534166", "0.5531778", "0.55147356", "0.55029005", "0.54997146", "0.5494396", "0.5484168", "0.54824424", "0.54418784", "0.5439479", "0.5428016", "0.54193085", "0.54192513" ]
0.68288386
5
HasName returns true if Command.Name matches given name
func (cmd *Command) HasName(name string) bool { for _, n := range cmd.Names() { if n == name { return true } } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c Command) HasName(name string) bool {\n\tfor _, n := range c.Names() {\n\t\tif n == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isCommand(name string) bool {\n\tfor _, cmd := range []string{\"_hooks\", \"_forward\"} {\n\t\tif strings.Compare(name, cmd) == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (p *Project) IsCommand(name string) bool {\n\tif t, exists := p.Targets[name]; exists {\n\t\treturn t.Command\n\t}\n\treturn false\n}", "func CmdExists(name string) bool {\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"command -v \"+name)\n\tif err := cmd.Run(); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func commandExists(name string) bool {\n\t_, err := exec.LookPath(name)\n\treturn err == nil\n}", "func IsCommandExists(name string) (bool, string) {\n\tresult := false\n\n\tpath, err := exec.LookPath(name)\n\tif err == nil {\n\t\tresult = true\n\t}\n\n\treturn result, path\n}", "func (self *Controller) HasCommand(notificationName string) bool {\n\tself.commandMapMutex.RLock()\n\tdefer self.commandMapMutex.RUnlock()\n\n\treturn self.commandMap[notificationName] != nil\n}", "func CheckCmdName(s string) bool {\n\tif runtime.GOOS == `windows` {\n\t\treturn strings.IndexAny(s, `\\/:*?\"<>|`) < 0\n\t}\n\treturn strings.IndexAny(s, `/`) < 0\n}", "func (o *ShowSystem) HasName() bool {\n\tif o != nil && !IsNil(o.Name) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *Secret) HasName(name string) bool {\n\treturn strings.EqualFold(s.Name, name)\n}", "func (o *ManualDependency) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func CommandExists(appName string) error {\n\treturn appExists(appName)\n}", "func IsCommand(cmd string) bool {\n for val := range DaemonizedCommands() {\n if val == cmd {\n return true\n }\n }\n for val := range InfoCommands() {\n if val == cmd {\n return true\n }\n }\n\n return false\n}", "func (command *Command) Match(incmd 
sfinterfaces.ICommandInput) bool {\n\n\tshell := command.GetShell()\n\tlog := *shell.GetLog()\n\n\tif strings.HasPrefix(incmd.GetLowerCommandName(), strings.ToLower(command.GetName())) {\n\t\tlog.LogDebugf(\"Match()\", \"Command '%s' matched '%s'\", command.GetName(), incmd.GetLowerCommandName())\n\t\treturn true\n\t}\n\n\t// this will space the trace with false positives\n\t//log.LogDebug(\"Match()\", \"Command '%s' did not match '%s'\", command.GetName(), incmd.GetLowerCommandName())\n\treturn false\n}", "func (o *OnpremUpgradePhase) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *OsInstallAllOf) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (p pkgInfo) IsCommand() bool { return p.Name == \"main\" }", "func (o *GridViewUpdate) HasName() bool {\n\tif o != nil && !IsNil(o.Name) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *EditBankConnectionParams) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *HyperflexHealthCheckPackageChecksum) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r *RoleUpdate) HasName() bool {\n\treturn r.hasName\n}", "func (o *UpdateRole) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (cc *CloneCommand) NameIsID() bool {\n\treturn cc.RepositoryName == \"\"\n}", "func (o *VersionedConnection) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *WorkflowBuildTaskMeta) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Exists(name string) bool {\n\treturn CliExists(name)\n}", "func IsCommandExist(exe string) bool {\n\t_, err := exec.LookPath(exe)\n\treturn err == nil\n}", "func (c *TestAndSetCommand) CommandName() string {\n\treturn 
commandName(\"testAndSet\")\n}", "func (o *Ga4ghChemotherapy) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ApiKey) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *StorageSasExpander) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (e *Event) HasName(name string) bool {\n\ts := (*C.gchar)(C.CString(name))\n\tdefer C.free(unsafe.Pointer(s))\n\treturn C.gst_event_has_name(e.g(), s) != 0\n}", "func (r *RoleCreate) HasName() bool {\n\treturn r.hasName\n}", "func (o *Commitstatus) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *StorageNetAppCloudTargetAllOf) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r *MessageExecuteCommand) HasCommand() bool {\n\treturn r.hasCommand\n}", "func (o *BaseItem) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ControllersUpdateStorageOptionsTemplateRequest) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *WorkflowSshCmd) HasCommand() bool {\n\tif o != nil && o.Command != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Product) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Exists(name string) bool {\n\tfor _, arg := range os.Args {\n\t\tif strings.HasPrefix(arg, \"-\"+name) || strings.HasPrefix(arg, \"--\"+name) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *VersionedControllerService) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (p *profileCache) CommandExists(profileName string, cmd string, method string) (bool, errors.EdgeX) {\n\tp.mutex.RLock()\n\tdefer p.mutex.RUnlock()\n\n\t_, ok := 
p.deviceProfileMap[profileName]\n\tif !ok {\n\t\terrMsg := fmt.Sprintf(\"failed to find Profile %s in cache\", profileName)\n\t\treturn false, errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, errMsg, nil)\n\t}\n\t// Check whether cmd exists in deviceCommands.\n\tvar deviceCommands map[string][]models.ResourceOperation\n\tif strings.ToLower(method) == common.GetCmdMethod {\n\t\tdeviceCommands, _ = p.getResourceOperationsMap[profileName]\n\t} else {\n\t\tdeviceCommands, _ = p.setResourceOperationsMap[profileName]\n\t}\n\n\tif _, ok := deviceCommands[cmd]; !ok {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}", "func (d UserData) HasName() bool {\n\treturn d.ModelData.Has(models.NewFieldName(\"Name\", \"name\"))\n}", "func (o *IamServiceProviderAllOf) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func CommandExists(command string) bool {\n\t_, err := exec.LookPath(command)\n\treturn err == nil\n}", "func commandExists(command string) bool {\n\t_, err := exec.LookPath(command)\n\treturn err == nil\n}", "func (r *CommandRegistry) Command(commandName string) (cmd Command, exists bool) {\n\n\tr.commandsMu.Lock()\n\tdefer r.commandsMu.Unlock()\n\n\tcommand, exists := r.commands[commandName]\n\treturn command, exists\n}", "func CommandArgExists(mapArr map[string]string, key string) bool {\n\tif _, ok := mapArr[key]; ok {\n\t\treturn true\n\t}\n\treturn false\n}", "func hasName(t Type) bool {\n\tswitch t.(type) {\n\tcase *Basic, *Named, *TypeParam:\n\t\treturn true\n\t}\n\treturn false\n}", "func (o *ApplianceClusterInstallPhase) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *KubernetesAddonDefinitionAllOf) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *WorkflowSshCmdAllOf) HasCommand() bool {\n\tif o != nil && o.Command != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o 
*CloudInstanceTypeAllOf) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ConnectorTypeAllOf) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ConnectorTypeAllOf) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Name) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *WorkflowCliCommandAllOf) HasCommand() bool {\n\tif o != nil && o.Command != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Ga4ghTumourboard) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *WorkflowServiceItemActionInstance) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *JsonEnvironment) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (fc *FooCommand) Name() string {\n\treturn \"foo\"\n}", "func (o *EtherPhysicalPort) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (pkg *goPackage) isCommand() bool {\n\treturn pkg.name == \"main\" && pkg.hasMainFunction\n}", "func (o *EventsScalarQuery) HasName() bool {\n\treturn o != nil && o.Name != nil\n}", "func (o *StoragePhysicalDisk) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func CommandExists(command string) (isExists bool) {\n\tcmd := exec.Command(\"which\", command)\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(out) > 0 {\n\t\tisExists = true\n\t}\n\treturn\n}", "func (o *User) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (Functions) CommandName(obj interface{}) string {\n\treturn nameOptions{}.convert(nameOf(obj))\n}", "func (o *ParameterContextDTO) HasName() 
bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isCommandAvailable(name string) bool {\n\tcmd := exec.Command(\"command\", name, \"-V\")\n\tif err := cmd.Run(); err != nil {\n\t\tFatalf(\"%s executable is not installed on this box, please run 'yum install -y %[1]s to install it'\", name, name)\n\t}\n\treturn true\n}", "func (authClient *AuthClient) IsCommand(message string) bool {\n\treturn strings.HasPrefix(message, authCommand)\n}", "func (app *App) CommandName() string {\n\tif app.invoked == nil {\n\t\treturn \"\"\n\t}\n\treturn app.invoked.Name()\n}", "func (o *Member) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *LogsPipelineProcessor) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *FileversionFileversion) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func containsCommand(components []string) bool {\n\tfor _, comp := range components {\n\t\tif isCommand(comp) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *CreateTemplateRequestEntity) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *FormField) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (commands Commands) Get(name string) *Command {\n\tfor _, cmd := range commands {\n\t\tif cmd.HasName(name) {\n\t\t\treturn cmd\n\t\t}\n\t}\n\n\treturn nil\n}", "func (dbh *DBHandler) NameExists(name string) bool {\n\tresult, err := dbh.Connection.Query(`SELECT * FROM users WHERE nickname = ?;`, name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer result.Close()\n\tif result.Next() {\n\t\treturn true\n\t}\n\treturn false\n}", "func (strokeClient *StrokeClient) IsCommand(message string) bool {\n\treturn strings.HasPrefix(message, strokeCommand)\n}", "func (o *ComponentReferenceDTO) 
HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c *Commands) Remove(name string) (found bool) {\n\tfor index, cmd := range c.list {\n\t\tif cmd.Name == name {\n\t\t\tfound = true\n\t\t\tc.changed = true\n\t\t\tc.list = append(c.list[:index], c.list[index+1:]...)\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (o *ConvergedinfraBasePod) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *V0037Node) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsExportedName(name string) bool {\n\trune, _ := utf8.DecodeRuneInString(name)\n\treturn unicode.IsUpper(rune)\n}", "func CheckIsLeagueExecutable(name string) bool {\r\n\treturn strings.HasSuffix(strings.TrimSpace(strings.ToLower(name)), \"leagueclient.exe\")\r\n}", "func (o *GroupReplaceRequest) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *EquipmentBaseSensor) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ControllerServiceReferencingComponentDTO) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c *RunCommand) Name() string {\n\treturn c.cmd.name\n}", "func TestUpdateName(t *testing.T) {\n\tc := &Command{Use: \"name xyz\"}\n\toriginalName := c.Name()\n\n\tc.Use = \"changedName abc\"\n\tif originalName == c.Name() || c.Name() != \"changedName\" {\n\t\tt.Error(\"c.Name() should be updated on changed c.Use\")\n\t}\n}", "func TestUpdateName(t *testing.T) {\n\tc := &Command{Use: \"name xyz\"}\n\toriginalName := c.Name()\n\n\tc.Use = \"changedName abc\"\n\tif originalName == c.Name() || c.Name() != \"changedName\" {\n\t\tt.Error(\"c.Name() should be updated on changed c.Use\")\n\t}\n}", "func (lc *LeetCommand) IsCommandMatch(update *tgbotapi.Update) bool {\n\treturn 
strings.HasPrefix(strings.TrimSpace(update.Message.Text), \"1337\")\n}", "func (cmd *Command) Name() string {\n\treturn strings.SplitN(cmd.Names, \",\", 2)[0]\n}", "func (c *SetCommand) CommandName() string {\n\treturn commandName(\"set\")\n}", "func (c *RemoveCommand) CommandName() string {\n\treturn commandName(\"remove\")\n}", "func (o *WorkbookChart) HasName() bool {\n\tif o != nil && o.Name != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (p Typed) IsCommand() bool {\n\treturn p.Kind() == TypeIDKindCommand\n}" ]
[ "0.8616193", "0.7294112", "0.70098317", "0.69279706", "0.6805237", "0.6766771", "0.67423135", "0.65067405", "0.64313805", "0.6430653", "0.6413377", "0.6372383", "0.6323463", "0.6284533", "0.6250266", "0.62433934", "0.6240555", "0.6221084", "0.6212627", "0.61703545", "0.61641973", "0.6154425", "0.61398846", "0.61341345", "0.61056846", "0.6100086", "0.60942286", "0.6091874", "0.6090919", "0.6062105", "0.6034251", "0.60230345", "0.6022823", "0.6004796", "0.60027176", "0.59973514", "0.59969664", "0.59829235", "0.5980164", "0.5972051", "0.5966358", "0.59659153", "0.5962244", "0.5958982", "0.5955602", "0.5955136", "0.59515446", "0.59513766", "0.5945093", "0.5928521", "0.59260786", "0.5923598", "0.5911147", "0.5898333", "0.5895043", "0.5895043", "0.58792675", "0.58761805", "0.5873224", "0.58658266", "0.58652544", "0.58624977", "0.586198", "0.5852751", "0.5846852", "0.5840307", "0.5832126", "0.5828493", "0.5825588", "0.5817466", "0.5802696", "0.5795945", "0.5795859", "0.57902116", "0.5779679", "0.5778425", "0.5771989", "0.57706", "0.5760142", "0.5739505", "0.5738219", "0.57345134", "0.57344556", "0.5732432", "0.5727083", "0.57258", "0.57207495", "0.57160974", "0.5703584", "0.57000476", "0.5692949", "0.56920034", "0.5690697", "0.5690697", "0.5686055", "0.5677921", "0.5672827", "0.56705934", "0.56692344", "0.56597525" ]
0.8542021
1
VisibleCategories returns a slice of categories and commands that are Hidden=false
func (cmd *Command) VisibleCategories() []CommandCategory { ret := []CommandCategory{} for _, category := range cmd.categories.Categories() { if visible := func() CommandCategory { if len(category.VisibleCommands()) > 0 { return category } return nil }(); visible != nil { ret = append(ret, visible) } } return ret }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cmd *Command) VisibleFlagCategories() []VisibleFlagCategory {\n\tif cmd.flagCategories == nil {\n\t\tcmd.flagCategories = newFlagCategoriesFromFlags(cmd.Flags)\n\t}\n\treturn cmd.flagCategories.VisibleCategories()\n}", "func (cmd *Command) VisibleCommands() []*Command {\n\tvar ret []*Command\n\tfor _, command := range cmd.Commands {\n\t\tif !command.Hidden {\n\t\t\tret = append(ret, command)\n\t\t}\n\t}\n\treturn ret\n}", "func (o OceanFiltersPtrOutput) Categories() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *OceanFilters) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Categories\n\t}).(pulumi.StringArrayOutput)\n}", "func (c Command) VisibleFlags() []Flag {\n\treturn visibleFlags(c.Flags)\n}", "func (o OceanFiltersOutput) Categories() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v OceanFilters) []string { return v.Categories }).(pulumi.StringArrayOutput)\n}", "func (g *Game) Categories(filter *CategoryFilter, sort *Sorting, embeds string) (*CategoryCollection, *Error) {\n\tif g.CategoriesData == nil {\n\t\treturn fetchCategoriesLink(firstLink(g, \"categories\"), filter, sort, embeds)\n\t}\n\n\treturn toCategoryCollection(g.CategoriesData), nil\n}", "func (cmd *Command) VisibleFlags() []Flag {\n\treturn visibleFlags(cmd.Flags)\n}", "func VisibleChatConversationStatuses() (res []chat1.ConversationStatus) {\n\tres = make([]chat1.ConversationStatus, 0, len(chat1.ConversationStatusMap))\n\tfor _, s := range chat1.ConversationStatusMap {\n\t\tif GetConversationStatusBehavior(s).ShowInInbox {\n\t\t\tres = append(res, s)\n\t\t}\n\t}\n\tsort.Sort(byConversationStatus(res))\n\treturn\n}", "func (ref *UIElement) VisibleColumns() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleColumnsAttribute)\n}", "func (j *DSRocketchat) Categories() map[string]struct{} {\n\treturn RocketchatCategories\n}", "func (c *Client) ShowCategories(ctx context.Context, path string) (*http.Response, error) {\n\treq, err := 
c.NewShowCategoriesRequest(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.Client.Do(ctx, req)\n}", "func (commands Commands) VisibleCommands() []*cli.Command {\n\tvar visible []*cli.Command\n\n\tfor _, cmd := range commands {\n\t\tif cmd.Hidden {\n\t\t\tcontinue\n\t\t}\n\n\t\tif cmd.HelpName == \"\" {\n\t\t\tcmd.HelpName = cmd.Name\n\t\t}\n\n\t\tvisible = append(visible, &cli.Command{\n\t\t\tName: cmd.Name,\n\t\t\tAliases: cmd.Aliases,\n\t\t\tHelpName: cmd.HelpName,\n\t\t\tUsage: cmd.Usage,\n\t\t\tUsageText: cmd.UsageText,\n\t\t\tDescription: cmd.Description,\n\t\t\tHidden: cmd.Hidden,\n\t\t})\n\t}\n\n\treturn visible\n}", "func (ref *UIElement) VisibleChildren() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleChildrenAttribute)\n}", "func (o *GetClientConfig200ResponseDenylist) GetVisible() bool {\n\tif o == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\n\treturn o.Visible\n}", "func (ref *UIElement) VisibleCells() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleCellsAttribute)\n}", "func (r *RPC) Categories(c context.Context, arg *model.ArgIP, res *model.Categories) (err error) {\n\t*res, err = r.s.ListCategories(c, arg.RealIP)\n\treturn\n}", "func (o GoogleCloudRetailV2alphaProductResponseOutput) Categories() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaProductResponse) []string { return v.Categories }).(pulumi.StringArrayOutput)\n}", "func NewGetAssetCategoriesForbidden() *GetAssetCategoriesForbidden {\n\treturn &GetAssetCategoriesForbidden{}\n}", "func (j *DSGit) Categories() map[string]struct{} {\n\treturn GitCategories\n}", "func filterCategory(flags []cli.Flag, category string) (ret []cli.Flag) {\n\tfor _, f := range flags {\n\t\tif flagCategories[f.GetName()] == category {\n\t\t\tret = append(ret, f)\n\t\t}\n\t}\n\treturn\n}", "func (m *EducationAssignment) GetCategories()([]EducationCategoryable) {\n return m.categories\n}", "func (t *tagStorage) QueryCategories() 
([]model.CategoryInfo, error) {\n\tvar cs []model.CategoryInfo\n\n\terr := t.db.Table(model.CateTableName()).Where(\"del_flag = ?\", 0).Find(&cs)\n\treturn cs, err\n}", "func GetHidden() (tags []Type, err error) {\n\ttags = make([]Type, 0)\n\n\t_, err = mongo.Find(\"blotter\", \"tags\", bson.M{\n\t\t\"hide\": true,\n\t}, nil, &tags)\n\n\tif err != nil {\n\t\treturn\n\t}\n\treturn tags, nil\n}", "func (s *tsLibrary) Visibility() []string {\n\treturn s.RuleConfig.GetVisibility()\n}", "func (s UserSet) SidebarVisible() bool {\n\tres, _ := s.RecordCollection.Get(models.NewFieldName(\"SidebarVisible\", \"sidebar_visible\")).(bool)\n\treturn res\n}", "func (m *OutlookUser) GetMasterCategories()([]OutlookCategoryable) {\n return m.masterCategories\n}", "func (j *DSGitHub) Categories() map[string]struct{} {\n\treturn GitHubCategories\n}", "func (r Repository) Categories() []DbCategory {\n\tsession, _ := mgo.Dial(r.ipAddress)\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true)\n\n\tcategories := []DbCategory{}\n\tcollection := session.DB(\"u-talk\").C(\"forum\")\n\terr := collection.Find(nil).All(&categories)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn categories\n}", "func (o AccessPackageCatalogOutput) ExternallyVisible() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *AccessPackageCatalog) pulumi.BoolPtrOutput { return v.ExternallyVisible }).(pulumi.BoolPtrOutput)\n}", "func (tbl AssociationTable) SliceCategory(req require.Requirement, wh where.Expression, qc where.QueryConstraint) ([]Category, error) {\n\treturn sliceAssociationTableCategoryPtrList(tbl, req, \"category\", wh, qc)\n}", "func (o *GetClientConfig200ResponseDenylist) GetVisibleOk() (*bool, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Visible, true\n}", "func (v *IconView) GetVisibleRange() (*TreePath, *TreePath) {\n\tvar (\n\t\tcpathStart, cpathEnd *C.GtkTreePath\n\t\tpathStart, pathEnd *TreePath\n\t)\n\n\tC.gtk_icon_view_get_visible_range(v.native(), 
&cpathStart, &cpathEnd)\n\n\tif cpathStart != nil {\n\t\tpathStart = &TreePath{cpathStart}\n\t\truntime.SetFinalizer(pathStart, func(v *TreePath) { glib.FinalizerStrategy(v.free) })\n\t}\n\n\tif cpathEnd != nil {\n\t\tpathEnd = &TreePath{cpathEnd}\n\t\truntime.SetFinalizer(pathEnd, func(v *TreePath) { glib.FinalizerStrategy(v.free) })\n\t}\n\n\treturn pathStart, pathEnd\n}", "func Categories(filter bson.M) ([]structure.Category, error) {\n\tvar categories []structure.Category\n\n\tsession := mgoSession.Copy()\n\tdefer session.Close()\n\n\tc := session.DB(dbName).C(\"categories\")\n\n\tpipeline := []bson.M{\n\t\tbson.M{\n\t\t\t\"$match\": filter,\n\t\t},\n\t\tbson.M{\n\t\t\t\"$project\": bson.M{\n\t\t\t\t\"_id\": 1,\n\t\t\t\t\"name\": 1,\n\t\t\t},\n\t\t},\n\t}\n\n\terr := c.Pipe(pipeline).All(&categories)\n\n\treturn categories, err\n}", "func FilterOutClans(clanList []string) []string {\n\ttofetch := SplitToChucks(clanList)\n\ttoReturn := []string{}\n\tfor _, ids := range tofetch {\n\t\trawOut, err := CallRoute(\"clanDiscription\", map[string]string{\"clanID\": strings.Join(ids, \"%2C\")}) // %2C = ,\n\t\tif err != nil {\n\t\t\tapiErr(\"FilterOutClans\", err, \"error check CallRoute\")\n\t\t\tcontinue\n\t\t}\n\t\tvar out ClanDiscription\n\t\tjson.Unmarshal([]byte(rawOut), &out)\n\t\tif out.Status != \"ok\" {\n\t\t\tapiErr(\"FilterOutClans\", errors.New(out.Error.Message), \"api status is not OK json.Unmarshal\")\n\t\t\tcontinue\n\t\t}\n\t\tfor clanID, clan := range out.Data {\n\t\t\tif IsDutch(clan.Description) {\n\t\t\t\ttoReturn = append(toReturn, clanID)\n\t\t\t\tother.DevPrint(\"found clan:\", clan.Tag)\n\t\t\t}\n\t\t}\n\t}\n\treturn toReturn\n}", "func GetCategories(c echo.Context) error {\n\trepo := repositories.CategoryRepo{}\n\treturn c.JSON(200, repo.All())\n}", "func (me TAttlistAbstractTextNlmCategory) IsConclusions() bool { return me.String() == \"CONCLUSIONS\" }", "func (m *IntentsDeviceManagementIntentItemRequestBuilder) 
Categories()(*IntentsItemCategoriesRequestBuilder) {\n return NewIntentsItemCategoriesRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}", "func (d UserData) SidebarVisible() bool {\n\tval := d.ModelData.Get(models.NewFieldName(\"SidebarVisible\", \"sidebar_visible\"))\n\tif !d.Has(models.NewFieldName(\"SidebarVisible\", \"sidebar_visible\")) {\n\t\treturn *new(bool)\n\t}\n\treturn val.(bool)\n}", "func FilterHiddenServices(services []occlient.Service) []occlient.Service {\n\tvar filteredServices []occlient.Service\n\tfor _, service := range services {\n\t\tif !service.Hidden {\n\t\t\tfilteredServices = append(filteredServices, service)\n\t\t}\n\t}\n\treturn filteredServices\n}", "func (c *MeetupClient) GetCategories() (results JsonArray, err_out error) {\n\n\turl_str := API_BASE + CATEGORIES\n\treq_url, _ := url.Parse(url_str)\n\tresults , err_out = c.doV2Get(req_url)\n\treturn\n}", "func (o *GetClientConfig200ResponseDenylist) SetVisible(v bool) {\n\to.Visible = v\n}", "func NewGetAssetCategoriesUnauthorized() *GetAssetCategoriesUnauthorized {\n\treturn &GetAssetCategoriesUnauthorized{}\n}", "func (tbl DbCompoundTable) SliceCategory(req require.Requirement, wh where.Expression, qc where.QueryConstraint) ([]Category, error) {\n\treturn tbl.sliceCategoryList(req, \"category\", wh, qc)\n}", "func (cRepo *CategoryGormRepo) Categories() ([]entity.Category, []error) {\n\tctgs := []entity.Category{}\n\terrs := cRepo.conn.Find(&ctgs).GetErrors()\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\treturn ctgs, errs\n}", "func itemCategoryType(categories models.Category) []string {\n\tres := make([]string, 0, 5)\n\n\tif categories.Armor != nil {\n\t\tres = append(res, models.Armors...)\n\t\tres = append(res, *categories.Armor...)\n\t}\n\tif categories.Accessories != nil {\n\t\tres = append(res, models.Accessories...)\n\t\tres = append(res, *categories.Accessories...)\n\t}\n\tif categories.Currency != nil {\n\t\tres = 
append(res, models.Currencies...)\n\t\tres = append(res, *categories.Currency...)\n\t}\n\tif categories.Jewels != nil {\n\t\tres = append(res, models.Jewels...)\n\t\tres = append(res, *categories.Jewels...)\n\t}\n\tif categories.Weapons != nil {\n\t\tres = append(res, models.Weapons...)\n\t\tres = append(res, *categories.Weapons...)\n\t}\n\tif categories.Gems != nil {\n\t\tres = append(res, models.Gems...)\n\t\tres = append(res, *categories.Gems...)\n\t}\n\tif categories.Maps != nil {\n\t\tres = append(res, models.Maps...)\n\t\tres = append(res, *categories.Maps...)\n\t}\n\n\treturn res\n}", "func (api *Client) Categories() (*CategoriesContainer, error) {\n\tcategories := &CategoriesContainer{}\n\terr := get(api, \"/categories?lang=ja\", categories)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn categories, nil\n}", "func (d UserData) Categories() m.PartnerCategorySet {\n\tval := d.ModelData.Get(models.NewFieldName(\"Categories\", \"category_ids\"))\n\tif !d.Has(models.NewFieldName(\"Categories\", \"category_ids\")) || val == nil || val == (*interface{})(nil) {\n\t\tval = models.InvalidRecordCollection(\"PartnerCategory\")\n\t}\n\treturn val.(models.RecordSet).Collection().Wrap().(m.PartnerCategorySet)\n}", "func (m *MenuMutation) CategoryCleared() bool {\n\treturn m.clearedcategory\n}", "func (ref *UIElement) VisibleRows() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleRowsAttribute)\n}", "func (v *Kounta) GetCategories(token string, company string) (Categories, error) {\n\tclient := &http.Client{}\n\tclient.CheckRedirect = checkRedirectFunc\n\n\tu, _ := url.ParseRequestURI(baseURL)\n\tu.Path = fmt.Sprintf(categoriesURL, company)\n\turlStr := fmt.Sprintf(\"%v\", u)\n\n\tr, err := http.NewRequest(\"GET\", urlStr, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr.Header = http.Header(make(map[string][]string))\n\tr.Header.Set(\"Accept\", \"application/json\")\n\tr.Header.Set(\"Authorization\", \"Bearer \"+token)\n\n\tres, err := 
client.Do(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trawResBody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.StatusCode == 200 {\n\t\tvar resp []Category\n\n\t\terr = json.Unmarshal(rawResBody, &resp)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn resp, nil\n\t}\n\treturn nil, fmt.Errorf(\"Failed to get Kounta Categories %s\", res.Status)\n\n}", "func GetCategories() []string {\n\tstmt := \"select name from category\"\n\trows := database.query(stmt)\n\tvar categories []string\n\tvar category string\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(&category)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tcategories = append(categories, category)\n\t}\n\treturn categories\n}", "func (h *PageHandler) ListCategories(c *fiber.Ctx) error {\n\tcategories, err := h.repository.FindCategories()\n\n\tif err != nil {\n\t\tlog.Errorf(\"Error while getting list categories %s\", err)\n\n\t\treturn h.Error(500)\n\t}\n\n\treturn h.JSON(c, 200, categories)\n}", "func getCategoryNames(categories []wikimedia.ApiPageCategory) []string {\n\tvar values []string\n\tfor _, cat := range categories {\n\t\tvalues = append(values, cat.Name)\n\t}\n\n\treturn values\n}", "func (c *Client) GetCategories() string {\n\turl := fmt.Sprintf(c.baseURL + \"categories\")\n\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\treturn string(body)\n}", "func (s UserSet) Categories() m.PartnerCategorySet {\n\tres, _ := s.RecordCollection.Get(models.NewFieldName(\"Categories\", \"category_ids\")).(models.RecordSet).Collection().Wrap(\"PartnerCategory\").(m.PartnerCategorySet)\n\treturn res\n}", "func (ws *Workspace) Visible() bool {\n\treturn ws.visible\n}", "func (m *WorkbookChartTitle) GetVisible()(*bool) {\n val, err := m.GetBackingStore().Get(\"visible\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n 
return val.(*bool)\n }\n return nil\n}", "func printAllCategories() string {\n\tfmt.Println(\"\\nEmployee(s) you're able to edit:\")\n\treturn \"\\n[0] - First name\\n[1] - Last name\\n[2] - Department\\n[3] - Phone number\\n[4] - E-mail\\n\"\n}", "func (wk *wkAPI) GetCategories(pageid int) (WikipediaPageFull, error) {\n\tvar value WikipediaPageFull\n\n\tf := url.Values{\n\t\t\"action\": {\"parse\"},\n\t\t\"pageid\": {strconv.Itoa(pageid)},\n\t\t\"prop\": {\"categories\"},\n\t}\n\n\tres, err := wk.w.Query(f)\n\tif err != nil {\n\t\treturn value, err\n\t}\n\n\tvalue = WikipediaPageFull{\n\t\tMeta: WikipediaPage{\n\t\t\tID: res.Parse.PageId,\n\t\t\tTitle: res.Parse.Title,\n\t\t\tURL: getWikipediaURL(res.Parse.Title),\n\t\t},\n\t\tCategories: getCategoryNames(res.Parse.Categories),\n\t}\n\n\treturn value, nil\n}", "func GetCategories(c *fiber.Ctx) error {\n\tdb := database.DBConn\n\n\trows, err := db.Query(\"SELECT * FROM categories\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar category models.Category\n\tvar categories []models.Category\n\n\tfor rows.Next() {\n\t\tif err := rows.Scan(&category.ID, &category.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcategories = append(categories, category)\n\t}\n\treturn c.Render(\"category\", categories)\n}", "func (tk Kinds) IsCat() bool {\n\treturn tk.Cat() == tk\n}", "func (ob *Obj3D) SelectCats(cats []string) {\n\tnc := len(ob.Cats)\n\tfor ci := nc - 1; ci >= 0; ci-- {\n\t\tcat := ob.Cats[ci]\n\n\t\tsel := false\n\t\tfor _, cs := range cats {\n\t\t\tif cat == cs {\n\t\t\t\tsel = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !sel {\n\t\t\tob.Cats = append(ob.Cats[:ci], ob.Cats[ci+1:]...)\n\t\t\tob.ObjFilesAll = append(ob.ObjFilesAll[:ci], ob.ObjFilesAll[ci+1:]...)\n\t\t\tob.ObjFilesTrain = append(ob.ObjFilesTrain[:ci], ob.ObjFilesTrain[ci+1:]...)\n\t\t\tob.ObjFilesTest = append(ob.ObjFilesTest[:ci], ob.ObjFilesTest[ci+1:]...)\n\t\t}\n\t}\n\tob.Flats()\n}", "func (m *RestaurantMutation) CategoriesCleared() bool 
{\n\treturn m.clearedcategories\n}", "func (ws *Workspace) Hide() {\n\tws.visible = false\n\n\tfor _, c := range ws.clients {\n\t\tc.Hide()\n\t}\n}", "func (*Out_GetProductCategorys) Descriptor() ([]byte, []int) {\n\treturn file_modules_inventory_product_category_product_category_proto_rawDescGZIP(), []int{10}\n}", "func (conn *DB) WhereItemsAreVisible(groupID int64) *DB {\n\treturn conn.WhereGroupHasPermissionOnItems(groupID, \"view\", \"info\")\n}", "func (m *WorkbookChartTitle) GetVisible()(*bool) {\n return m.visible\n}", "func (v *Filter) ToggleVisible() error {\n\t// delete all user input from the tree view\n\tv.view.Clear()\n\n\t// toggle hiding\n\tv.hidden = !v.hidden\n\n\tif !v.hidden {\n\t\t_, err := v.gui.SetCurrentView(v.name)\n\t\tif err != nil {\n\t\t\tlogrus.Error(\"unable to toggle filter view: \", err)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\t// reset the cursor for the next time it is visible\n\t// Note: there is a subtle gocui behavior here where this cannot be called when the view\n\t// is newly visible. 
Is this a problem with dive or gocui?\n\treturn v.view.SetCursor(0, 0)\n}", "func GetCategories(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\tAnswerBadRequest(w, \"Wrong request method only GET allowed.\")\n\t\treturn\n\t}\n\n\tstatusCode := http.StatusOK\n\tcategories, err := models.GetAllCategories()\n\n\tif err != nil {\n\t\tlog.Printf(\"Error during getting categories for DB.\")\n\t\tstatusCode = http.StatusInternalServerError\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(statusCode)\n\terr = json.NewEncoder(w).Encode(categories)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (_Mcapscontroller *McapscontrollerCallerSession) GetCategoryTokens(categoryID *big.Int) ([]common.Address, error) {\n\treturn _Mcapscontroller.Contract.GetCategoryTokens(&_Mcapscontroller.CallOpts, categoryID)\n}", "func GetCategories() []e.Category {\n\tvar categories []e.Category\n\tdb.DB.Order(\"id\").Find(&categories)\n\treturn categories\n}", "func (me TxsdPresentationAttributesViewportsOverflow) IsHidden() bool { return me.String() == \"hidden\" }", "func (me TxsdPresentationAttributesViewportsOverflow) IsVisible() bool {\n\treturn me.String() == \"visible\"\n}", "func GetAllCat(userid uint32) ([]models.Category, error) {\n\tvar cats []models.Category\n\terr := DB.Where(\"user_id = ?\", userid).Find(&cats).Error\n\tif err != nil {\n\t\tlog.Warning.Println(err)\n\t\treturn nil, ErrInternal\n\t}\n\treturn cats, nil\n}", "func (cri *CategoryRepositoryImpl) Categories() ([]entity.Category, error) {\n\n\trows, err := cri.conn.Query(\"SELECT * FROM categories;\")\n\tif err != nil {\n\t\treturn nil, errors.New(\"Could not query the database\")\n\t}\n\tdefer rows.Close()\n\n\tctgs := []entity.Category{}\n\n\tfor rows.Next() {\n\t\tcategory := entity.Category{}\n\t\terr = rows.Scan(&category.ID, &category.Name, &category.Description, &category.Image)\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\tctgs = append(ctgs, category)\n\t}\n\n\treturn ctgs, nil\n}", "func (e Endpoints) GetCatChildes(ctx context.Context, id string) (c []io.TodoCategory, error error) {\n\trequest := GetCatChildesRequest{Id: id}\n\tresponse, err := e.GetCatChildesEndpoint(ctx, request)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn response.(GetCatChildesResponse).C, response.(GetCatChildesResponse).Error\n}", "func (outer outer) Visible() bool {\r\n\treturn false\r\n}", "func (c *CommerceController) Categories(ctx *app.CategoriesCommerceContext) error {\n\tcategories, err := c.CommerceStorage.Categories()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ctx.OK(categories)\n}", "func (s *Store) FindMatcategories(filter bson.M, opts ...*options.FindOptions) ([]models.MatCategory, error) {\n\tvar ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tdb := s.db.Database(dbName)\n\tcol := db.Collection(\"matcategories\")\n\tcur, err := col.Find(ctx, filter, opts...)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcats := make([]models.MatCategory, 0)\n\n\terr = cur.All(ctx, &cats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cats, nil\n}", "func ShowCategoriesPath(categoryID int) string {\n\tparam0 := strconv.Itoa(categoryID)\n\n\treturn fmt.Sprintf(\"/categories/%s\", param0)\n}", "func (d *Dao) ChannelCategories(c context.Context, arg *model.ArgChannelCategory) (res []*model.ChannelCategory, err error) {\n\trows, err := d.db.Query(c, _channelCategorieSQL, arg.State, arg.LastID, arg.Size)\n\tif err != nil {\n\t\tlog.Error(\"d.dao.ChannelCategories(%v) error(%v)\", arg, err)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\tres = make([]*model.ChannelCategory, 0, arg.Size)\n\tfor rows.Next() {\n\t\tt := &model.ChannelCategory{}\n\t\tif err = rows.Scan(&t.ID, &t.Name, &t.Order, &t.Attr, &t.State, &t.CTime, &t.MTime); err != nil {\n\t\t\tlog.Error(\"d.dao.ChannelCategories(%v) rows.Scan() error(%v)\", arg, 
err)\n\t\t\treturn\n\t\t}\n\t\tres = append(res, t)\n\t}\n\treturn\n}", "func (*ListComicCategoryFilterResponse) Descriptor() ([]byte, []int) {\n\treturn file_comic_v1_api_proto_rawDescGZIP(), []int{10}\n}", "func GetCategories() []*datastore.Category {\n\t// Connect to database\n\tdb := OpenPostgresDB()\n\tdefer db.Close()\n\n\t// Query database\n\trows, err := db.Query(\"SELECT * FROM category\")\n\terror_handler.Check(err)\n\n\t// Store categories for landing page in this array\n\tlandingPage := make([]*datastore.Category, 0)\n\n\t// Go through list of categories returned from db\n\tfor rows.Next() {\n\t\tvar id int\n\t\tvar name string\n\t\tvar image string\n\t\terr = rows.Scan(&id, &name, &image)\n\n\t\terror_handler.Check(err)\n\n\t\t// Store single instance of categories\n\t\tvar lpage = datastore.NewCategory(id, name, image)\n\n\t\t// Store above categories in landing page array\n\t\tlandingPage = append(landingPage, lpage)\n\t}\n\treturn landingPage\n}", "func (me TxsdPresentationAttributesGraphicsVisibility) IsVisible() bool {\n\treturn me.String() == \"visible\"\n}", "func (_Mcapscontroller *McapscontrollerFilterer) FilterCategorySorted(opts *bind.FilterOpts) (*McapscontrollerCategorySortedIterator, error) {\n\n\tlogs, sub, err := _Mcapscontroller.contract.FilterLogs(opts, \"CategorySorted\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &McapscontrollerCategorySortedIterator{contract: _Mcapscontroller.contract, event: \"CategorySorted\", logs: logs, sub: sub}, nil\n}", "func (s BmCategoryStorage) GetAll(r api2go.Request, skip int, take int) []*BmModel.Category {\n\tin := BmModel.Category{}\n\tvar out []BmModel.Category\n\terr := s.db.FindMulti(r, &in, &out, skip, take)\n\tif err == nil {\n\t\tvar tmp []*BmModel.Category\n\t\t//tmp := make(map[string]*BmModel.Category)\n\t\tfor _, iter := range out {\n\t\t\ts.db.ResetIdWithId_(&iter)\n\t\t\ttmp = append(tmp, &iter)\n\t\t\t//tmp[iter.ID] = &iter\n\t\t}\n\t\treturn tmp\n\t} else {\n\t\treturn nil 
//make(map[string]*BmModel.Category)\n\t}\n}", "func (service *Service) ListCategory() (*[]CategoryList, error) {\n\tVisited = map[int]bool{}\n\t//Get the details of existing categories\n\tcategoryDetails, err := service.repo.GetCategories()\n\tlog.Println(categoryDetails)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar categoryIDs []int //to store all the category IDs\n\tvar mainCategories []int //to store main categories which doesn't have a child\n\tcategoryChildMap := make(map[int][]int) //to store category and its child relation\n\tcategoryNameMap := make(map[int]string) //to map category and its name\n\t//generating categoryChildMap, categoryNameMap, categoryIDs and mainCategories\n\tfor _, v := range *categoryDetails {\n\t\tif v.ParentID == 0 {\n\t\t\tmainCategories = append(mainCategories, v.ID)\n\t\t\tcategoryNameMap[v.ID] = v.Name\n\t\t\tcategoryIDs = append(categoryIDs, v.ID)\n\t\t\tcontinue\n\t\t}\n\t\tcategoryChildMap[v.ParentID] = append(categoryChildMap[v.ParentID], v.ID)\n\t\tcategoryNameMap[v.ID] = v.Name\n\t\tcategoryIDs = append(categoryIDs, v.ID)\n\t}\n\t//To get all the product and its variants\n\tproductVariantForCategory, err := service.repo.GetProductVariantForEachCategory(categoryIDs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Println(productVariantForCategory)\n\t//generating category and its associated products mapping\n\tcategoryProductMap := make(map[int][]Product)\n\tfor _, v := range productVariantForCategory {\n\t\tcategoryProductMap[v.CategoryID] = append(categoryProductMap[v.CategoryID], v)\n\t}\n\tlog.Println(categoryProductMap)\n\tvar categoryList []CategoryList //Final result category listing\n\tfor _, categoryID := range mainCategories {\n\n\t\tcatList := formatCategory(categoryID, categoryProductMap, categoryChildMap, categoryNameMap)\n\t\tif catList.CategoryID != 0 {\n\t\t\tcategoryList = append(categoryList, catList)\n\t\t}\n\t}\n\treturn &categoryList, nil\n}", "func (_Mcapscontroller *McapscontrollerCaller) 
GetCategoryTokens(opts *bind.CallOpts, categoryID *big.Int) ([]common.Address, error) {\n\tvar out []interface{}\n\terr := _Mcapscontroller.contract.Call(opts, &out, \"getCategoryTokens\", categoryID)\n\n\tif err != nil {\n\t\treturn *new([]common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address)\n\n\treturn out0, err\n\n}", "func (p PostgresPersister) SelectCategories() ([]model.Category, error) {\n\trows, err := p.db.Query(\"SELECT id, body, insert_time, last_update_time FROM category\")\n\tif err != nil {\n\t\treturn nil, translateError(err)\n\t}\n\tdefer rows.Close()\n\tcats := make([]model.Category, 0)\n\tfor rows.Next() {\n\t\tvar dbid int32\n\t\tvar cat model.Category\n\t\terr := rows.Scan(&dbid, &cat.CategoryBody, &cat.InsertTime, &cat.LastUpdateTime)\n\t\tif err != nil {\n\t\t\treturn nil, translateError(err)\n\t\t}\n\t\tcat.ID = p.pathPrefix + fmt.Sprintf(model.CategoryIDFormat, dbid)\n\t\tcat.Type = \"category\"\n\t\tcats = append(cats, cat)\n\t}\n\treturn cats, nil\n}", "func (c FieldsCollection) SidebarVisible() *models.Field {\n\treturn c.MustGet(\"SidebarVisible\")\n}", "func (Visibility) Values() []Visibility {\n\treturn []Visibility{\n\t\t\"IGNORED\",\n\t\t\"VISIBLE\",\n\t}\n}", "func (m *Maps) SetVisible(c *character.Character) {\n\n\tcoord := c.Location()\n\tadj := append(adjacent(coord, false), diagonal(coord, false)...)\n\tfor _, l := range adj {\n\t\tswitch m.active[l.Y][l.X].(type) {\n\t\tcase Visible:\n\t\t\tm.active[l.Y][l.X].(Visible).Visible(true)\n\t\t}\n\t}\n}", "func (o *ClientConfiguration) GetCategoryRestrictions() []Category {\n\tif o == nil {\n\t\tvar ret []Category\n\t\treturn ret\n\t}\n\n\treturn o.CategoryRestrictions\n}", "func (r *RPC) CategoriesMap(c context.Context, arg *model.ArgIP, res *map[int64]*model.Category) (err error) {\n\t*res, err = r.s.ListCategoriesMap(c, arg.RealIP)\n\treturn\n}", "func (*In_GetProductCategorys) Descriptor() ([]byte, []int) {\n\treturn 
file_modules_inventory_product_category_product_category_proto_rawDescGZIP(), []int{9}\n}", "func (_Mcapscontroller *McapscontrollerSession) GetCategoryTokens(categoryID *big.Int) ([]common.Address, error) {\n\treturn _Mcapscontroller.Contract.GetCategoryTokens(&_Mcapscontroller.CallOpts, categoryID)\n}", "func (f *FlagBase[T, C, V]) IsVisible() bool {\n\treturn !f.Hidden\n}", "func (mw *MapWidget) Visible() bool {\n\treturn !mw.hidden\n}", "func (_Mcapscontroller *McapscontrollerCaller) HasCategory(opts *bind.CallOpts, categoryID *big.Int) (bool, error) {\n\tvar out []interface{}\n\terr := _Mcapscontroller.contract.Call(opts, &out, \"hasCategory\", categoryID)\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}", "func (o ClusterNodeGroupDataDiskOutput) Category() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ClusterNodeGroupDataDisk) string { return v.Category }).(pulumi.StringOutput)\n}" ]
[ "0.7034814", "0.6247197", "0.57433826", "0.56768787", "0.5613966", "0.5493636", "0.5483244", "0.5441153", "0.54229337", "0.5393549", "0.5390449", "0.5373621", "0.5359513", "0.5219466", "0.5208159", "0.5200282", "0.5168491", "0.514697", "0.51412046", "0.5137399", "0.5120797", "0.5086906", "0.50158226", "0.50089926", "0.4991143", "0.49733883", "0.49680302", "0.49417716", "0.4939605", "0.48922458", "0.48597425", "0.48550078", "0.48397094", "0.48269925", "0.48255345", "0.4804502", "0.47998297", "0.47902763", "0.477575", "0.4765223", "0.47312525", "0.47264946", "0.47222418", "0.47075385", "0.46671113", "0.46669665", "0.46603957", "0.46578974", "0.46499848", "0.46430728", "0.4636982", "0.46348906", "0.4630255", "0.4613697", "0.45971683", "0.4558441", "0.45426372", "0.4525071", "0.45192477", "0.45191792", "0.45176065", "0.45067215", "0.45028242", "0.44961756", "0.44903243", "0.4481491", "0.44784015", "0.44777155", "0.44657862", "0.44586983", "0.4458378", "0.44521365", "0.44504336", "0.44502237", "0.4446796", "0.44442365", "0.44426852", "0.44388175", "0.44266394", "0.44016024", "0.43956345", "0.43891254", "0.43873498", "0.43799758", "0.43725926", "0.43725607", "0.43717894", "0.4371232", "0.4369802", "0.43602607", "0.43447062", "0.43447018", "0.4344627", "0.43444532", "0.43406153", "0.43361744", "0.43338898", "0.43285388", "0.43263337", "0.4325419" ]
0.77693605
0
VisibleCommands returns a slice of the Commands with Hidden=false
func (cmd *Command) VisibleCommands() []*Command { var ret []*Command for _, command := range cmd.Commands { if !command.Hidden { ret = append(ret, command) } } return ret }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (commands Commands) VisibleCommands() []*cli.Command {\n\tvar visible []*cli.Command\n\n\tfor _, cmd := range commands {\n\t\tif cmd.Hidden {\n\t\t\tcontinue\n\t\t}\n\n\t\tif cmd.HelpName == \"\" {\n\t\t\tcmd.HelpName = cmd.Name\n\t\t}\n\n\t\tvisible = append(visible, &cli.Command{\n\t\t\tName: cmd.Name,\n\t\t\tAliases: cmd.Aliases,\n\t\t\tHelpName: cmd.HelpName,\n\t\t\tUsage: cmd.Usage,\n\t\t\tUsageText: cmd.UsageText,\n\t\t\tDescription: cmd.Description,\n\t\t\tHidden: cmd.Hidden,\n\t\t})\n\t}\n\n\treturn visible\n}", "func (c Command) VisibleFlags() []Flag {\n\treturn visibleFlags(c.Flags)\n}", "func (cmd *Command) VisibleFlags() []Flag {\n\treturn visibleFlags(cmd.Flags)\n}", "func (cmd *Command) VisibleCategories() []CommandCategory {\n\tret := []CommandCategory{}\n\tfor _, category := range cmd.categories.Categories() {\n\t\tif visible := func() CommandCategory {\n\t\t\tif len(category.VisibleCommands()) > 0 {\n\t\t\t\treturn category\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); visible != nil {\n\t\t\tret = append(ret, visible)\n\t\t}\n\t}\n\treturn ret\n}", "func (cmd InspectCmd) Hidden() bool {\n\treturn true\n}", "func TestHiddenCommandIsHidden(t *testing.T) {\n\tc := &Command{Use: \"c\", Hidden: true, Run: emptyRun}\n\tif c.IsAvailableCommand() {\n\t\tt.Errorf(\"Hidden command should be unavailable\")\n\t}\n}", "func (ref *UIElement) VisibleColumns() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleColumnsAttribute)\n}", "func ShowSubcommands(cmd *cobra.Command, args []string) error {\n\tvar strs []string\n\tfor _, subcmd := range cmd.Commands() {\n\t\tif !subcmd.Hidden {\n\t\t\tstrs = append(strs, subcmd.Name())\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Subcommand not found, use one of the available commands: %s\", strings.Join(strs, \", \"))\n}", "func (channelInfo ChannelInfo) GetCommands() string {\n\tif channelInfo.StreamStatus.Online == true {\n\t\treturn \"!\" + strings.Join(channelInfo.OnlineCommands, \", !\")\n\n\t}\n\treturn \"!\" + 
strings.Join(channelInfo.OfflineCommands, \", !\")\n}", "func (c StandardCommand) GetCommands() ([]Command, error) {\n\treturn c.SubCommands, nil\n}", "func (ref *UIElement) VisibleChildren() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleChildrenAttribute)\n}", "func (d *DevfileV2) GetCommands(options common.DevfileOptions) ([]v1.Command, error) {\n\tif len(options.Filter) == 0 {\n\t\treturn d.Commands, nil\n\t}\n\n\tvar commands []v1.Command\n\tfor _, command := range d.Commands {\n\t\tfilterIn, err := common.FilterDevfileObject(command.Attributes, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif filterIn {\n\t\t\tcommand.Id = strings.ToLower(command.Id)\n\t\t\tcommands = append(commands, command)\n\t\t}\n\t}\n\n\treturn commands, nil\n}", "func (h *HyperCommand) Commands() []*cobra.Command {\n\treturn h.cmds\n}", "func (c *Cacheable) Commands() []string {\n\treturn c.cs.s\n}", "func (cl *commandList) GetDebugCommands(mode string) [][]string {\n\tvar allCommands [][]string\n\tfor _, def := range cl.definitions {\n\t\t// TODO: incorporate query commands into e2e testing once proxy access is implemented\n\t\tif def.commandGroup == query {\n\t\t\tcontinue\n\t\t}\n\t\tif mode == runtime.ModeController && def.use == \"log-level\" {\n\t\t\t// log-level command does not support remote execution.\n\t\t\tcontinue\n\t\t}\n\t\tif mode == runtime.ModeAgent && def.agentEndpoint != nil ||\n\t\t\tmode == runtime.ModeController && def.controllerEndpoint != nil ||\n\t\t\tmode == runtime.ModeFlowAggregator && def.flowAggregatorEndpoint != nil {\n\t\t\tvar currentCommand []string\n\t\t\tif group, ok := groupCommands[def.commandGroup]; ok {\n\t\t\t\tcurrentCommand = append(currentCommand, group.Use)\n\t\t\t}\n\t\t\tcurrentCommand = append(currentCommand, def.use)\n\t\t\tallCommands = append(allCommands, currentCommand)\n\t\t}\n\t}\n\tfor _, cmd := range cl.rawCommands {\n\t\tif cmd.cobraCommand.Use == \"proxy\" {\n\t\t\t// proxy will keep running until 
interrupted so it\n\t\t\t// cannot be used as is in e2e tests.\n\t\t\tcontinue\n\t\t}\n\t\tif mode == runtime.ModeController && cmd.supportController ||\n\t\t\tmode == runtime.ModeAgent && cmd.supportAgent {\n\t\t\tvar currentCommand []string\n\t\t\tif group, ok := groupCommands[cmd.commandGroup]; ok {\n\t\t\t\tcurrentCommand = append(currentCommand, group.Use)\n\t\t\t}\n\t\t\tcurrentCommand = append(currentCommand, strings.Split(cmd.cobraCommand.Use, \" \")[0])\n\t\t\tallCommands = append(allCommands, currentCommand)\n\t\t}\n\t}\n\treturn allCommands\n}", "func (a *Available) Commands() []*cobra.Command {\n\tcmds := []*cobra.Command{a.Cmd()}\n\ta.plugs.Range(func(_ string, p plug) bool {\n\t\tcmds = append(cmds, p.Cmd)\n\t\treturn true\n\t})\n\treturn cmds\n}", "func GetCommands() []*cli.Command {\n\treturn []*cli.Command{\n\t\t{\n\t\t\tName: \"xc\",\n\t\t\tUsage: \"Experimental Crypto\",\n\t\t\tHidden: true,\n\t\t\tDescription: \"\" +\n\t\t\t\t\"These subcommands are used to control and test the experimental crypto\" +\n\t\t\t\t\"implementation.\",\n\t\t\tSubcommands: []*cli.Command{\n\t\t\t\t{\n\t\t\t\t\tName: \"list-private-keys\",\n\t\t\t\t\tAction: ListPrivateKeys,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"list-public-keys\",\n\t\t\t\t\tAction: ListPublicKeys,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"generate\",\n\t\t\t\t\tAction: GenerateKeypair,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"export\",\n\t\t\t\t\tAction: ExportPublicKey,\n\t\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"id\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"file\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"import\",\n\t\t\t\t\tAction: ImportPublicKey,\n\t\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"id\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"file\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: 
\"export-private-key\",\n\t\t\t\t\tAction: ExportPrivateKey,\n\t\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"id\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"file\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"import-private-key\",\n\t\t\t\t\tAction: ImportPrivateKey,\n\t\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"id\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"file\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"remove\",\n\t\t\t\t\tAction: RemoveKey,\n\t\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"id\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"encrypt\",\n\t\t\t\t\tAction: EncryptFile,\n\t\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"file\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.StringSliceFlag{\n\t\t\t\t\t\t\tName: \"recipients\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.BoolFlag{\n\t\t\t\t\t\t\tName: \"stream\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"decrypt\",\n\t\t\t\t\tAction: DecryptFile,\n\t\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"file\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.BoolFlag{\n\t\t\t\t\t\t\tName: \"stream\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (b *Bot) getCommands() []BotCommandType {\n\tbcts := make([]BotCommandType, 0)\n\tfor _, bct := range b.Commands {\n\t\tif bct.isRunning() {\n\t\t\tbcts = append(bcts, bct)\n\t\t}\n\t}\n\treturn bcts\n}", "func (lbbft *LBBFTCore) GetProposeCommands(timeout bool) *([]data.Command) {\n\n\tvar batch []data.Command\n\n\tif timeout { // timeout的时候,不管够不够batch都要发起共识。\n\t\tbatch = lbbft.cmdCache.RetriveFirst(lbbft.Config.BatchSize)\n\t} else {\n\t\tbatch = lbbft.cmdCache.RetriveExactlyFirst(lbbft.Config.BatchSize)\n\t}\n\n\treturn &batch\n}", 
"func (ref *UIElement) VisibleCells() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleCellsAttribute)\n}", "func GetCommands(app *App) []cli.Command {\n\t// if app.IsService {\n\t// \treturn []cli.Command{\n\t// \t\t*vmgo.MakeRequireMongo(serviceStartCmd()),\n\t// \t\t*vmgo.MakeRequireMongo(createUserCmd()),\n\t// \t\t*vmgo.MakeRequireMongo(setupCmd()),\n\t// \t\t*vmgo.MakeRequireMongo(resetCmd()),\n\t// \t\t*vmgo.MakeRequireMongo(overridePasswordCmd()),\n\t// \t\t*testEMail(),\n\t// \t}\n\t// }\n\treturn []cli.Command{\n\t\t*vmgo.MakeRequireMongo(createUserCmd()),\n\t\t*vmgo.MakeRequireMongo(setupCmd()),\n\t\t*vmgo.MakeRequireMongo(resetCmd()),\n\t\t*vmgo.MakeRequireMongo(overridePasswordCmd()),\n\t\t*testEMail(),\n\t}\n}", "func (c *Completed) Commands() []string {\n\treturn c.cs.s\n}", "func (o GroupContainerOutput) Commands() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GroupContainer) []string { return v.Commands }).(pulumi.StringArrayOutput)\n}", "func InteractiveCommands() map[string]Executor {\n return available.interactive\n}", "func (rcm *RecentCommandsService) GetRecentCommands() (ret []string, err error) {\n\tvar rows []*RecentCommand\n\tif dbErr := rcm.app.DBService.DB.Find(&rows).Error; dbErr != nil {\n\t\terr = fmt.Errorf(\"failed to query recent commands: %v\", dbErr)\n\t\treturn\n\t}\n\tret = make([]string, len(rows))\n\tfor i, r := range rows {\n\t\tret[i] = r.Command\n\t}\n\treturn\n}", "func (o *GetClientConfig200ResponseDenylist) GetVisible() bool {\n\tif o == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\n\treturn o.Visible\n}", "func Get() []cli.Command {\n\treturn []cli.Command{\n\t\t{\n\t\t\tName: \"network\",\n\t\t\tUsage: \"Software-defined networks used by servers.\",\n\t\t\tSubcommands: networkcommands.Get(),\n\t\t},\n\t\t{\n\t\t\tName: \"subnet\",\n\t\t\tUsage: \"Allocate IP address blocks, gateways, DNS servers, and host routes to networks.\",\n\t\t\tSubcommands: subnetcommands.Get(),\n\t\t},\n\t\t{\n\t\t\tName: 
\"port\",\n\t\t\tUsage: \"Virtual switch ports on logical network switches.\",\n\t\t\tSubcommands: portcommands.Get(),\n\t\t},\n\t\t{\n\t\t\tName: \"security-group\",\n\t\t\tUsage: \"Collections of rules for network traffic.\",\n\t\t\tSubcommands: securitygroupcommands.Get(),\n\t\t},\n\t\t{\n\t\t\tName: \"security-group-rule\",\n\t\t\tUsage: \"Define network ingress and egress rules.\",\n\t\t\tSubcommands: securitygrouprulecommands.Get(),\n\t\t},\n\t}\n}", "func (cmd *Command) VisibleFlagCategories() []VisibleFlagCategory {\n\tif cmd.flagCategories == nil {\n\t\tcmd.flagCategories = newFlagCategoriesFromFlags(cmd.Flags)\n\t}\n\treturn cmd.flagCategories.VisibleCategories()\n}", "func AllCmds() []string {\n\tvar ret []string\n\tfor k := range command.Commands {\n\t\tret = append(ret, k)\n\t}\n\tsort.Strings(ret)\n\treturn ret\n}", "func (n *PipeNode) Commands() []*CommandNode {\n\treturn n.cmds\n}", "func (v *IconView) GetVisibleRange() (*TreePath, *TreePath) {\n\tvar (\n\t\tcpathStart, cpathEnd *C.GtkTreePath\n\t\tpathStart, pathEnd *TreePath\n\t)\n\n\tC.gtk_icon_view_get_visible_range(v.native(), &cpathStart, &cpathEnd)\n\n\tif cpathStart != nil {\n\t\tpathStart = &TreePath{cpathStart}\n\t\truntime.SetFinalizer(pathStart, func(v *TreePath) { glib.FinalizerStrategy(v.free) })\n\t}\n\n\tif cpathEnd != nil {\n\t\tpathEnd = &TreePath{cpathEnd}\n\t\truntime.SetFinalizer(pathEnd, func(v *TreePath) { glib.FinalizerStrategy(v.free) })\n\t}\n\n\treturn pathStart, pathEnd\n}", "func (m *Device) GetCommands() (val map[string]Command, set bool) {\n\tif m.Commands == nil {\n\t\treturn\n\t}\n\n\treturn *m.Commands, true\n}", "func (b *Bot) GetMyCommands() (result axon.A, err error) {\n\tvar response interface{}\n\tif response, err = b.doGet(\"getMyCommands\"); err == nil {\n\t\tresult = response.([]interface{})\n\t}\n\treturn\n}", "func Show() *cobra.Command {\n\t//黑名单列表打印出来\n\tcmd := &cobra.Command{\n\t\tUse: \"show\",\n\t\tShort: \"show all peer from blacklist\",\n\t\tRun: 
showblacklist,\n\t}\n\treturn cmd\n\n}", "func (c *Commands) All() []*Command {\n\treturn c.list\n}", "func TestHiddenCommandIsHidden(t *testing.T) {\n\tif cmdHidden.IsAvailableCommand() {\n\t\tt.Errorf(\"Hidden command found!\")\n\t}\n}", "func SubCommands() []cli.Command {\n\treturn []cli.Command{\n\t\t{\n\t\t\tName: \"list-events\",\n\t\t\tUsage: \"Returns information about the events related to the account group.\",\n\t\t\tAction: cmd.EventList,\n\t\t},\n\t\t{\n\t\t\tName: \"list-system-events\",\n\t\t\tUsage: \"Returns information about system-wide events.\",\n\t\t\tAction: cmd.SysEventList,\n\t\t},\n\t}\n}", "func (bot *Bot) GetMyCommands() (cmds []Command, err error) {\n\t_, resBody, err := bot.client.Get(methodGetMyCommands, nil, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GetMyCommands: %w\", err)\n\t}\n\n\tres := &response{\n\t\tResult: cmds,\n\t}\n\terr = res.unpack(resBody)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GetMyCommands: %w\", err)\n\t}\n\n\treturn cmds, nil\n}", "func (commands Commands) Filter(names []string) Commands {\n\tvar filtered Commands\n\n\tfor _, cmd := range commands {\n\t\tfor _, name := range names {\n\t\t\tif cmd.HasName(name) {\n\t\t\t\tfiltered = append(filtered, cmd)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn filtered\n}", "func (d *DuinoDispatcher) GetCommands() (data []string) {\n\td.init()\n\n\tc := []string{\"CMSP\", \"gaga23\"}\n//\tc := []string{\"CSMP\"}\n\t//fmt.Printf(\"get Commands from Dispatcher called\\n\")\n\treturn c\n}", "func (o AppTemplateContainerOutput) Commands() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppTemplateContainer) []string { return v.Commands }).(pulumi.StringArrayOutput)\n}", "func (o GroupInitContainerOutput) Commands() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GroupInitContainer) []string { return v.Commands }).(pulumi.StringArrayOutput)\n}", "func ListCommands(params commands.ListCommandsParams, g *global.Global) middleware.Responder {\n\tnewDB := 
g.DB.Joins(\"inner join sessions on sessions.session_id = commands.session_id\")\n\tsince := time.Unix(*params.Since, 0)\n\tnewDB = newDB.Where(\"commands.created_at > ?\", since)\n\tif params.Content != nil && *params.Content != \"\" {\n\t\tnewDB = newDB.Where(\"commands.content LIKE ?\", *params.Content)\n\t}\n\tif params.AppName != nil && *params.AppName != \"\" {\n\t\tnewDB = newDB.Where(\"sessions.app_name LIKE ?\", *params.AppName)\n\t}\n\tif params.User != nil && *params.User != \"\" {\n\t\tnewDB = newDB.Where(\"sessions.user LIKE ?\", *params.User)\n\t}\n\tif params.SessionID != nil && *params.SessionID != 0 {\n\t\tnewDB = newDB.Where(\"commands.session_id = ?\", *params.SessionID)\n\t}\n\tvar dbCommands []models.Command\n\tnewDB.Order(\"commands.command_id desc\").Limit(*params.Limit).Offset(*params.Offset).Preload(\"Session\").Find(&dbCommands)\n\tpayload := make([]*swaggermodels.Command, len(dbCommands))\n\tfor i, dbCommand := range dbCommands {\n\t\tswaggerCommand := dbCommand.SwaggerModel()\n\t\tpayload[i] = &swaggerCommand\n\t}\n\treturn commands.NewListCommandsOK().WithPayload(payload)\n}", "func (ref *UIElement) VisibleRows() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleRowsAttribute)\n}", "func InfoCommands() map[string]Executor {\n return available.info\n}", "func (o GetAppTemplateContainerOutput) Commands() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainer) []string { return v.Commands }).(pulumi.StringArrayOutput)\n}", "func (c *Command) Commands() []*Command {\n\treturn c.commands\n}", "func SubCommands() []cli.Command {\n\treturn []cli.Command{\n\t\t{\n\t\t\tName: \"templates\",\n\t\t\tUsage: \"Provides information about CAT\",\n\t\t\tSubcommands: append(templates.SubCommands()),\n\t\t},\n\t\t{\n\t\t\tName: \"deployments\",\n\t\t\tUsage: \"Provides information about CAT deployments\",\n\t\t\tSubcommands: append(deployments.SubCommands()),\n\t\t},\n\t}\n}", "func getCommands() []cli.Command {\n\n\treturn 
[]cli.Command{\n\t\t{\n\t\t\tName: \"create_room\",\n\t\t\tAliases: []string{\"cr\"},\n\t\t\tUsage: \"amity-golang create_room <room_name> <office|Livingspace>\",\n\t\t\tAction: createRoom,\n\t\t},\n\t\t{\n\t\t\tName: \"add_person\",\n\t\t\tAliases: []string{\"ap\"},\n\t\t\tUsage: \"amity-golang add_person <First_Name> <Last_Name> <fellow|staff> <office_Name> <livingSpace_Name>\",\n\t\t\tAction: addPerson,\n\t\t},\n\t\t{\n\t\t\tName: \"print_allocations\",\n\t\t\tAliases: []string{\"pa\"},\n\t\t\tUsage: \"amity-golang print_allocations\",\n\t\t\tAction: printAllocations,\n\t\t},\n\t\t{\n\t\t\tName: \"print_room\",\n\t\t\tAliases: []string{\"pr\"},\n\t\t\tUsage: \"amity-golang print_room <room_name>\",\n\t\t\tAction: printRoom,\n\t\t},\n\t\t{\n\t\t\tName: \"print_unallocated_people\",\n\t\t\tAliases: []string{\"pu\"},\n\t\t\tUsage: \"amity-golang print_unallocated_people\",\n\t\t\tAction: printUnallocatedPeople,\n\t\t},\n\t\t{\n\t\t\tName: \"reallocate_person\",\n\t\t\tAliases: []string{\"rp\"},\n\t\t\tUsage: \"amity-golang reallocate_person <First_Name> <Last_Name> <New_Room_Name>\",\n\t\t\tAction: reallocatePerson,\n\t\t},\n\t}\n}", "func TestHiddenCommandExecutes(t *testing.T) {\n\texecuted := false\n\tc := &Command{\n\t\tUse: \"c\",\n\t\tHidden: true,\n\t\tRun: func(*Command, []string) { executed = true },\n\t}\n\n\toutput, err := executeCommand(c)\n\tif output != \"\" {\n\t\tt.Errorf(\"Unexpected output: %v\", output)\n\t}\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\n\tif !executed {\n\t\tt.Error(\"Hidden command should have been executed\")\n\t}\n}", "func CommandShowUnorganised(conf Config, ctx, query Query) error {\n\tif len(query.IDs) > 0 || query.HasOperators() {\n\t\treturn errors.New(\"query/context not used for show-unorganised\")\n\t}\n\n\tts, err := LoadTaskSet(conf.Repo, conf.IDsFile, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tts.FilterOrganised()\n\tts.DisplayByNext(ctx, true)\n\treturn nil\n}", "func (cluster 
*Cluster) GenerateCommandList(scope Scope, generator interface{}) []ShellCommand {\n\tcommands := []ShellCommand{}\n\tswitch generateCommand := generator.(type) {\n\tcase func(content int) []string:\n\t\tfor _, content := range cluster.ContentIDs {\n\t\t\tif content == -1 && scopeExcludesCoordinator(scope) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcommands = append(commands, NewShellCommand(scope, content, \"\", generateCommand(content)))\n\t\t}\n\tcase func(host string) []string:\n\t\tfor _, host := range cluster.Hostnames {\n\t\t\thostHasOneContent := len(cluster.GetContentsForHost(host)) == 1\n\t\t\tif host == cluster.GetHostForContent(-1, \"p\") && scopeExcludesCoordinator(scope) && hostHasOneContent {\n\t\t\t\t// Only exclude the coordinator host if there are no local segments\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif host == cluster.GetHostForContent(-1, \"m\") && scopeExcludesMirrors(scope) && hostHasOneContent {\n\t\t\t\t// Only exclude the standby coordinator host if there are no segments there\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcommands = append(commands, NewShellCommand(scope, -2, host, generateCommand(host)))\n\t\t}\n\tdefault:\n\t\tgplog.Fatal(nil, \"Generator function passed to GenerateCommandList had an invalid function header.\")\n\t}\n\treturn commands\n}", "func SubCommands() []cli.Command {\n\treturn []cli.Command{\n\t\t{\n\t\t\tName: \"list\",\n\t\t\tUsage: \"Lists all existing node pools in a cluster\",\n\t\t\tAction: cmd.NodePoolList,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Cluster Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"show\",\n\t\t\tUsage: \"Shows information about the node pool identified by the given id\",\n\t\t\tAction: cmd.NodePoolShow,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Node pool Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"create\",\n\t\t\tUsage: \"Creates a new node pool\",\n\t\t\tAction: 
cmd.NodePoolCreate,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Cluster Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t\tUsage: \"Logical name of the node pool\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"subnet-id\",\n\t\t\t\t\tUsage: \"Identifier of the subnet where this node pool is deployed\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"node-pool-plan-id\",\n\t\t\t\t\tUsage: \"Identifier of the node pool plan that this node pool is based\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"cpu-type\",\n\t\t\t\t\tUsage: \"Type of CPU each node of the node pools will have. \" +\n\t\t\t\t\t\t\"Can be nil only if the node pool plan does not have any cpu types\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"disk-size\",\n\t\t\t\t\tUsage: \"Size of the disk each node of the node pool will have, expressed in Gigabytes (GB)\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"min-nodes\",\n\t\t\t\t\tUsage: \"Minimum number of nodes the node pool will have\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"max-nodes\",\n\t\t\t\t\tUsage: \"Maximum number of nodes the node pool will have\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"desired-nodes\",\n\t\t\t\t\tUsage: \"Amount of nodes the node pool will tend to have if the node pool does not have autoscaling\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"pods-per-node\",\n\t\t\t\t\tUsage: \"Amount of pods each node of the node pool will have if the node pool plan supports it\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"update\",\n\t\t\tUsage: \"Updates an existing node pool identified by the given id\",\n\t\t\tAction: cmd.NodePoolUpdate,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Node pool Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t\tUsage: \"Logical name of the node 
pool\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"min-nodes\",\n\t\t\t\t\tUsage: \"Minimum number of nodes the node pool will have\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"max-nodes\",\n\t\t\t\t\tUsage: \"Maximum number of nodes the node pool will have\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"desired-nodes\",\n\t\t\t\t\tUsage: \"Amount of nodes the node pool will tend to have if the node pool does not have autoscaling\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"delete\",\n\t\t\tUsage: \"Deletes a node pool\",\n\t\t\tAction: cmd.NodePoolDelete,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Node pool Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"retry\",\n\t\t\tUsage: \"Retries the application of node pool identified by the given id\",\n\t\t\tAction: cmd.NodePoolRetry,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Node pool Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"show-plan\",\n\t\t\tUsage: \"Shows information about a specific node pool plan identified by the given id\",\n\t\t\tAction: cmd.NodePoolPlanShow,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Node pool plan Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (o AccessPackageCatalogOutput) ExternallyVisible() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *AccessPackageCatalog) pulumi.BoolPtrOutput { return v.ExternallyVisible }).(pulumi.BoolPtrOutput)\n}", "func (w *DebugModule) Commands() []Command {\n\treturn []Command{\n\t\t&echoCommand{},\n\t\t&echoEmbedCommand{},\n\t\t&disableCommand{},\n\t\t&enableCommand{},\n\t\t&updateCommand{},\n\t\t&dumpTablesCommand{},\n\t\t&listGuildsCommand{},\n\t\t&announceCommand{},\n\t\t&removeAliasCommand{},\n\t\t&getAuditCommand{},\n\t\t&setProfileCommand{},\n\t}\n}", "func Commands() []*cobra.Command {\n\treturn rootCmd.Commands()\n}", "func 
(w *DebugModule) Commands() []Command {\n\treturn []Command{\n\t\t&echoCommand{},\n\t\t&echoEmbedCommand{},\n\t\t&disableCommand{},\n\t\t&enableCommand{},\n\t\t&updateCommand{},\n\t\t&dumpTablesCommand{},\n\t\t&listGuildsCommand{},\n\t\t&announceCommand{},\n\t\t&removeAliasCommand{},\n\t\t&getAuditCommand{},\n\t}\n}", "func GetSubCommands() []*cobra.Command {\n\n\tvar getproject = &cobra.Command{\n\t\tUse: \"project\",\n\t\tRunE: actionGetProjectConfig,\n\t}\n\n\tvar getprojects = &cobra.Command{\n\t\tUse: \"projects\",\n\t\tRunE: actionGetProjectConfig,\n\t}\n\n\treturn []*cobra.Command{getproject, getprojects}\n}", "func Visible(m Map, station Coord, res chan<- VisibleResponse) {\n\tvisible := make([]Coord, 0)\n\tomap := m.Translate(station)\n\n\t// Remove the origin\n\tomap = omap.Without(Coord{0, 0})\n\n\t// Sort the map by clockwise angle\n\tsort.Sort(byClockwiseAngle(omap))\n\n\t// Loop through the map by clockwise angle\n\tfor _, ast := range omap {\n\t\t// Get all of the asteroids at the current clockwise angle, sort them\n\t\t// by distance, and choose the first (closest) one.\n\t\tmatches := omap.AsteroidsAtClockwiseAngle(ast.ClockwiseAngle())\n\t\tsort.Sort(byDistance(matches))\n\n\t\t// Append if it doesn't already exist\n\t\texists := false\n\t\tfor _, v := range visible {\n\t\t\tif v == matches[0] {\n\t\t\t\texists = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !exists {\n\t\t\tvisible = append(visible, matches[0])\n\t\t}\n\t}\n\n\tres <- VisibleResponse{station, Map(visible).Translate(station.Negative())}\n}", "func (c *Client) listCommands(ctx context.Context, opts []string) (*Response, error) {\n\tres := &Response{\n\t\tColumns: []string{\"Command\", \"Description\"},\n\t}\n\tfor i, v := range c.commands {\n\t\titem := []interface{}{\n\t\t\tv,\n\t\t\tc.descriptions[i],\n\t\t}\n\t\tres.Result = append(res.Result, item)\n\t}\n\treturn res, nil\n}", "func (m *Image) ValidCommands() {\n\t//normarize command\n\tfor cmd := range m.Commands {\n\t\tif 
!CheckCmdName(cmd) {\n\t\t\tfmt.Println(\"Invalid command. Removed:\", cmd)\n\t\t\tdelete(m.Commands, cmd)\n\t\t}\n\t}\n\treturn\n}", "func DaemonizedCommands() map[string]Executor {\n return available.daemonized\n}", "func GetSubCommands() []*cobra.Command {\n\n\tvar getrule = &cobra.Command{\n\t\tUse: \"db-rule\",\n\t\tRunE: actionGetDbRules,\n\t}\n\n\tvar getconfig = &cobra.Command{\n\t\tUse: \"db-config\",\n\t\tRunE: actionGetDbConfig,\n\t}\n\n\tvar getschema = &cobra.Command{\n\t\tUse: \"db-schema\",\n\t\tRunE: actionGetDbSchema,\n\t}\n\n\tvar getrules = &cobra.Command{\n\t\tUse: \"db-rules\",\n\t\tRunE: actionGetDbRules,\n\t}\n\n\tvar getconfigs = &cobra.Command{\n\t\tUse: \"db-configs\",\n\t\tRunE: actionGetDbConfig,\n\t}\n\n\tvar getschemas = &cobra.Command{\n\t\tUse: \"db-schemas\",\n\t\tRunE: actionGetDbSchema,\n\t}\n\n\treturn []*cobra.Command{getrule, getconfig, getschema, getrules, getconfigs, getschemas}\n}", "func GetHidden() (tags []Type, err error) {\n\ttags = make([]Type, 0)\n\n\t_, err = mongo.Find(\"blotter\", \"tags\", bson.M{\n\t\t\"hide\": true,\n\t}, nil, &tags)\n\n\tif err != nil {\n\t\treturn\n\t}\n\treturn tags, nil\n}", "func (me TxsdPresentationAttributesViewportsOverflow) IsHidden() bool { return me.String() == \"hidden\" }", "func (s *tsLibrary) Visibility() []string {\n\treturn s.RuleConfig.GetVisibility()\n}", "func Get() []cli.Command {\n\treturn []cli.Command{\n\t\tsetMetadata,\n\t\tupdateMetadata,\n\t\tgetMetadata,\n\t\tdeleteMetadata,\n\t}\n}", "func (c *Compatibility) GetValidCommands() []string {\n\treturn []string{\n\t\t\"apply\",\n\t\t\"destroy\",\n\t\t\"env\",\n\t\t\"get\",\n\t\t\"graph\",\n\t\t\"import\",\n\t\t\"init\",\n\t\t\"output\",\n\t\t\"plan\",\n\t\t\"providers\",\n\t\t\"refresh\",\n\t\t\"show\",\n\t\t\"taint\",\n\t\t\"untaint\",\n\t\t\"workspace\",\n\t\t\"force-unlock\",\n\t\t\"state\",\n\t}\n}", "func (ws *Workspace) Visible() bool {\n\treturn ws.visible\n}", "func (o *GetClientConfig200ResponseDenylist) 
GetVisibleOk() (*bool, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Visible, true\n}", "func SubCommands() []cli.Command {\n\treturn []cli.Command{\n\t\t{\n\t\t\tName: \"list\",\n\t\t\tUsage: \"Lists the domains of the account group.\",\n\t\t\tAction: cmd.DomainList,\n\t\t},\n\t\t{\n\t\t\tName: \"show\",\n\t\t\tUsage: \"Shows information about a specific domain.\",\n\t\t\tAction: cmd.DomainShow,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Domain Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"create\",\n\t\t\tUsage: \"Creates a new domain.\",\n\t\t\tAction: cmd.DomainCreate,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t\tUsage: \"Fully-qualified domain name (FQDN)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"ttl\",\n\t\t\t\t\tUsage: \"Time to live (TTL) of the Start of Authority (SOA) record\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"contact\",\n\t\t\t\t\tUsage: \"Contact e-mail\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"minimum\",\n\t\t\t\t\tUsage: \"The minimum TTL of the SOA record\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"update\",\n\t\t\tUsage: \"Updates an existing domain\",\n\t\t\tAction: cmd.DomainUpdate,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Domain Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"ttl\",\n\t\t\t\t\tUsage: \"Time to live (TTL) of the Start of Authority (SOA) record\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"contact\",\n\t\t\t\t\tUsage: \"Contact e-mail\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"minimum\",\n\t\t\t\t\tUsage: \"The minimum TTL of the SOA record\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"delete\",\n\t\t\tUsage: \"Deletes a domain\",\n\t\t\tAction: cmd.DomainDelete,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: 
\"id\",\n\t\t\t\t\tUsage: \"Domain Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"list_domain_records\",\n\t\t\tUsage: \"Lists the DNS records of a domain.\",\n\t\t\tAction: cmd.DomainRecordList,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"domain_id\",\n\t\t\t\t\tUsage: \"Domain Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"show_domain_record\",\n\t\t\tUsage: \"Shows information about a specific DNS record.\",\n\t\t\tAction: cmd.DomainRecordShow,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"domain_id\",\n\t\t\t\t\tUsage: \"Domain Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Record Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"create_domain_record\",\n\t\t\tUsage: \"Creates a new DNS record.\",\n\t\t\tAction: cmd.DomainRecordCreate,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"domain_id\",\n\t\t\t\t\tUsage: \"Domain Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"type\",\n\t\t\t\t\tUsage: \"Type of record (A, AAAA, CNAME, MX, TXT)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t\tUsage: \"Record name\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"content\",\n\t\t\t\t\tUsage: \"Record content\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"ttl\",\n\t\t\t\t\tUsage: \"Time to live (TTL)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"prio\",\n\t\t\t\t\tUsage: \"Priority (only MX records)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"server_id\",\n\t\t\t\t\tUsage: \"Identifier of the associated server (only A and AAAA records)\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"update_domain_record\",\n\t\t\tUsage: \"Updates an existing DNS record.\",\n\t\t\tAction: cmd.DomainRecordUpdate,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"domain_id\",\n\t\t\t\t\tUsage: \"Domain 
Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Record Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t\tUsage: \"Record name\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"content\",\n\t\t\t\t\tUsage: \"Record content\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"ttl\",\n\t\t\t\t\tUsage: \"Time to live (TTL)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"prio\",\n\t\t\t\t\tUsage: \"Priority (only MX records)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"server_id\",\n\t\t\t\t\tUsage: \"Identifier of the associated server (only A and AAAA records)\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"delete_domain_record\",\n\t\t\tUsage: \"Deletes a DNS record\",\n\t\t\tAction: cmd.DomainRecordDelete,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Record Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"domain_id\",\n\t\t\t\t\tUsage: \"Record Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (b *BotInfo) GetCommands() (value []BotCommand) {\n\tif b == nil {\n\t\treturn\n\t}\n\treturn b.Commands\n}", "func (e Empty) Visible(v bool) { e.visible = v }", "func FilterHiddenServices(services []occlient.Service) []occlient.Service {\n\tvar filteredServices []occlient.Service\n\tfor _, service := range services {\n\t\tif !service.Hidden {\n\t\t\tfilteredServices = append(filteredServices, service)\n\t\t}\n\t}\n\treturn filteredServices\n}", "func (mw *MapWidget) Visible() bool {\n\treturn !mw.hidden\n}", "func (c *Command) HideFlags(flags ...string) {\n\tfor _, f := range flags {\n\t\tc.hidden.String(f, \"\", \"\")\n\t}\n}", "func GetMetadataGetAllCmd() *cobra.Command {\n\tnonLetterRegex := regexp.MustCompile(\"[^[:alpha:]]+\")\n\tcmd := &cobra.Command{\n\t\tUse: \"all {scopes|sessions|records|scopespecs|contractspecs|recordspecs|locators}\",\n\t\tAliases: []string{\"a\"},\n\t\tShort: \"Get 
all entries of a certain type\",\n\t\tLong: fmt.Sprintf(`%[1]s all scopes - gets all scopes.\n%[1]s all sessions - gets all sessions.\n%[1]s all records - gets all records.\n%[1]s all scopespecs - gets all scope specifications.\n%[1]s all contractspecs - gets all contract specifications.\n%[1]s all recordspecs - gets all record specifications.\n%[1]s all locators - gets all object store locators.`, cmdStart),\n\t\tExample: fmt.Sprintf(`%[1]s all scopes\n%[1]s all sessions\n%[1]s all records\n%[1]s all scopespecs\n%[1]s all contractspecs\n%[1]s all recordspecs\n%[1]s all locators`, cmdStart),\n\t\tArgs: cobra.MinimumNArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\t// Smash all the args together. We only want a single \"word\" anyway.\n\t\t\tinput := strings.ToLower(trimSpaceAndJoin(args, \"\"))\n\t\t\t// Get rid of non-letters\n\t\t\t// This simplifies the switch below, e.g. record-specs becomes recordspecs.\n\t\t\tinput = nonLetterRegex.ReplaceAllString(input, \"\")\n\t\t\t// Make sure it ends with an \"s\"\n\t\t\t// This simplifies the switch below, e.g. 
\"scope\" becomes \"scopes\"\n\t\t\tif input[len(input)-1:] != \"s\" {\n\t\t\t\tinput += \"s\"\n\t\t\t}\n\t\t\tswitch input {\n\t\t\tcase \"scopes\":\n\t\t\t\treturn outputScopesAll(cmd)\n\t\t\tcase \"sessions\", \"sess\":\n\t\t\t\treturn outputSessionsAll(cmd)\n\t\t\tcase \"records\", \"recs\":\n\t\t\t\treturn outputRecordsAll(cmd)\n\t\t\tcase \"scopespecs\", \"scopespecifications\":\n\t\t\t\treturn outputScopeSpecsAll(cmd)\n\t\t\tcase \"contractspecs\", \"cspecs\", \"contractspecifications\":\n\t\t\t\treturn outputContractSpecsAll(cmd)\n\t\t\tcase \"recordspecs\", \"recspecs\", \"recordspecifications\", \"recspecifications\":\n\t\t\t\treturn outputRecordSpecsAll(cmd)\n\t\t\tcase \"locators\", \"locs\":\n\t\t\t\treturn outputOSLocatorsAll(cmd)\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"unknown entry type: %s\", input)\n\t\t},\n\t}\n\n\taddIncludeRequestFlag(cmd)\n\tflags.AddQueryFlagsToCmd(cmd)\n\tflags.AddPaginationFlagsToCmd(cmd, \"entries\")\n\n\treturn cmd\n}", "func ListCommands() {\n\tfmt.Fprintln(config.ErrOut, \"Commands:\")\n\tpadLen := 0\n\tfor _, cmd := range config.CommandList {\n\t\tif len(cmd.Name) > padLen {\n\t\t\tpadLen = len(cmd.Name)\n\t\t}\n\t}\n\tfor _, cmd := range config.CommandList {\n\t\tfmt.Fprintf(config.ErrOut, \" %s%s %s\\n\", cmd.Name, strings.Repeat(\" \", padLen-len(cmd.Name)), cmd.Title)\n\t}\n\tpad := strings.Repeat(\" \", len(config.Me)-1)\n\trunfileOpt := \"\"\n\tif config.EnableRunfileOverride {\n\t\trunfileOpt = \"[-r runfile] \"\n\t}\n\tfmt.Fprintf(config.ErrOut, \"Usage:\\n\")\n\tfmt.Fprintf(config.ErrOut, \" %s %shelp <command>\\n\", config.Me, runfileOpt)\n\tfmt.Fprintf(config.ErrOut, \" %s (show help for <command>)\\n\", pad)\n\tfmt.Fprintf(config.ErrOut, \" or %s %s<command> [option ...]\\n\", config.Me, runfileOpt)\n\tfmt.Fprintf(config.ErrOut, \" %s (run <command>)\\n\", pad)\n}", "func Get() []cli.Command {\n\treturn 
[]cli.Command{\n\t\tabandon,\n\t\tadopt,\n\t\tcreate,\n\t\tget,\n\t\tgetTemplate,\n\t\tlist,\n\t\tlistEvents,\n\t\tpreview,\n\t\tremove,\n\t\tupdate,\n\t}\n}", "func (handler commandHandler) getCmds() cmdMap {\n\treturn handler.Cmds\n}", "func (handler commandHandler) getCmds() cmdMap {\n\treturn handler.Cmds\n}", "func (e *AliasExecutor) Commands() map[command.Verb]CommandFn {\n\treturn map[command.Verb]CommandFn{\n\t\tcommand.ListVerb: e.List,\n\t}\n}", "func (c *Circuit) ConcurrentCommands() int64 {\n\treturn c.concurrentCommands.Get()\n}", "func (m *TestMod) Commands() map[string]*base.ModCommand {\n\treturn m.commands\n}", "func (s *Sensor) Commands() ([]string, error) {\n\treturn stringSliceFrom(attributeOf(s, commands))\n}", "func GetCmdQueryActives(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: budget.QueryActiveList,\n\t\tShort: \"Query active programs\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\tres, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/%s\", queryRoute, budget.QueryActiveList), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar actives []budget.Program\n\t\t\tcdc.MustUnmarshalJSON(res, &actives)\n\n\t\t\tif len(actives) == 0 {\n\t\t\t\tfmt.Println(\"No active Programs found\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfor _, program := range actives {\n\t\t\t\tfmt.Println(program.String())\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t}\n\n\treturn cmd\n}", "func GetCommands() []Command {\n\treturn []Command{\n\t\t{\n\t\t\tActivation: \"quote\",\n\t\t\tAction: Quote,\n\t\t},\n\t\t{\n\t\t\tActivation: \"site\",\n\t\t\tAction: Site,\n\t\t},\n\t}\n}", "func (outer outer) Visible() bool {\r\n\treturn false\r\n}", "func ViewAllTasksCommand(db *sql.DB) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"view\",\n\t\tShort: \"view all tasks\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error 
{\n\t\t\tresults, err := db.Query(\"SELECT * from task\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor results.Next() {\n\t\t\t\tvar task Task\n\t\t\t\terr = results.Scan(&task.Id, &task.Content, &task.Marked)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"%d. %s (%v)\\n\", task.Id, task.Content, task.Marked)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n}", "func IsCommandSet() PredicateFunc {\n\treturn func(v *VolumeGetProperty) bool {\n\t\treturn len(v.Command) != 0\n\t}\n}", "func printAvailableCommands() error {\n\n\t// Get available commands found in run.yaml\n\tavailableCommands, err := getAvailableCommands()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Print name of each command\n\tfmt.Println(\"\\nAvailable commands:\")\n\tfor c := range availableCommands {\n\t\tfmt.Printf(\"- %s\\n\", c)\n\t}\n\tfmt.Println()\n\n\t// No errors\n\treturn nil\n}", "func PrintCommands() {\n logger.Log(fmt.Sprintln(\"** Daemonized Commands **\"))\n for cmd, desc := range DaemonizedCommands() {\n logger.Log(fmt.Sprintf(\"%15s: %s\\n\", cmd, desc.description))\n }\n\n logger.Log(fmt.Sprintln(\"** Information Commands **\"))\n for cmd, desc := range InfoCommands() {\n logger.Log(fmt.Sprintf(\"%15s: %s\\n\", cmd, desc.description))\n }\n\n logger.Log(fmt.Sprintln(\"** Interactive Commands **\"))\n for cmd, desc := range InteractiveCommands() {\n logger.Log(fmt.Sprintf(\"%15s: %s\\n\", cmd, desc.description))\n }\n}", "func (c *Command) commandNames(includeRoot bool) []string {\n\tvar commands []string\n\tcmd := c.cobra\n\troot := cmd.Root()\n\tfor {\n\t\tif cmd == nil || (cmd == root && !includeRoot) {\n\t\t\tbreak\n\t\t}\n\t\tcommands = append(commands, cmd.Name())\n\t\tcmd = cmd.Parent()\n\t}\n\n\t// reverse commands\n\tfor i, j := 0, len(commands)-1; i < j; i, j = i+1, j-1 {\n\t\tcommands[i], commands[j] = commands[j], commands[i]\n\t}\n\n\treturn commands\n}", "func (me TxsdPresentationAttributesViewportsOverflow) IsVisible() 
bool {\n\treturn me.String() == \"visible\"\n}", "func SubCommands() []cli.Command {\n\treturn []cli.Command{\n\t\t{\n\t\t\tName: \"list\",\n\t\t\tUsage: \"Lists all available templates\",\n\t\t\tAction: cmd.TemplateList,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"labels\",\n\t\t\t\t\tUsage: \"A list of comma separated label as a query filter\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"show\",\n\t\t\tUsage: \"Shows information about a specific template\",\n\t\t\tAction: cmd.TemplateShow,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"create\",\n\t\t\tUsage: \"Creates a new template.\",\n\t\t\tAction: cmd.TemplateCreate,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t\tUsage: \"Name of the template\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"generic-image-id\",\n\t\t\t\t\tUsage: \"Identifier of the OS image that the template builds on\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"run-list\",\n\t\t\t\t\tUsage: \"A list of comma separated cookbook recipes that is run on the servers at start-up, i.e: --run-list imco::client,1password,joomla\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"cookbook-versions\",\n\t\t\t\t\tUsage: \"The cookbook versions used to configure the service recipes in the run-list, i.e: --cookbook-versions \\\"imco:3.0.3,1password~>1.3.0,joomla:0.11.0\\\" \\n\\tCookbook version format: [NAME<OPERATOR>VERSION] \\n\\tSupported Operators:\\n\\t\\tChef supermarket cookbook '~>','=','>=','>','<','<='\\n\\t\\tUploaded cookbook ':'\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"configuration-attributes\",\n\t\t\t\t\tUsage: \"The attributes used to configure the service recipes in the run-list, as a json formatted parameter. 
i.e: --configuration-attributes '{\\\"joomla\\\":{\\\"db\\\":{\\\"password\\\":\\\"my_pass\\\"},\\\"port\\\":\\\"8080\\\"}}'\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"configuration-attributes-from-file\",\n\t\t\t\t\tUsage: \"The attributes used to configure the service recipes in the run-list, from file or STDIN, as a json formatted parameter. \\n\\tFrom file: --configuration-attributes-from-file attrs.json \\n\\tFrom STDIN: --configuration-attributes-from-file -\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"labels\",\n\t\t\t\t\tUsage: \"A list of comma separated label names to be associated with template\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"update\",\n\t\t\tUsage: \"Updates an existing template\",\n\t\t\tAction: cmd.TemplateUpdate,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t\tUsage: \"Name of the template\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"run-list\",\n\t\t\t\t\tUsage: \"A list of comma separated cookbook recipes that is run on the servers at start-up, i.e: --run-list imco::client,1password,joomla\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"cookbook-versions\",\n\t\t\t\t\tUsage: \"The cookbook versions used to configure the service recipes in the run-list, i.e: --cookbook-versions \\\"imco:3.0.3,1password~>1.3.0,joomla:0.11.0\\\" \\n\\tCookbook version format: [NAME<OPERATOR>VERSION] \\n\\tSupported Operators:\\n\\t\\tChef supermarket cookbook '~>','=','>=','>','<','<='\\n\\t\\tUploaded cookbook ':'\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"configuration-attributes\",\n\t\t\t\t\tUsage: \"The attributes used to configure the service recipes in the run-list, as a json formatted parameter. 
i.e: --configuration-attributes '{\\\"joomla\\\":{\\\"db\\\":{\\\"password\\\":\\\"my_pass\\\"},\\\"port\\\":\\\"8080\\\"}}'\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"configuration-attributes-from-file\",\n\t\t\t\t\tUsage: \"The attributes used to configure the service recipes in the run-list, from file or STDIN, as a json formatted parameter. \\n\\tFrom file: --configuration-attributes-from-file attrs.json \\n\\tFrom STDIN: --configuration-attributes-from-file -\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"compile\",\n\t\t\tUsage: \"Compiles an existing template\",\n\t\t\tAction: cmd.TemplateCompile,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"delete\",\n\t\t\tUsage: \"Deletes a template\",\n\t\t\tAction: cmd.TemplateDelete,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"list-template-scripts\",\n\t\t\tUsage: \"Shows the script characterisations of a template\",\n\t\t\tAction: cmd.TemplateScriptList,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"template-id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"type\",\n\t\t\t\t\tUsage: \"Must be \\\"operational\\\", \\\"boot\\\" or \\\"shutdown\\\"\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"show-template-script\",\n\t\t\tUsage: \"Shows information about a specific script characterisation\",\n\t\t\tAction: cmd.TemplateScriptShow,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"template-id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Script Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"create-template-script\",\n\t\t\tUsage: \"Creates a new script characterisation for a 
template and appends it to the list of script characterisations of the same type.\",\n\t\t\tAction: cmd.TemplateScriptCreate,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"template-id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"type\",\n\t\t\t\t\tUsage: \"Must be \\\"operational\\\", \\\"boot\\\" or \\\"shutdown\\\"\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"script-id\",\n\t\t\t\t\tUsage: \"Identifier for the script that is parameterised by the script characterisation\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"parameter-values\",\n\t\t\t\t\tUsage: \"A map that assigns a value to each script parameter, as a json formatted parameter; i.e: '{\\\"param1\\\":\\\"val1\\\",\\\"param2\\\":\\\"val2\\\"}'\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"parameter-values-from-file\",\n\t\t\t\t\tUsage: \"A map that assigns a value to each script parameter, from file or STDIN, as a json formatted parameter. 
\\n\\tFrom file: --parameter-values-from-file params.json \\n\\tFrom STDIN: --parameter-values-from-file -\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"update-template-script\",\n\t\t\tUsage: \"Updates an existing script characterisation for a template.\",\n\t\t\tAction: cmd.TemplateScriptUpdate,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"template-id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Identifier for the template-script that is parameterised by the script characterisation\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"parameter-values\",\n\t\t\t\t\tUsage: \"A map that assigns a value to each script parameter, as a json formatted parameter; i.e: '{\\\"param1\\\":\\\"val1\\\",\\\"param2\\\":\\\"val2\\\"}'\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"parameter-values-from-file\",\n\t\t\t\t\tUsage: \"A map that assigns a value to each script parameter, from file or STDIN, as a json formatted parameter. 
\\n\\tFrom file: --parameter-values-from-file params.json \\n\\tFrom STDIN: --parameter-values-from-file -\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"reorder-template-scripts\",\n\t\t\tUsage: \"Reorders the scripts of the template and type specified according to the provided order, changing their execution order as corresponds.\",\n\t\t\tAction: cmd.TemplateScriptReorder,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"template-id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"type\",\n\t\t\t\t\tUsage: \"Must be \\\"operational\\\", \\\"boot\\\", or \\\"shutdown\\\"\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"script-ids\",\n\t\t\t\t\tUsage: \"A list of comma separated scripts ids that must contain all the ids of scripts of the given template and type in the desired execution order\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"delete-template-script\",\n\t\t\tUsage: \"Removes a parametrized script from a template\",\n\t\t\tAction: cmd.TemplateScriptDelete,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"template-id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Identifier for the template-script that is parameterised by the script characterisation\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"list-template-servers\",\n\t\t\tUsage: \"Returns information about the servers that use a specific template. 
\",\n\t\t\tAction: cmd.TemplateServersList,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"template-id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"add-label\",\n\t\t\tUsage: \"This action assigns a single label from a single labelable resource\",\n\t\t\tAction: cmd.LabelAdd,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"label\",\n\t\t\t\t\tUsage: \"Label name\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"resource-type\",\n\t\t\t\t\tUsage: \"Resource Type\",\n\t\t\t\t\tValue: \"template\",\n\t\t\t\t\tHidden: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"remove-label\",\n\t\t\tUsage: \"This action unassigns a single label from a single labelable resource\",\n\t\t\tAction: cmd.LabelRemove,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"id\",\n\t\t\t\t\tUsage: \"Template Id\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"label\",\n\t\t\t\t\tUsage: \"Label name\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"resource-type\",\n\t\t\t\t\tUsage: \"Resource Type\",\n\t\t\t\t\tValue: \"template\",\n\t\t\t\t\tHidden: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (mc ModuleClient) GetQueryCmd() *cobra.Command {\n\tstakingQueryCmd := &cobra.Command{\n\t\tUse: types.ModuleName,\n\t\tShort: \"Querying commands for the staking module\",\n\t}\n\n\tstakingQueryCmd.AddCommand(client.GetCommands(\n\t\tstakingcli.GetCmdQueryDelegation(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryDelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryUnbondingDelegation(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryUnbondingDelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryRedelegation(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryRedelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryValidator(mc.storeKey, 
mc.cdc),\n\t\tstakingcli.GetCmdQueryValidators(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryValidatorDelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryValidatorUnbondingDelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryValidatorRedelegations(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryParams(mc.storeKey, mc.cdc),\n\t\tstakingcli.GetCmdQueryPool(mc.storeKey, mc.cdc))...)\n\n\treturn stakingQueryCmd\n\n}", "func (v *Filter) ToggleVisible() error {\n\t// delete all user input from the tree view\n\tv.view.Clear()\n\n\t// toggle hiding\n\tv.hidden = !v.hidden\n\n\tif !v.hidden {\n\t\t_, err := v.gui.SetCurrentView(v.name)\n\t\tif err != nil {\n\t\t\tlogrus.Error(\"unable to toggle filter view: \", err)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\t// reset the cursor for the next time it is visible\n\t// Note: there is a subtle gocui behavior here where this cannot be called when the view\n\t// is newly visible. Is this a problem with dive or gocui?\n\treturn v.view.SetCursor(0, 0)\n}", "func (m *MarketplaceCmd) HideUsage() bool {\n\treturn true\n}", "func SubCommands() []cli.Command {\n\treturn []cli.Command{\n\t\t{\n\t\t\tName: \"export\",\n\t\t\tUsage: \"Exports infrastructure file from IMCO\",\n\t\t\tAction: cmd.TemporaryArchiveExport,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"server-ids\",\n\t\t\t\t\tUsage: \"A list of comma separated server identifiers to be exported\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"server-array-ids\",\n\t\t\t\t\tUsage: \"A list of comma separated server array identifiers to be exported\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"filepath\",\n\t\t\t\t\tUsage: \"Path and file name to download infrastructure 'csar' file, i.e: --filename /folder-path/filename.csar\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"time, t\",\n\t\t\t\t\tUsage: \"Time lapse -seconds- for export status check\",\n\t\t\t\t\tValue: 
cmd.DefaultTimeLapseExportStatusCheck,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"import\",\n\t\t\tUsage: \"Imports infrastructure file on IMCO\",\n\t\t\tAction: cmd.TemporaryArchiveImport,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"filepath\",\n\t\t\t\t\tUsage: \"Path and file name to infrastructure 'csar' file\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"label\",\n\t\t\t\t\tUsage: \"New label name to be associated with infrastructure\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"time, t\",\n\t\t\t\t\tUsage: \"Time lapse -seconds- for import status check\",\n\t\t\t\t\tValue: cmd.DefaultTimeLapseImportStatusCheck,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func GetCommands(cfg *config.Config) cli.Commands {\n\treturn []*cli.Command{\n\t\t// start this service\n\t\tServer(cfg),\n\n\t\t// interaction with this service\n\n\t\t// infos about this service\n\t\tHealth(cfg),\n\t\tVersion(cfg),\n\t}\n}" ]
[ "0.7514344", "0.7090503", "0.6987068", "0.6040602", "0.5923907", "0.57619125", "0.5538975", "0.5527406", "0.54518765", "0.54171145", "0.5409744", "0.53527737", "0.5350194", "0.53464735", "0.5335486", "0.5310782", "0.52715427", "0.5269247", "0.5267125", "0.5234093", "0.5224237", "0.5199841", "0.51996815", "0.5187415", "0.51865155", "0.5174202", "0.51678777", "0.5164847", "0.5158292", "0.51570624", "0.51528025", "0.5139714", "0.5126389", "0.5113141", "0.5084568", "0.5077496", "0.50771266", "0.50411546", "0.5031199", "0.5022633", "0.5010132", "0.5009485", "0.4997814", "0.49960637", "0.49954405", "0.4972681", "0.49643376", "0.49559465", "0.49504933", "0.49480492", "0.4928618", "0.49165353", "0.49137226", "0.48852852", "0.48822778", "0.48683402", "0.48652825", "0.4841793", "0.48404664", "0.48322657", "0.48245996", "0.4821542", "0.48173037", "0.48114234", "0.48054957", "0.47866258", "0.47857073", "0.4783538", "0.47729674", "0.47703028", "0.47570488", "0.47549352", "0.4728368", "0.47202933", "0.47149992", "0.4705444", "0.4704426", "0.46986163", "0.4690127", "0.46799573", "0.46799573", "0.46758527", "0.46731994", "0.46709654", "0.4670043", "0.46680284", "0.4651408", "0.46498573", "0.46446797", "0.4640099", "0.46395037", "0.46090496", "0.46084148", "0.46054447", "0.460525", "0.4595919", "0.45915622", "0.4583782", "0.45804414", "0.45716706" ]
0.8256026
0
VisibleFlagCategories returns a slice containing all the visible flag categories with the flags they contain
func (cmd *Command) VisibleFlagCategories() []VisibleFlagCategory { if cmd.flagCategories == nil { cmd.flagCategories = newFlagCategoriesFromFlags(cmd.Flags) } return cmd.flagCategories.VisibleCategories() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cmd *Command) VisibleCategories() []CommandCategory {\n\tret := []CommandCategory{}\n\tfor _, category := range cmd.categories.Categories() {\n\t\tif visible := func() CommandCategory {\n\t\t\tif len(category.VisibleCommands()) > 0 {\n\t\t\t\treturn category\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); visible != nil {\n\t\t\tret = append(ret, visible)\n\t\t}\n\t}\n\treturn ret\n}", "func (c Command) VisibleFlags() []Flag {\n\treturn visibleFlags(c.Flags)\n}", "func (cmd *Command) VisibleFlags() []Flag {\n\treturn visibleFlags(cmd.Flags)\n}", "func VisibleChatConversationStatuses() (res []chat1.ConversationStatus) {\n\tres = make([]chat1.ConversationStatus, 0, len(chat1.ConversationStatusMap))\n\tfor _, s := range chat1.ConversationStatusMap {\n\t\tif GetConversationStatusBehavior(s).ShowInInbox {\n\t\t\tres = append(res, s)\n\t\t}\n\t}\n\tsort.Sort(byConversationStatus(res))\n\treturn\n}", "func (ref *UIElement) VisibleCells() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleCellsAttribute)\n}", "func (ref *UIElement) VisibleColumns() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleColumnsAttribute)\n}", "func filterCategory(flags []cli.Flag, category string) (ret []cli.Flag) {\n\tfor _, f := range flags {\n\t\tif flagCategories[f.GetName()] == category {\n\t\t\tret = append(ret, f)\n\t\t}\n\t}\n\treturn\n}", "func (ref *UIElement) VisibleChildren() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleChildrenAttribute)\n}", "func (t *tagStorage) QueryCategories() ([]model.CategoryInfo, error) {\n\tvar cs []model.CategoryInfo\n\n\terr := t.db.Table(model.CateTableName()).Where(\"del_flag = ?\", 0).Find(&cs)\n\treturn cs, err\n}", "func (ref *UIElement) VisibleRows() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleRowsAttribute)\n}", "func (o OceanFiltersPtrOutput) Categories() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *OceanFilters) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn 
v.Categories\n\t}).(pulumi.StringArrayOutput)\n}", "func (m *EducationAssignment) GetCategories()([]EducationCategoryable) {\n return m.categories\n}", "func (j *DSRocketchat) Categories() map[string]struct{} {\n\treturn RocketchatCategories\n}", "func (j *DSGit) Categories() map[string]struct{} {\n\treturn GitCategories\n}", "func filterContainers(filterMap map[string]string, conts []types.Container) []types.Container {\n\tvar res []types.Container\n\n\tfor _, cont := range conts {\n\t\tmatchesAll := true\n\t\tfor k, v := range filterMap {\n\t\t\tif iv, ok := cont.Labels[k]; !ok || v != iv {\n\t\t\t\tmatchesAll = false\n\t\t\t}\n\t\t}\n\t\tif matchesAll {\n\t\t\tres = append(res, cont)\n\t\t}\n\t}\n\n\treturn res\n}", "func (o OceanFiltersOutput) Categories() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v OceanFilters) []string { return v.Categories }).(pulumi.StringArrayOutput)\n}", "func (f *FlagBase[T, C, V]) IsVisible() bool {\n\treturn !f.Hidden\n}", "func (cmd *Command) VisibleCommands() []*Command {\n\tvar ret []*Command\n\tfor _, command := range cmd.Commands {\n\t\tif !command.Hidden {\n\t\t\tret = append(ret, command)\n\t\t}\n\t}\n\treturn ret\n}", "func (Visibility) Values() []Visibility {\n\treturn []Visibility{\n\t\t\"IGNORED\",\n\t\t\"VISIBLE\",\n\t}\n}", "func (v *IconView) GetVisibleRange() (*TreePath, *TreePath) {\n\tvar (\n\t\tcpathStart, cpathEnd *C.GtkTreePath\n\t\tpathStart, pathEnd *TreePath\n\t)\n\n\tC.gtk_icon_view_get_visible_range(v.native(), &cpathStart, &cpathEnd)\n\n\tif cpathStart != nil {\n\t\tpathStart = &TreePath{cpathStart}\n\t\truntime.SetFinalizer(pathStart, func(v *TreePath) { glib.FinalizerStrategy(v.free) })\n\t}\n\n\tif cpathEnd != nil {\n\t\tpathEnd = &TreePath{cpathEnd}\n\t\truntime.SetFinalizer(pathEnd, func(v *TreePath) { glib.FinalizerStrategy(v.free) })\n\t}\n\n\treturn pathStart, pathEnd\n}", "func (g *Game) Categories(filter *CategoryFilter, sort *Sorting, embeds string) (*CategoryCollection, *Error) {\n\tif 
g.CategoriesData == nil {\n\t\treturn fetchCategoriesLink(firstLink(g, \"categories\"), filter, sort, embeds)\n\t}\n\n\treturn toCategoryCollection(g.CategoriesData), nil\n}", "func GetCategories() []*datastore.Category {\n\t// Connect to database\n\tdb := OpenPostgresDB()\n\tdefer db.Close()\n\n\t// Query database\n\trows, err := db.Query(\"SELECT * FROM category\")\n\terror_handler.Check(err)\n\n\t// Store categories for landing page in this array\n\tlandingPage := make([]*datastore.Category, 0)\n\n\t// Go through list of categories returned from db\n\tfor rows.Next() {\n\t\tvar id int\n\t\tvar name string\n\t\tvar image string\n\t\terr = rows.Scan(&id, &name, &image)\n\n\t\terror_handler.Check(err)\n\n\t\t// Store single instance of categories\n\t\tvar lpage = datastore.NewCategory(id, name, image)\n\n\t\t// Store above categories in landing page array\n\t\tlandingPage = append(landingPage, lpage)\n\t}\n\treturn landingPage\n}", "func (s *Store) FindMatcategories(filter bson.M, opts ...*options.FindOptions) ([]models.MatCategory, error) {\n\tvar ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tdb := s.db.Database(dbName)\n\tcol := db.Collection(\"matcategories\")\n\tcur, err := col.Find(ctx, filter, opts...)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcats := make([]models.MatCategory, 0)\n\n\terr = cur.All(ctx, &cats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cats, nil\n}", "func (b BoardFeature) Slice() []string {\n\treturn _BoardFeature.slice(uint64(b))\n}", "func (r Repository) Categories() []DbCategory {\n\tsession, _ := mgo.Dial(r.ipAddress)\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true)\n\n\tcategories := []DbCategory{}\n\tcollection := session.DB(\"u-talk\").C(\"forum\")\n\terr := collection.Find(nil).All(&categories)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn categories\n}", "func (r *RPC) Categories(c context.Context, arg *model.ArgIP, res 
*model.Categories) (err error) {\n\t*res, err = r.s.ListCategories(c, arg.RealIP)\n\treturn\n}", "func NewLayerClassSlice(types []LayerType) LayerClassSlice {\n\tvar max LayerType\n\tfor _, typ := range types {\n\t\tif typ > max {\n\t\t\tmax = typ\n\t\t}\n\t}\n\tt := make([]bool, int(max+1))\n\tfor _, typ := range types {\n\t\tt[typ] = true\n\t}\n\treturn t\n}", "func (f *Feature) NumCategories() int {\n\tif f.Kind != Feature_CATEGORICAL {\n\t\treturn 0\n\t}\n\tif f.Strategy == Feature_IDENTITY {\n\t\treturn -1\n\t}\n\treturn int(f.HashBuckets) + len(f.Vocabulary)\n}", "func hiddenIndices(Disclosure []byte) []int {\n\tHiddenIndices := make([]int, 0)\n\tfor index, disclose := range Disclosure {\n\t\tif disclose == 0 {\n\t\t\tHiddenIndices = append(HiddenIndices, index)\n\t\t}\n\t}\n\treturn HiddenIndices\n}", "func hiddenIndices(Disclosure []byte) []int {\n\tHiddenIndices := make([]int, 0)\n\tfor index, disclose := range Disclosure {\n\t\tif disclose == 0 {\n\t\t\tHiddenIndices = append(HiddenIndices, index)\n\t\t}\n\t}\n\treturn HiddenIndices\n}", "func flattenPrivateCloudVcenterStateEnumSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudVcenterStateEnum {\n\ta, ok := i.([]interface{})\n\tif !ok {\n\t\treturn []PrivateCloudVcenterStateEnum{}\n\t}\n\n\tif len(a) == 0 {\n\t\treturn []PrivateCloudVcenterStateEnum{}\n\t}\n\n\titems := make([]PrivateCloudVcenterStateEnum, 0, len(a))\n\tfor _, item := range a {\n\t\titems = append(items, *flattenPrivateCloudVcenterStateEnum(item.(interface{})))\n\t}\n\n\treturn items\n}", "func Categories(filter bson.M) ([]structure.Category, error) {\n\tvar categories []structure.Category\n\n\tsession := mgoSession.Copy()\n\tdefer session.Close()\n\n\tc := session.DB(dbName).C(\"categories\")\n\n\tpipeline := []bson.M{\n\t\tbson.M{\n\t\t\t\"$match\": filter,\n\t\t},\n\t\tbson.M{\n\t\t\t\"$project\": bson.M{\n\t\t\t\t\"_id\": 1,\n\t\t\t\t\"name\": 1,\n\t\t\t},\n\t\t},\n\t}\n\n\terr := 
c.Pipe(pipeline).All(&categories)\n\n\treturn categories, err\n}", "func (tbl AssociationTable) SliceCategory(req require.Requirement, wh where.Expression, qc where.QueryConstraint) ([]Category, error) {\n\treturn sliceAssociationTableCategoryPtrList(tbl, req, \"category\", wh, qc)\n}", "func (c *Client) ShowCategories(ctx context.Context, path string) (*http.Response, error) {\n\treq, err := c.NewShowCategoriesRequest(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.Client.Do(ctx, req)\n}", "func flattenPrivateCloudHcxStateEnumSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudHcxStateEnum {\n\ta, ok := i.([]interface{})\n\tif !ok {\n\t\treturn []PrivateCloudHcxStateEnum{}\n\t}\n\n\tif len(a) == 0 {\n\t\treturn []PrivateCloudHcxStateEnum{}\n\t}\n\n\titems := make([]PrivateCloudHcxStateEnum, 0, len(a))\n\tfor _, item := range a {\n\t\titems = append(items, *flattenPrivateCloudHcxStateEnum(item.(interface{})))\n\t}\n\n\treturn items\n}", "func (c *Client) List() (map[string]map[string]*flagger.Flag, error) {\n\tresp, err := http.Get(fmt.Sprintf(\"%s/flags\", c.URL))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tresponse := make(map[string]map[string]*flagger.Flag)\n\tdecoder := json.NewDecoder(resp.Body)\n\terr = decoder.Decode(&response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}", "func featuresFilteredByFlags(infos map[string]*FeatureInfo, activated, deactivated bool) []FeatureInfo {\n\tvar filteredList []FeatureInfo\n\tfor _, v := range infos {\n\t\tif activated && v.Activated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\n\t\tif deactivated && !v.Activated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\n\t\t// No flags were provided, so only filter out features that shouldn't be listed.\n\t\tif !activated && !deactivated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\t}\n\treturn 
filteredList\n}", "func (c *Session) VisibleProjects() []types.ID {\n\tvar projectIds []types.ID\n\tfor _, v := range c.Perms {\n\t\tpairs := strings.Split(v, \"_\")\n\t\tif len(pairs) == 2 {\n\t\t\tid, err := types.ParseID(pairs[1])\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprojectIds = append(projectIds, id)\n\t\t}\n\t}\n\tif projectIds == nil {\n\t\treturn []types.ID{}\n\t}\n\treturn projectIds\n}", "func (o *ClientConfiguration) GetCategoryRestrictions() []Category {\n\tif o == nil {\n\t\tvar ret []Category\n\t\treturn ret\n\t}\n\n\treturn o.CategoryRestrictions\n}", "func (d *Dao) ChannelCategories(c context.Context, arg *model.ArgChannelCategory) (res []*model.ChannelCategory, err error) {\n\trows, err := d.db.Query(c, _channelCategorieSQL, arg.State, arg.LastID, arg.Size)\n\tif err != nil {\n\t\tlog.Error(\"d.dao.ChannelCategories(%v) error(%v)\", arg, err)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\tres = make([]*model.ChannelCategory, 0, arg.Size)\n\tfor rows.Next() {\n\t\tt := &model.ChannelCategory{}\n\t\tif err = rows.Scan(&t.ID, &t.Name, &t.Order, &t.Attr, &t.State, &t.CTime, &t.MTime); err != nil {\n\t\t\tlog.Error(\"d.dao.ChannelCategories(%v) rows.Scan() error(%v)\", arg, err)\n\t\t\treturn\n\t\t}\n\t\tres = append(res, t)\n\t}\n\treturn\n}", "func PossibleCategoryValues() []Category {\n\treturn []Category{\n\t\tCategoryCost,\n\t\tCategoryHighAvailability,\n\t\tCategoryOperationalExcellence,\n\t\tCategoryPerformance,\n\t\tCategorySecurity,\n\t}\n}", "func flattenProjectLifecycleStateEnumSlice(c *Client, i interface{}) []ProjectLifecycleStateEnum {\n\ta, ok := i.([]interface{})\n\tif !ok {\n\t\treturn []ProjectLifecycleStateEnum{}\n\t}\n\n\tif len(a) == 0 {\n\t\treturn []ProjectLifecycleStateEnum{}\n\t}\n\n\titems := make([]ProjectLifecycleStateEnum, 0, len(a))\n\tfor _, item := range a {\n\t\titems = append(items, *flattenProjectLifecycleStateEnum(item.(interface{})))\n\t}\n\n\treturn items\n}", "func (s UserSet) SidebarVisible() bool 
{\n\tres, _ := s.RecordCollection.Get(models.NewFieldName(\"SidebarVisible\", \"sidebar_visible\")).(bool)\n\treturn res\n}", "func (ob *Obj3D) SelectCats(cats []string) {\n\tnc := len(ob.Cats)\n\tfor ci := nc - 1; ci >= 0; ci-- {\n\t\tcat := ob.Cats[ci]\n\n\t\tsel := false\n\t\tfor _, cs := range cats {\n\t\t\tif cat == cs {\n\t\t\t\tsel = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !sel {\n\t\t\tob.Cats = append(ob.Cats[:ci], ob.Cats[ci+1:]...)\n\t\t\tob.ObjFilesAll = append(ob.ObjFilesAll[:ci], ob.ObjFilesAll[ci+1:]...)\n\t\t\tob.ObjFilesTrain = append(ob.ObjFilesTrain[:ci], ob.ObjFilesTrain[ci+1:]...)\n\t\t\tob.ObjFilesTest = append(ob.ObjFilesTest[:ci], ob.ObjFilesTest[ci+1:]...)\n\t\t}\n\t}\n\tob.Flats()\n}", "func flattenPrivateCloudStateEnumSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudStateEnum {\n\ta, ok := i.([]interface{})\n\tif !ok {\n\t\treturn []PrivateCloudStateEnum{}\n\t}\n\n\tif len(a) == 0 {\n\t\treturn []PrivateCloudStateEnum{}\n\t}\n\n\titems := make([]PrivateCloudStateEnum, 0, len(a))\n\tfor _, item := range a {\n\t\titems = append(items, *flattenPrivateCloudStateEnum(item.(interface{})))\n\t}\n\n\treturn items\n}", "func GetCategories() []e.Category {\n\tvar categories []e.Category\n\tdb.DB.Order(\"id\").Find(&categories)\n\treturn categories\n}", "func (d UserData) Categories() m.PartnerCategorySet {\n\tval := d.ModelData.Get(models.NewFieldName(\"Categories\", \"category_ids\"))\n\tif !d.Has(models.NewFieldName(\"Categories\", \"category_ids\")) || val == nil || val == (*interface{})(nil) {\n\t\tval = models.InvalidRecordCollection(\"PartnerCategory\")\n\t}\n\treturn val.(models.RecordSet).Collection().Wrap().(m.PartnerCategorySet)\n}", "func (v View) Slice() []bool {\n\t// TODO: This forces an alloc, as an alternative a slice could be taken\n\t// as input that can be (re)used by the client. 
Are there use cases\n\t// where this would actually make sense?\n\tresult := make([]bool, v.Len())\n\tfor i, j := range v.index {\n\t\tresult[i] = v.data[j]\n\t}\n\treturn result\n}", "func (r *RPC) CategoriesMap(c context.Context, arg *model.ArgIP, res *map[int64]*model.Category) (err error) {\n\t*res, err = r.s.ListCategoriesMap(c, arg.RealIP)\n\treturn\n}", "func (m *OutlookUser) GetMasterCategories()([]OutlookCategoryable) {\n return m.masterCategories\n}", "func (dt *DockerTracker) ListJobCategories() ([]string, error) {\n\tif err := dt.check(); err != nil {\n\t\treturn nil, err\n\t}\n\timages, err := dt.cli.ImageList(context.Background(), types.ImageListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tids := make([]string, len(images))\n\tfor _, i := range images {\n\t\tids = append(ids, strings.Join(i.RepoTags, \"/\"))\n\t}\n\treturn ids, nil\n}", "func GetSubCategories() []e.SubCategory {\n\tvar subCategories []e.SubCategory\n\tdb.DB.Where(\"is_searchable = ?\", true).Find(&subCategories)\n\treturn subCategories\n}", "func FilterHiddenServices(services []occlient.Service) []occlient.Service {\n\tvar filteredServices []occlient.Service\n\tfor _, service := range services {\n\t\tif !service.Hidden {\n\t\t\tfilteredServices = append(filteredServices, service)\n\t\t}\n\t}\n\treturn filteredServices\n}", "func (o AccessPackageCatalogOutput) ExternallyVisible() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *AccessPackageCatalog) pulumi.BoolPtrOutput { return v.ExternallyVisible }).(pulumi.BoolPtrOutput)\n}", "func (d *Domus) CategoriesInRoom(rk RoomKey) ([]Category, error) {\n\tqueries := map[string]string{\n\t\t\"room_key\": string(rk),\n\t}\n\tresp, err := d.GetWithSession(\"/Mobile/GetCategories\", queries)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tvar body struct {\n\t\tCategories []Category `json:\"category\"`\n\t}\n\tif err := json.NewDecoder(resp.Body).Decode(&body); err != nil {\n\t\treturn nil, 
err\n\t}\n\treturn []Category(body.Categories), nil\n}", "func PossibleCertificateVisibilityValues() []CertificateVisibility {\n\treturn []CertificateVisibility{CertificateVisibilityRemoteUser, CertificateVisibilityStartTask, CertificateVisibilityTask}\n}", "func flattenPrivateCloudNsxStateEnumSlice(c *Client, i interface{}, res *PrivateCloud) []PrivateCloudNsxStateEnum {\n\ta, ok := i.([]interface{})\n\tif !ok {\n\t\treturn []PrivateCloudNsxStateEnum{}\n\t}\n\n\tif len(a) == 0 {\n\t\treturn []PrivateCloudNsxStateEnum{}\n\t}\n\n\titems := make([]PrivateCloudNsxStateEnum, 0, len(a))\n\tfor _, item := range a {\n\t\titems = append(items, *flattenPrivateCloudNsxStateEnum(item.(interface{})))\n\t}\n\n\treturn items\n}", "func (cRepo *CategoryGormRepo) Categories() ([]entity.Category, []error) {\n\tctgs := []entity.Category{}\n\terrs := cRepo.conn.Find(&ctgs).GetErrors()\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\treturn ctgs, errs\n}", "func kidsWithCandies(candies []int, extraCandies int) []bool {\n max := candies[0]\n var res []bool\n for _, value := range candies {\n if value >= max {\n max = value\n }\n }\n for _, value := range candies {\n if value+extraCandies >= max {\n res = append(res, true)\n } else {\n res = append(res, false)\n }\n }\n return res\n}", "func (m *PatientMutation) CategoryIDs() (ids []int) {\n\tif id := m.category; id != nil {\n\t\tids = append(ids, *id)\n\t}\n\treturn\n}", "func (cri *CategoryRepositoryImpl) Categories() ([]entity.Category, error) {\n\n\trows, err := cri.conn.Query(\"SELECT * FROM categories;\")\n\tif err != nil {\n\t\treturn nil, errors.New(\"Could not query the database\")\n\t}\n\tdefer rows.Close()\n\n\tctgs := []entity.Category{}\n\n\tfor rows.Next() {\n\t\tcategory := entity.Category{}\n\t\terr = rows.Scan(&category.ID, &category.Name, &category.Description, &category.Image)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tctgs = append(ctgs, category)\n\t}\n\n\treturn ctgs, nil\n}", "func (state *State) 
GetCinfoSlice() []*info.ZInfoContentTree {\n\treturn state.deviceInfo.Cinfo\n}", "func OccupiedVisibleNeighbours(seatMap *SeatMap, x, y int) int {\n\toccupied := 0\n\n\tfor i := -1; i <= 1; i++ {\n\t\tfor j := -1; j <= 1; j++ {\n\t\t\tif i == 0 && j == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif nextSeatOccupied(seatMap, x, y, i, j) {\n\t\t\t\toccupied++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn occupied\n}", "func getCategoryList(pages *PagesSlice) CategoryList {\n\tmapList := make(CategoryList)\n\t// recentList is passed in which is already sorted\n\t// just need to map the pages to category\n\n\t// read category mash config, which allows to create\n\t// a new category based on combining multiple categories\n\t// this is used on my site when I want to display a list\n\t// of recent items from similar categories together\n\treverseMap := make(map[string]string)\n\n\t// config consists of a hash with new category being the\n\t// key and a comma separated list of existing categories\n\t// being the value, create a reverse map\n\tfor k, v := range config.CategoryMash {\n\t\tcats := strings.Split(string(v), \",\")\n\t\t//loop through split and add to reverse map\n\t\tfor _, cat := range cats {\n\t\t\treverseMap[cat] = string(k)\n\t\t}\n\t}\n\n\tfor _, page := range *pages {\n\n\t\t// create new category from category mash map\n\t\tif reverseMap[page.Category] != page.Category {\n\t\t\tthisCategory := reverseMap[page.Category]\n\t\t\tmapList[thisCategory] = append(mapList[thisCategory], page)\n\t\t}\n\n\t\t// still want a list of regular categories\n\t\t// simpleCategory replaces / in sub-dir categories to _\n\t\t// this always the category to be referenced in template\n\t\tsimpleCategory := strings.Replace(page.Category, string(os.PathSeparator), \"_\", -1)\n\t\tmapList[simpleCategory] = append(mapList[simpleCategory], page)\n\t}\n\treturn mapList\n}", "func (d UserData) SidebarVisible() bool {\n\tval := d.ModelData.Get(models.NewFieldName(\"SidebarVisible\", 
\"sidebar_visible\"))\n\tif !d.Has(models.NewFieldName(\"SidebarVisible\", \"sidebar_visible\")) {\n\t\treturn *new(bool)\n\t}\n\treturn val.(bool)\n}", "func (s *tsLibrary) Visibility() []string {\n\treturn s.RuleConfig.GetVisibility()\n}", "func (tbl DbCompoundTable) SliceCategory(req require.Requirement, wh where.Expression, qc where.QueryConstraint) ([]Category, error) {\n\treturn tbl.sliceCategoryList(req, \"category\", wh, qc)\n}", "func (j *DSGitHub) Categories() map[string]struct{} {\n\treturn GitHubCategories\n}", "func getCategoryNames(categories []wikimedia.ApiPageCategory) []string {\n\tvar values []string\n\tfor _, cat := range categories {\n\t\tvalues = append(values, cat.Name)\n\t}\n\n\treturn values\n}", "func (k Feature_Kind) IsCategorical() bool { return k == Feature_CATEGORICAL }", "func PossibleExtensionCategoryValues() []ExtensionCategory {\n\treturn []ExtensionCategory{\n\t\tExtensionCategoryNotSpecified,\n\t\tExtensionCategoryResourceCreationBegin,\n\t\tExtensionCategoryResourceCreationCompleted,\n\t\tExtensionCategoryResourceCreationValidate,\n\t\tExtensionCategoryResourceDeletionBegin,\n\t\tExtensionCategoryResourceDeletionCompleted,\n\t\tExtensionCategoryResourceDeletionValidate,\n\t\tExtensionCategoryResourceMoveBegin,\n\t\tExtensionCategoryResourceMoveCompleted,\n\t\tExtensionCategoryResourcePatchBegin,\n\t\tExtensionCategoryResourcePatchCompleted,\n\t\tExtensionCategoryResourcePatchValidate,\n\t\tExtensionCategoryResourcePostAction,\n\t\tExtensionCategoryResourceReadBegin,\n\t\tExtensionCategoryResourceReadValidate,\n\t\tExtensionCategorySubscriptionLifecycleNotification,\n\t}\n}", "func (conn *DB) WhereItemsAreVisible(groupID int64) *DB {\n\treturn conn.WhereGroupHasPermissionOnItems(groupID, \"view\", \"info\")\n}", "func (o GoogleCloudRetailV2alphaProductResponseOutput) Categories() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaProductResponse) []string { return v.Categories 
}).(pulumi.StringArrayOutput)\n}", "func (v *Layer) IsVisible() bool {\n\treturn v != nil\n}", "func GetCategories() ([]Category, error) {\n\tvar (\n\t\terr error\n\t\tcategories []Category\n\t)\n\n\tc := newCategoryCollection()\n\tdefer c.Close()\n\n\terr = c.Session.Find(nil).Sort(\"-updated_on\").All(&categories)\n\tif err != nil {\n\t\treturn categories, err\n\t}\n\n\treturn categories, err\n}", "func (env *PhotonEnvReader) GetChannelsByState(state int) (channels []Channel) {\n\tfor _, token := range env.Tokens {\n\t\tfor _, channel := range token.Channels {\n\t\t\tif channel.State == state {\n\t\t\t\tchannels = append(channels, channel)\n\t\t\t}\n\t\t}\n\t}\n\treturn channels\n}", "func ToBoolSlice(i interface{}) []bool {\n\treturn cast.ToBoolSlice(i)\n}", "func (o *GetClientConfig200ResponseDenylist) GetVisibleOk() (*bool, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Visible, true\n}", "func (TestGridSessionArtifactCategory) Values() []TestGridSessionArtifactCategory {\n\treturn []TestGridSessionArtifactCategory{\n\t\t\"VIDEO\",\n\t\t\"LOG\",\n\t}\n}", "func (o *GetClientConfig200ResponseDenylist) GetVisible() bool {\n\tif o == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\n\treturn o.Visible\n}", "func GetCategories() []string {\n\tstmt := \"select name from category\"\n\trows := database.query(stmt)\n\tvar categories []string\n\tvar category string\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(&category)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tcategories = append(categories, category)\n\t}\n\treturn categories\n}", "func flattenImageStatusEnumSlice(c *Client, i interface{}) []ImageStatusEnum {\n\ta, ok := i.([]interface{})\n\tif !ok {\n\t\treturn []ImageStatusEnum{}\n\t}\n\n\tif len(a) == 0 {\n\t\treturn []ImageStatusEnum{}\n\t}\n\n\titems := make([]ImageStatusEnum, 0, len(a))\n\tfor _, item := range a {\n\t\titems = append(items, *flattenImageStatusEnum(item.(interface{})))\n\t}\n\n\treturn items\n}", "func (b Bits) 
Slice() (s []int) {\n\tfor x, w := range b.Bits {\n\t\tif w == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tt := mb.TrailingZeros64(w)\n\t\ti := t // index in w of next 1 bit\n\t\tfor {\n\t\t\tn := x<<6 | i\n\t\t\tif n >= b.Num {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ts = append(s, n)\n\t\t\tw >>= uint(t + 1)\n\t\t\tif w == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt = mb.TrailingZeros64(w)\n\t\t\ti += 1 + t\n\t\t}\n\t}\n\treturn\n}", "func flattenInterconnectStateEnumSlice(c *Client, i interface{}) []InterconnectStateEnum {\n\ta, ok := i.([]interface{})\n\tif !ok {\n\t\treturn []InterconnectStateEnum{}\n\t}\n\n\tif len(a) == 0 {\n\t\treturn []InterconnectStateEnum{}\n\t}\n\n\titems := make([]InterconnectStateEnum, 0, len(a))\n\tfor _, item := range a {\n\t\titems = append(items, *flattenInterconnectStateEnum(item.(interface{})))\n\t}\n\n\treturn items\n}", "func GetLVMainCategories(url string) []string {\n\t// Slice to hold category names\n\tvar mainCategories []string\n\t// Init colly collector\n\tc := createCollyCollector()\n\t// Find within the children of each li tag anything with class .lv-header-main-nav__item\n\t// Append the text of that span to categories array\n\tc.OnHTML(\"li\", func(e *colly.HTMLElement) {\n\t\tpageDom := e.DOM\n\t\tpageDom.Find(\".lv-header-main-nav__item\").Each(func(i int, s *goquery.Selection) {\n\t\t\tmainCategories = append(mainCategories, s.Find(\"span\").Text())\n\t\t})\n\t})\n\t// Request Handler\n\tc.OnRequest(func(r *colly.Request) {\n\t\tfmt.Println(\"Visiting\", r.URL.String())\n\t})\n\t// Error Handler\n\tc.OnError(func(r *colly.Response, err error) {\n\t\tlog.Println(\"Request URL:\", r.Request.URL, \"failed with response:\", r, \"\\nError:\", err)\n\t})\n\t// Response Handler\n\tc.OnResponse(func(r *colly.Response) {\n\t\t//fmt.Println(r.Body)\n\t})\n\t// Send visit request to colly collector\n\tc.Visit(url)\n\treturn mainCategories\n}", "func NewGetAssetCategoriesForbidden() *GetAssetCategoriesForbidden {\n\treturn 
&GetAssetCategoriesForbidden{}\n}", "func AllocIds(kind FaceKind, count int) (ids []FaceId) {\n\tallocated := make(map[FaceId]bool)\n\tfor len(allocated) < count {\n\t\tallocated[AllocId(kind)] = true\n\t}\n\tfor id := range allocated {\n\t\tids = append(ids, id)\n\t}\n\treturn ids\n}", "func flattenImageDeprecatedStateEnumSlice(c *Client, i interface{}) []ImageDeprecatedStateEnum {\n\ta, ok := i.([]interface{})\n\tif !ok {\n\t\treturn []ImageDeprecatedStateEnum{}\n\t}\n\n\tif len(a) == 0 {\n\t\treturn []ImageDeprecatedStateEnum{}\n\t}\n\n\titems := make([]ImageDeprecatedStateEnum, 0, len(a))\n\tfor _, item := range a {\n\t\titems = append(items, *flattenImageDeprecatedStateEnum(item.(interface{})))\n\t}\n\n\treturn items\n}", "func (s UserSet) Categories() m.PartnerCategorySet {\n\tres, _ := s.RecordCollection.Get(models.NewFieldName(\"Categories\", \"category_ids\")).(models.RecordSet).Collection().Wrap(\"PartnerCategory\").(m.PartnerCategorySet)\n\treturn res\n}", "func (s BmCategoryStorage) GetAll(r api2go.Request, skip int, take int) []*BmModel.Category {\n\tin := BmModel.Category{}\n\tvar out []BmModel.Category\n\terr := s.db.FindMulti(r, &in, &out, skip, take)\n\tif err == nil {\n\t\tvar tmp []*BmModel.Category\n\t\t//tmp := make(map[string]*BmModel.Category)\n\t\tfor _, iter := range out {\n\t\t\ts.db.ResetIdWithId_(&iter)\n\t\t\ttmp = append(tmp, &iter)\n\t\t\t//tmp[iter.ID] = &iter\n\t\t}\n\t\treturn tmp\n\t} else {\n\t\treturn nil //make(map[string]*BmModel.Category)\n\t}\n}", "func featureInfoList(ctx context.Context, cl *featuregateclient.FeatureGateClient, featuregate string) ([]FeatureInfo, error) {\n\tclusterFeatures, err := cl.GetFeatureList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgateList, err := cl.GetFeatureGateList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfeatureInfos := collectFeaturesInfo(gateList.Items, clusterFeatures.Items)\n\n\tsetShowInList(featureInfos, includeExperimental, featuregate)\n\n\tfilteredList := 
featuresFilteredByFlags(featureInfos, activated, deactivated)\n\treturn filteredList, nil\n}", "func GetHidden() (tags []Type, err error) {\n\ttags = make([]Type, 0)\n\n\t_, err = mongo.Find(\"blotter\", \"tags\", bson.M{\n\t\t\"hide\": true,\n\t}, nil, &tags)\n\n\tif err != nil {\n\t\treturn\n\t}\n\treturn tags, nil\n}", "func (outer outer) Visible() bool {\r\n\treturn false\r\n}", "func flattenCommunities(aggCommunities, communities []map[int]bool,\n) []map[int]bool {\n\tresult := []map[int]bool{}\n\tfor _, aggC := range aggCommunities {\n\t\tnewC := map[int]bool{}\n\t\tfor idxC, _ := range aggC {\n\t\t\tc := communities[idxC]\n\t\t\tfor pt, _ := range c {\n\t\t\t\tnewC[pt] = true\n\t\t\t}\n\t\t}\n\t\tresult = append(result, newC)\n\t}\n\treturn result\n}", "func CascadingFilterMetricViews(level configtelemetry.Level) []*view.View {\n\tif level == configtelemetry.LevelNone {\n\t\treturn nil\n\t}\n\n\tlatencyDistributionAggregation := view.Distribution(1, 2, 5, 10, 25, 50, 75, 100, 150, 200, 300, 400, 500, 750, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 50000)\n\tageDistributionAggregation := view.Distribution(1, 2, 5, 10, 20, 30, 40, 50, 60, 90, 120, 180, 300, 600, 1800, 3600, 7200)\n\n\toverallDecisionLatencyView := &view.View{\n\t\tName: statOverallDecisionLatencyus.Name(),\n\t\tMeasure: statOverallDecisionLatencyus,\n\t\tDescription: statOverallDecisionLatencyus.Description(),\n\t\tAggregation: latencyDistributionAggregation,\n\t}\n\n\ttraceRemovalAgeView := &view.View{\n\t\tName: statTraceRemovalAgeSec.Name(),\n\t\tMeasure: statTraceRemovalAgeSec,\n\t\tDescription: statTraceRemovalAgeSec.Description(),\n\t\tAggregation: ageDistributionAggregation,\n\t}\n\n\tlateSpanArrivalView := &view.View{\n\t\tName: statLateSpanArrivalAfterDecision.Name(),\n\t\tMeasure: statLateSpanArrivalAfterDecision,\n\t\tDescription: statLateSpanArrivalAfterDecision.Description(),\n\t\tAggregation: ageDistributionAggregation,\n\t}\n\n\tcountPolicyEvaluationErrorView := 
&view.View{\n\t\tName: statPolicyEvaluationErrorCount.Name(),\n\t\tMeasure: statPolicyEvaluationErrorCount,\n\t\tDescription: statPolicyEvaluationErrorCount.Description(),\n\t\tAggregation: view.Sum(),\n\t}\n\n\tcountFinalDecisionView := &view.View{\n\t\tName: statCascadingFilterDecision.Name(),\n\t\tMeasure: statCascadingFilterDecision,\n\t\tDescription: statCascadingFilterDecision.Description(),\n\t\tTagKeys: []tag.Key{tagPolicyKey, tagCascadingFilterDecisionKey},\n\t\tAggregation: view.Sum(),\n\t}\n\n\tcountPolicyDecisionsView := &view.View{\n\t\tName: statPolicyDecision.Name(),\n\t\tMeasure: statPolicyDecision,\n\t\tDescription: statPolicyDecision.Description(),\n\t\tTagKeys: []tag.Key{tagPolicyKey, tagPolicyDecisionKey},\n\t\tAggregation: view.Sum(),\n\t}\n\n\tpolicyLatencyView := &view.View{\n\t\tName: statDecisionLatencyMicroSec.Name(),\n\t\tMeasure: statDecisionLatencyMicroSec,\n\t\tDescription: statDecisionLatencyMicroSec.Description(),\n\t\tTagKeys: []tag.Key{tagPolicyKey},\n\t\tAggregation: view.Sum(),\n\t}\n\n\tcountTraceDroppedTooEarlyView := &view.View{\n\t\tName: statDroppedTooEarlyCount.Name(),\n\t\tMeasure: statDroppedTooEarlyCount,\n\t\tDescription: statDroppedTooEarlyCount.Description(),\n\t\tAggregation: view.Sum(),\n\t}\n\tcountTraceIDArrivalView := &view.View{\n\t\tName: statNewTraceIDReceivedCount.Name(),\n\t\tMeasure: statNewTraceIDReceivedCount,\n\t\tDescription: statNewTraceIDReceivedCount.Description(),\n\t\tAggregation: view.Sum(),\n\t}\n\ttrackTracesOnMemorylView := &view.View{\n\t\tName: statTracesOnMemoryGauge.Name(),\n\t\tMeasure: statTracesOnMemoryGauge,\n\t\tDescription: statTracesOnMemoryGauge.Description(),\n\t\tAggregation: view.LastValue(),\n\t}\n\n\tlegacyViews := 
[]*view.View{\n\t\toverallDecisionLatencyView,\n\t\ttraceRemovalAgeView,\n\t\tlateSpanArrivalView,\n\n\t\tcountPolicyDecisionsView,\n\t\tpolicyLatencyView,\n\t\tcountFinalDecisionView,\n\n\t\tcountPolicyEvaluationErrorView,\n\t\tcountTraceDroppedTooEarlyView,\n\t\tcountTraceIDArrivalView,\n\t\ttrackTracesOnMemorylView,\n\t}\n\n\t// return obsreport.ProcessorMetricViews(typeStr, legacyViews)\n\treturn legacyViews\n}", "func FindTrackingCategories(provider xerogolang.IProvider, session goth.Session) (*TrackingCategories, error) {\n\tadditionalHeaders := map[string]string{\n\t\t\"Accept\": \"application/json\",\n\t}\n\n\ttrackingCategoryResponseBytes, err := provider.Find(session, \"TrackingCategories\", additionalHeaders, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn unmarshalTrackingCategory(trackingCategoryResponseBytes)\n}", "func (flag *flag) GetCategory() string {\n\treturn \"\"\n}", "func filterClustersWithIndices(elements [][]float64, indices []int) [][]float64 {\n\tvar ret [][]float64\n\tfor index, value := range elements {\n\t\tvar includes bool\n\t\tfor _, idx := range indices {\n\t\t\tif index == idx {\n\t\t\t\tincludes = true\n\t\t\t}\n\t\t}\n\n\t\tif !includes {\n\t\t\tret = append(ret, value)\n\t\t}\n\t}\n\treturn ret\n}", "func flattenInstanceNetworksModesEnumSlice(c *Client, i interface{}) []InstanceNetworksModesEnum {\n\ta, ok := i.([]interface{})\n\tif !ok {\n\t\treturn []InstanceNetworksModesEnum{}\n\t}\n\n\tif len(a) == 0 {\n\t\treturn []InstanceNetworksModesEnum{}\n\t}\n\n\titems := make([]InstanceNetworksModesEnum, 0, len(a))\n\tfor _, item := range a {\n\t\titems = append(items, *flattenInstanceNetworksModesEnum(item.(interface{})))\n\t}\n\n\treturn items\n}", "func (set *SetUI) Slice() SliceUI {\n\tset.lock.Lock()\n\tkeys := make(SliceUI, len(set.cache))\n\ti := 0\n\tfor k := range set.cache {\n\t\tkeys[i] = k\n\t}\n\tset.lock.Unlock()\n\treturn keys\n}" ]
[ "0.6493032", "0.61860156", "0.6127746", "0.58855647", "0.55006135", "0.53227764", "0.53040564", "0.50016135", "0.4928253", "0.48687777", "0.48639703", "0.4636552", "0.46091342", "0.46039593", "0.45843762", "0.4581012", "0.45706818", "0.45607626", "0.45442432", "0.45312253", "0.44781294", "0.4468224", "0.4428747", "0.44280922", "0.4398026", "0.4393473", "0.43913442", "0.43803257", "0.4380084", "0.4380084", "0.43792987", "0.43725535", "0.43624592", "0.4311772", "0.43087047", "0.4300163", "0.42839038", "0.42824364", "0.4279968", "0.42605194", "0.425166", "0.425165", "0.42316526", "0.4216537", "0.42158356", "0.42146105", "0.42061913", "0.41923648", "0.41885975", "0.41819793", "0.4179486", "0.41750392", "0.41703495", "0.41698927", "0.41670975", "0.4160706", "0.41595337", "0.4158027", "0.4156566", "0.41543406", "0.4142983", "0.41334307", "0.4127082", "0.41201583", "0.41149795", "0.41118348", "0.41068852", "0.41042838", "0.4104153", "0.40995234", "0.40881056", "0.40790623", "0.40767056", "0.40725762", "0.40718156", "0.40632322", "0.40607023", "0.40574276", "0.40558046", "0.40491486", "0.4049138", "0.40478766", "0.4042502", "0.40400073", "0.40395918", "0.40356463", "0.40336436", "0.40265763", "0.40249625", "0.4021248", "0.40198088", "0.40177718", "0.40170893", "0.40105593", "0.40075862", "0.40067333", "0.40049636", "0.40047213", "0.39945337", "0.39894283" ]
0.8015573
0
VisibleFlags returns a slice of the Flags with Hidden=false
func (cmd *Command) VisibleFlags() []Flag { return visibleFlags(cmd.Flags) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c Command) VisibleFlags() []Flag {\n\treturn visibleFlags(c.Flags)\n}", "func (f *FlagBase[T, C, V]) IsVisible() bool {\n\treturn !f.Hidden\n}", "func (cmd *Command) VisibleFlagCategories() []VisibleFlagCategory {\n\tif cmd.flagCategories == nil {\n\t\tcmd.flagCategories = newFlagCategoriesFromFlags(cmd.Flags)\n\t}\n\treturn cmd.flagCategories.VisibleCategories()\n}", "func (c *Command) HideFlags(flags ...string) {\n\tfor _, f := range flags {\n\t\tc.hidden.String(f, \"\", \"\")\n\t}\n}", "func (o *GetClientConfig200ResponseDenylist) GetVisible() bool {\n\tif o == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\n\treturn o.Visible\n}", "func (cmd *Command) VisibleCommands() []*Command {\n\tvar ret []*Command\n\tfor _, command := range cmd.Commands {\n\t\tif !command.Hidden {\n\t\t\tret = append(ret, command)\n\t\t}\n\t}\n\treturn ret\n}", "func (am ArgMap) NonFlags() []string {\n\treturn am[\"_\"].([]string)\n}", "func (Visibility) Values() []Visibility {\n\treturn []Visibility{\n\t\t\"IGNORED\",\n\t\t\"VISIBLE\",\n\t}\n}", "func (m *AccessPackage) GetIsHidden()(*bool) {\n return m.isHidden\n}", "func (o *GetClientConfig200ResponseDenylist) GetVisibleOk() (*bool, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Visible, true\n}", "func (l *Loader) AttrVisibilityHidden(i Sym) bool {\n\tif !l.IsExternal(i) {\n\t\treturn false\n\t}\n\treturn l.attrVisibilityHidden.Has(l.extIndex(i))\n}", "func (l *ActivityDumpRuntimeSetting) Hidden() bool {\n\treturn false\n}", "func (e Empty) Visible(v bool) { e.visible = v }", "func hiddenIndices(Disclosure []byte) []int {\n\tHiddenIndices := make([]int, 0)\n\tfor index, disclose := range Disclosure {\n\t\tif disclose == 0 {\n\t\t\tHiddenIndices = append(HiddenIndices, index)\n\t\t}\n\t}\n\treturn HiddenIndices\n}", "func hiddenIndices(Disclosure []byte) []int {\n\tHiddenIndices := make([]int, 0)\n\tfor index, disclose := range Disclosure {\n\t\tif disclose == 0 {\n\t\t\tHiddenIndices = append(HiddenIndices, 
index)\n\t\t}\n\t}\n\treturn HiddenIndices\n}", "func (me TxsdPresentationAttributesViewportsOverflow) IsHidden() bool { return me.String() == \"hidden\" }", "func (s *tsLibrary) Visibility() []string {\n\treturn s.RuleConfig.GetVisibility()\n}", "func (m *WorkbookNamedItem) GetVisible()(*bool) {\n val, err := m.GetBackingStore().Get(\"visible\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}", "func (ref *UIElement) VisibleColumns() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleColumnsAttribute)\n}", "func (o *GetClientConfig200ResponseDenylist) SetVisible(v bool) {\n\to.Visible = v\n}", "func (v *IconView) GetVisibleRange() (*TreePath, *TreePath) {\n\tvar (\n\t\tcpathStart, cpathEnd *C.GtkTreePath\n\t\tpathStart, pathEnd *TreePath\n\t)\n\n\tC.gtk_icon_view_get_visible_range(v.native(), &cpathStart, &cpathEnd)\n\n\tif cpathStart != nil {\n\t\tpathStart = &TreePath{cpathStart}\n\t\truntime.SetFinalizer(pathStart, func(v *TreePath) { glib.FinalizerStrategy(v.free) })\n\t}\n\n\tif cpathEnd != nil {\n\t\tpathEnd = &TreePath{cpathEnd}\n\t\truntime.SetFinalizer(pathEnd, func(v *TreePath) { glib.FinalizerStrategy(v.free) })\n\t}\n\n\treturn pathStart, pathEnd\n}", "func (state *State) LegalActionsMask() []bool {\n\tlength := int(C.StateNumDistinctActions(state.state))\n\tlegalActionMask := make([]bool, length)\n\tcppLegalActionsMask := make([]C.int, length)\n\tC.StateFillLegalActionsMask(state.state, unsafe.Pointer(&cppLegalActionsMask[0]))\n\tfor i := 0; i < length; i++ {\n\t\tlegalActionMask[i] = (cppLegalActionsMask[i] > 0)\n\t}\n\treturn legalActionMask\n}", "func (me TxsdPresentationAttributesGraphicsVisibility) IsVisible() bool {\n\treturn me.String() == \"visible\"\n}", "func (o AccessPackageCatalogOutput) ExternallyVisible() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *AccessPackageCatalog) pulumi.BoolPtrOutput { return v.ExternallyVisible }).(pulumi.BoolPtrOutput)\n}", "func (me 
TxsdPresentationAttributesGraphicsVisibility) IsHidden() bool {\n\treturn me.String() == \"hidden\"\n}", "func (l *LogLevelRuntimeSetting) Hidden() bool {\n\treturn false\n}", "func FilterHiddenServices(services []occlient.Service) []occlient.Service {\n\tvar filteredServices []occlient.Service\n\tfor _, service := range services {\n\t\tif !service.Hidden {\n\t\t\tfilteredServices = append(filteredServices, service)\n\t\t}\n\t}\n\treturn filteredServices\n}", "func (cmd InspectCmd) Hidden() bool {\n\treturn true\n}", "func (outer outer) Visible() bool {\r\n\treturn false\r\n}", "func (fa FileAttributes) IsHidden() bool {\n\treturn fa&2 > 0\n}", "func (ref *UIElement) VisibleCells() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleCellsAttribute)\n}", "func (v *Filter) IsVisible() bool {\n\tif v == nil {\n\t\treturn false\n\t}\n\treturn !v.hidden\n}", "func (me TxsdPresentationAttributesViewportsOverflow) IsVisible() bool {\n\treturn me.String() == \"visible\"\n}", "func (o *AddOn) Hidden() bool {\n\tif o != nil && o.bitmap_&128 != 0 {\n\t\treturn o.hidden\n\t}\n\treturn false\n}", "func (f *flags) Flags() []cli.Flag {\n\treturn f.flags\n}", "func Visibility(value bool) *SimpleElement { return newSEBool(\"visibility\", value) }", "func (ref *UIElement) VisibleChildren() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleChildrenAttribute)\n}", "func (mw *MapWidget) Visible() bool {\n\treturn !mw.hidden\n}", "func Hidden(hidden bool) Option {\n\treturn func(i *Info) {\n\t\ti.Hidden = hidden\n\t}\n}", "func (o *AddOn) GetHidden() (value bool, ok bool) {\n\tok = o != nil && o.bitmap_&128 != 0\n\tif ok {\n\t\tvalue = o.hidden\n\t}\n\treturn\n}", "func (c *Client) List() (map[string]map[string]*flagger.Flag, error) {\n\tresp, err := http.Get(fmt.Sprintf(\"%s/flags\", c.URL))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tresponse := make(map[string]map[string]*flagger.Flag)\n\tdecoder := json.NewDecoder(resp.Body)\n\terr = 
decoder.Decode(&response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}", "func (_BaseContent *BaseContentCaller) Visibility(opts *bind.CallOpts) (uint8, error) {\n\tvar out []interface{}\n\terr := _BaseContent.contract.Call(opts, &out, \"visibility\")\n\n\tif err != nil {\n\t\treturn *new(uint8), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(uint8)).(*uint8)\n\n\treturn out0, err\n\n}", "func (r Virtual_Guest) GetPrivateNetworkOnlyFlag() (resp bool, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getPrivateNetworkOnlyFlag\", nil, &r.Options, &resp)\n\treturn\n}", "func (p *Packet) Flags() (flags byte, present bool) {\n\tif b, ok := p.m[keys.flags].([]byte); ok {\n\t\treturn b[0], true\n\t}\n\treturn 0, false\n}", "func GetHidden() (tags []Type, err error) {\n\ttags = make([]Type, 0)\n\n\t_, err = mongo.Find(\"blotter\", \"tags\", bson.M{\n\t\t\"hide\": true,\n\t}, nil, &tags)\n\n\tif err != nil {\n\t\treturn\n\t}\n\treturn tags, nil\n}", "func (m *Monster) Visible(v bool) { m.Visibility = v }", "func (w *Window) Visible() bool {\n\treturn !w.hidden\n}", "func (v *Layer) IsVisible() bool {\n\treturn v != nil\n}", "func (ws *Workspace) Visible() bool {\n\treturn ws.visible\n}", "func (sf *TWindow) Visible() bool {\n\treturn !sf.hidden\n}", "func (o *GetMessagesAllOf) GetFlags() []string {\n\tif o == nil || o.Flags == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.Flags\n}", "func VisibleChatConversationStatuses() (res []chat1.ConversationStatus) {\n\tres = make([]chat1.ConversationStatus, 0, len(chat1.ConversationStatusMap))\n\tfor _, s := range chat1.ConversationStatusMap {\n\t\tif GetConversationStatusBehavior(s).ShowInInbox {\n\t\t\tres = append(res, s)\n\t\t}\n\t}\n\tsort.Sort(byConversationStatus(res))\n\treturn\n}", "func (v *Status) IsVisible() bool {\n\treturn v != nil\n}", "func (d *Desc) IsHidden() bool {\n\treturn d.isHidden\n}", "func (v *Filter) ToggleVisible() error {\n\t// delete 
all user input from the tree view\n\tv.view.Clear()\n\n\t// toggle hiding\n\tv.hidden = !v.hidden\n\n\tif !v.hidden {\n\t\t_, err := v.gui.SetCurrentView(v.name)\n\t\tif err != nil {\n\t\t\tlogrus.Error(\"unable to toggle filter view: \", err)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\t// reset the cursor for the next time it is visible\n\t// Note: there is a subtle gocui behavior here where this cannot be called when the view\n\t// is newly visible. Is this a problem with dive or gocui?\n\treturn v.view.SetCursor(0, 0)\n}", "func (ref *UIElement) IsHidden() bool {\n\tret, _ := ref.BoolAttr(HiddenAttribute)\n\treturn ret\n}", "func (u *User) GetStoriesHidden() (value bool) {\n\tif u == nil {\n\t\treturn\n\t}\n\treturn u.Flags2.Has(3)\n}", "func (s *Service) IsVisible(ctx context.Context) (bool, error) {\n\tprops, err := s.GetProperties(ctx)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"unable to get properties\")\n\t}\n\tvisible, err := props.GetBool(shillconst.ServicePropertyVisible)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"unable to get IsVisible from properties\")\n\t}\n\treturn visible, nil\n}", "func PossibleIsReadOnlyValues() []IsReadOnly {\n return []IsReadOnly{IsReadOnlyFalse,IsReadOnlyTrue}\n }", "func boolAndNonBoolFlags(flags []*kong.Flag) (boolFlags, nonBoolFlags []*kong.Flag) {\n\tboolFlags = make([]*kong.Flag, 0, len(flags))\n\tnonBoolFlags = make([]*kong.Flag, 0, len(flags))\n\tfor _, flag := range flags {\n\t\tswitch flag.Value.IsBool() {\n\t\tcase true:\n\t\t\tboolFlags = append(boolFlags, flag)\n\t\tcase false:\n\t\t\tnonBoolFlags = append(nonBoolFlags, flag)\n\t\t}\n\t}\n\treturn boolFlags, nonBoolFlags\n}", "func PossiblePartnerRegistrationVisibilityStateValues() []PartnerRegistrationVisibilityState {\n\treturn []PartnerRegistrationVisibilityState{GenerallyAvailable, Hidden, PublicPreview}\n}", "func IsHidden(fileInfo os.FileInfo) bool {\n // Check for windows hidden attribute.\n sysInfo := fileInfo.Sys();\n if 
(sysInfo != nil) {\n winAttributes, ok := sysInfo.(syscall.Win32FileAttributeData);\n if (ok) {\n if (winAttributes.FileAttributes & syscall.FILE_ATTRIBUTE_HIDDEN != 0) {\n return true;\n }\n }\n }\n\n return false;\n}", "func Hidden(arg interface{}) Value {\n\treturn Conceal(arg)\n}", "func (me TdisplayModeEnumType) IsHide() bool { return me == \"hide\" }", "func (m *AccessPackageCatalog) GetIsExternallyVisible()(*bool) {\n return m.isExternallyVisible\n}", "func (m *ConditionalAccessDeviceStates) GetExcludeStates()([]string) {\n val, err := m.GetBackingStore().Get(\"excludeStates\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]string)\n }\n return nil\n}", "func WithHidden(hidden bool) FlagOption {\n\treturn func(f *pflag.Flag) {\n\t\tf.Hidden = hidden\n\t}\n}", "func (m *WorkbookChartTitle) GetVisible()(*bool) {\n val, err := m.GetBackingStore().Get(\"visible\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}", "func (v View) Slice() []bool {\n\t// TODO: This forces an alloc, as an alternative a slice could be taken\n\t// as input that can be (re)used by the client. 
Are there use cases\n\t// where this would actually make sense?\n\tresult := make([]bool, v.Len())\n\tfor i, j := range v.index {\n\t\tresult[i] = v.data[j]\n\t}\n\treturn result\n}", "func (m *AccessPackage) SetIsHidden(value *bool)() {\n m.isHidden = value\n}", "func (v *VerbalExpression) getFlags() string {\n\tflags := \"misU\" // warning, follow Flag const order\n\tresult := []rune{}\n\n\tfor i, flag := range flags {\n\t\tif v.flags&(1<<uint(i)) != 0 {\n\t\t\tresult = append(result, flag)\n\t\t}\n\t}\n\n\treturn string(result)\n}", "func PossibleCertificateVisibilityValues() []CertificateVisibility {\n\treturn []CertificateVisibility{CertificateVisibilityRemoteUser, CertificateVisibilityStartTask, CertificateVisibilityTask}\n}", "func (e *HTMLApplet) Hidden(v bool) *HTMLApplet {\n\tif v {\n\t\te.a[\"hidden\"] = \"\"\n\t} else {\n\t\tdelete(e.a, \"hidden\")\n\t}\n\treturn e\n}", "func FieldFlagsRepr(acc Access) string {\n\tvar text []string\n\n\tif (acc & AccPublic) > 0 {\n\t\ttext = append(text, \"public\")\n\t}\n\n\tif (acc & AccPrivate) > 0 {\n\t\ttext = append(text, \"private\")\n\t}\n\n\tif (acc & AccProtected) > 0 {\n\t\ttext = append(text, \"protected\")\n\t}\n\n\tif (acc & AccStatic) > 0 {\n\t\ttext = append(text, \"static\")\n\t}\n\n\tif (acc & AccFinal) > 0 {\n\t\ttext = append(text, \"final\")\n\t}\n\n\tif (acc & AccVolatile) > 0 {\n\t\ttext = append(text, \"volatile\")\n\t}\n\n\tif (acc & AccTransient) > 0 {\n\t\ttext = append(text, \"transient\")\n\t}\n\n\treturn strings.Join(text, \" \")\n}", "func Flags() int {\n\treturn std.Flags()\n}", "func Flags() int {\n\treturn std.Flags()\n}", "func checkFlags(c *Command) map[string]bool {\n\tnotMyFlags := make(map[string]bool)\n\t// visit all flags that were passed in via command line:\n\tflag.Visit(func(f *flag.Flag) {\n\t\tisNotMyFlag := true\n\t\tif c != nil {\n\t\t\tfor _, myFlag := range c.Flags {\n\t\t\t\tif f.Name == myFlag {\n\t\t\t\t\tisNotMyFlag = false // yes, f is among my 
flags\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif isNotMyFlag {\n\t\t\tfor pf := range privateFlags {\n\t\t\t\tif f.Name == pf {\n\t\t\t\t\tnotMyFlags[pf] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\treturn notMyFlags\n}", "func (c StandardCommand) GetFlags() ([]Flag, error) {\n\treturn c.Flags, nil\n}", "func (o *Partition) GetFlags(ctx context.Context) (flags uint64, err error) {\n\terr = o.object.CallWithContext(ctx, \"org.freedesktop.DBus.Properties.Get\", 0, InterfacePartition, \"Flags\").Store(&flags)\n\treturn\n}", "func featuresFilteredByFlags(infos map[string]*FeatureInfo, activated, deactivated bool) []FeatureInfo {\n\tvar filteredList []FeatureInfo\n\tfor _, v := range infos {\n\t\tif activated && v.Activated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\n\t\tif deactivated && !v.Activated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\n\t\t// No flags were provided, so only filter out features that shouldn't be listed.\n\t\tif !activated && !deactivated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\t}\n\treturn filteredList\n}", "func (c *Command) Flags() []Flag {\n\t//return c.Name\n\tflags := make([]Flag, 0)\n\tfor _, val := range c.orderedFlags {\n\t\tflags = append(flags, *val)\n\t}\n\treturn flags\n}", "func Flags() int {\n return Std.Flags()\n}", "func HideConfigFlags() {\n\tconfig.HideConfigFlags()\n}", "func (tc *TermColor) Hidden() *TermColor {\n\ttc.settingsCount++\n\ttc.hidden = true\n\treturn tc\n}", "func addHiddenFlags() {\n\tRootCmd.PersistentFlags().MarkHidden(\"azure-container-registry-config\")\n}", "func (t *Dense) bools() []bool { return *(*[]bool)(unsafe.Pointer(t.hdr)) }", "func (*CMsgHideWindow) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{135}\n}", "func (d UserData) SidebarVisible() bool {\n\tval := d.ModelData.Get(models.NewFieldName(\"SidebarVisible\", \"sidebar_visible\"))\n\tif 
!d.Has(models.NewFieldName(\"SidebarVisible\", \"sidebar_visible\")) {\n\t\treturn *new(bool)\n\t}\n\treturn val.(bool)\n}", "func (Path_HiddenReason) EnumDescriptor() ([]byte, []int) {\n\treturn file_route_api_route_proto_rawDescGZIP(), []int{1, 1}\n}", "func PossibleServerPublicNetworkAccessStateValues() []ServerPublicNetworkAccessState {\n return []ServerPublicNetworkAccessState{ServerPublicNetworkAccessStateDisabled,ServerPublicNetworkAccessStateEnabled}\n }", "func (pl PredicateList) Privates() PredicateList {\n\treturn pl.filter(Predicate.IsPrivate)\n}", "func TakeBool(n int, list []bool) []bool {\n\tif n < 0 {\n\t\treturn []bool{}\n\t}\n\n\tnewListLen := len(list)\n\n\tif n < newListLen {\n\t\tnewListLen = n\n\t}\n\tnewList := make([]bool, newListLen)\n\tfor i := 0; i < newListLen; i++ {\n\t\tnewList[i] = list[i]\n\t}\n\treturn newList\n}", "func IsHidden(file string) (bool, error) {\n\tpointer, err := syscall.UTF16PtrFromString(file)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tattributes, err := syscall.GetFileAttributes(pointer)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn attributes&syscall.FILE_ATTRIBUTE_HIDDEN != 0, nil\n}", "func (me TxsdPresentationAttributesGraphicsPointerEvents) IsVisible() bool {\n\treturn me.String() == \"visible\"\n}", "func (opts Options) IsNotOkListable() bool {\n\tif *opts.ok || *opts.notOk {\n\t\treturn *opts.notOk\n\t}\n\n\treturn true\n}", "func (ref *UIElement) VisibleRows() []*UIElement {\n\treturn ref.SliceOfUIElementAttr(VisibleRowsAttribute)\n}", "func UserHiddenCommentTypesFromRequest(ctx *context.Context) *big.Int {\n\tbitset := new(big.Int)\n\tfor group, commentTypes := range hiddenCommentTypeGroups {\n\t\tif ctx.FormBool(group) {\n\t\t\tfor _, commentType := range commentTypes {\n\t\t\t\tbitset = bitset.SetBit(bitset, int(commentType), 1)\n\t\t\t}\n\t\t}\n\t}\n\treturn bitset\n}", "func (f *FlagSet) HasAvailableFlags() bool {\n\tfor _, flag := range f.formal {\n\t\tif !flag.Hidden {\n\t\t\treturn 
true\n\t\t}\n\t}\n\treturn false\n}", "func (o GoogleCloudRetailV2alphaSearchRequestFacetSpecPtrOutput) ExcludedFilterKeys() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *GoogleCloudRetailV2alphaSearchRequestFacetSpec) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ExcludedFilterKeys\n\t}).(pulumi.StringArrayOutput)\n}", "func (*CMsgWasHidden) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{24}\n}" ]
[ "0.7877598", "0.62979424", "0.6034252", "0.5960518", "0.56332713", "0.55720407", "0.5533088", "0.54752606", "0.544442", "0.5408661", "0.5385666", "0.53385854", "0.5321876", "0.53136355", "0.53136355", "0.52708465", "0.52653545", "0.5263343", "0.52615714", "0.5245738", "0.5219789", "0.5214525", "0.5211894", "0.5189446", "0.5188787", "0.51868016", "0.51816505", "0.51781046", "0.51469797", "0.5143164", "0.50740004", "0.50594014", "0.5033228", "0.50273705", "0.50219405", "0.5017544", "0.5003379", "0.5000065", "0.49963272", "0.49958172", "0.49946296", "0.49787474", "0.49686947", "0.49440166", "0.49303645", "0.4905986", "0.4888163", "0.48852497", "0.4881004", "0.48797685", "0.48788196", "0.48556393", "0.48552546", "0.48520002", "0.48350722", "0.48325318", "0.48288307", "0.48236996", "0.47988236", "0.47908267", "0.47831813", "0.47815654", "0.47797725", "0.47765282", "0.47742918", "0.47315472", "0.47284207", "0.4717014", "0.47156653", "0.47119358", "0.47113845", "0.46989462", "0.46982503", "0.46961743", "0.46949545", "0.46949545", "0.46795917", "0.4675578", "0.4663845", "0.4662566", "0.4643587", "0.46417195", "0.46263048", "0.46261176", "0.46207476", "0.46185714", "0.4614383", "0.4614241", "0.46011558", "0.46008176", "0.45905754", "0.4578221", "0.45750073", "0.4574464", "0.4573915", "0.45698795", "0.45684955", "0.4565954", "0.4542665", "0.45412314" ]
0.79325193
0
Root returns the Command at the root of the graph
func (cmd *Command) Root() *Command { if cmd.parent == nil { return cmd } return cmd.parent.Root() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Root(appName, version string) (opt *RootCmdOpt) {\n\troot := &RootCommand{AppName: appName, Version: version, Command: Command{BaseOpt: BaseOpt{Name: appName}}}\n\t// rootCommand = root\n\topt = RootFrom(root)\n\treturn\n}", "func Root(appName, version string) (opt *RootCmdOpt) {\n\troot := &RootCommand{AppName: appName, Version: version, Command: Command{BaseOpt: BaseOpt{Name: appName}}}\n\t// rootCommand = root\n\topt = RootFrom(root)\n\treturn\n}", "func GetRootCommand() *cobra.Command { return rootCmd }", "func Root() *cobra.Command {\n\troot := cmd.Skeleton(\"flanders\", \"Do container-ey and Kubernetes-ey things with less fuss\")\n\tvar debug bool\n\tflags := root.PersistentFlags()\n\tflags.BoolVarP(&debug, \"debug\", \"d\", false, \"Turn on debug logging\")\n\tctx := cmd.NewContext(context.Background(), func() bool {\n\t\treturn debug\n\t})\n\n\troot.AddCommand(chartmuseum.Root(ctx))\n\troot.AddCommand(censusai.Root(ctx))\n\troot.AddCommand(docker.Root(ctx))\n\troot.AddCommand(ingress.Root(ctx))\n\troot.AddCommand(athens.Root(ctx))\n\troot.AddCommand(advanced.Root(ctx))\n\treturn root\n}", "func getRootCmd() *cobra.Command {\n\treturn rootCmd\n}", "func RootCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"deploy\",\n\t\tShort: \"Akash deploy tool commands\",\n\t\tSilenceUsage: true,\n\t}\n\n\tcmd.PersistentFlags().String(flags.FlagNode, \"http://localhost:26657\", \"The node address\")\n\tif err := viper.BindPFlag(flags.FlagNode, cmd.PersistentFlags().Lookup(flags.FlagNode)); err != nil {\n\t\treturn nil\n\t}\n\n\tcmd.AddCommand(createCmd())\n\n\treturn cmd\n}", "func NewRootCommand(parent cmd.Registerer, g *global.Data) *RootCommand {\n\tvar c RootCommand\n\tc.Globals = g\n\tc.CmdClause = parent.Command(\"stats\", \"View historical and realtime statistics for a Fastly service\")\n\treturn &c\n}", "func RootCommand() *cobra.Command {\n\trootCmd.PersistentFlags().StringP(\"config\", \"c\", \"\", \"the config file to 
use\")\n\trootCmd.Flags().IntP(\"port\", \"p\", 0, \"the port to use\")\n\n\trootCmd.AddCommand(&versionCmd)\n\n\treturn &rootCmd\n}", "func main() {\n\terr := cmd.RootCmd.Execute()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func NewRootCommand(parent common.Registerer, globals *config.Data) *RootCommand {\n\tvar c RootCommand\n\tc.Globals = globals\n\tc.CmdClause = parent.Command(\"stats\", \"View statistics (historical and realtime) for a Fastly service\")\n\treturn &c\n}", "func main() {\n\tcmd.Root().Execute()\n}", "func DefaultRootCommand() *cobra.Command {\n\n\tc := &cobra.Command{\n\t\tUse: \"abstrakt\",\n\t\tShort: \"Scalable, config driven data pipelines for Kubernetes.\",\n\t\tLong: \"Scalable, config driven data pipelines for Kubernetes.\",\n\t}\n\n\tc.PersistentPreRunE = func(cmd *cobra.Command, args []string) (err error) {\n\t\tverbose := cmd.Flag(\"verbose\").Value.String()\n\n\t\tif verbose == \"true\" {\n\t\t\tlogger.SetLevelDebug()\n\t\t} else {\n\t\t\tlogger.SetLevelInfo()\n\t\t}\n\n\t\treturn nil\n\t}\n\n\taddCommands(c, newComposeCmd(), newVersionCmd(), newVisualiseCmd(), newValidateCmd(), newDiffCmd())\n\n\treturn c\n}", "func Execute() error { return rootCmd.Execute() }", "func (g *Graph) Root() *Node {\n\treturn g.Nodes[0]\n}", "func RootCmd() error {\n\tconfigFile := flag.String(\"config\", defaultConfigFile, \"provide path to json configuration file\")\n\tflag.Parse()\n\terr := config.LoadApplication(*configFile)\n\tif err != nil {\n\t\tfmt.Println(\"error in loading application = \", err)\n\t}\n\tr := router.NewRouter()\n\terr = http.ListenAndServe(\":8080\", r)\n\tif err != nil {\n\t\tconfig.AppLogger.ErrorLogger.Fatal(\"Error occured while listening :\", err)\n\t\t//fmt.Println(\"Error :\", err)\n\t}\n\treturn nil\n}", "func RootCmd() *cobra.Command {\n\tman, err := helper.GetManual(\"root\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: man.Use,\n\t\tShort: 
man.Short,\n\t\tLong: man.Long,\n\t}\n\n\t// Here you will define your flags and configuration settings.\n\t// Cobra supports persistent flags, which, if defined here will be global for your application.\n\tcmd.PersistentFlags().StringVarP(&profile, \"profile\", \"p\", \"default\", \"Use a specific profile from your credentials and configurations file.\")\n\n\t// TODO: allow users to pass their prefer location for clencli's configurations directory\n\n\treturn cmd\n}", "func (t *Operator) Root() Type {\n\treturn t\n}", "func (r *RootCmd) Execute() error {\n\treturn r.c.Execute()\n}", "func rootCommand(version string) *cobra.Command {\n\trootCommand := &cobra.Command{\n\t\tUse: \"refreturn <PATH>\",\n\t\tVersion: version,\n\t\tShort: shortHelp,\n\t\tLong: longHelp,\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tpath := args[0]\n\t\t\treturn Run(path)\n\t\t},\n\t}\n\n\treturn rootCommand\n}", "func NewRootCommand() *cobra.Command {\n\n\tcmd := &cobra.Command{\n\t\tUse: \"katib-cli\",\n\t\tShort: \"katib cli\",\n\t\tLong: `This is katib cli client using cobra framework`,\n\t}\n\n\t//initialize config\n\tinitFlag(cmd)\n\n\t//add command\n\tcmd.AddCommand(NewCommandCreate())\n\tcmd.AddCommand(NewCommandGet())\n\tcmd.AddCommand(NewCommandStop())\n\tcmd.AddCommand(NewCommandPush())\n\tcmd.AddCommand(NewCommandPull())\n\n\t//\tcmd.AddCommand(NewCommandModel())\n\n\t//MISC\n\t//cmd.AddCommand(NewCommandVersion())\n\n\t//Generate bash completion file\n\t//cmd.AddCommand(NewCommandBashCmp())\n\n\treturn cmd\n}", "func main() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}", "func (obj *language) Root() string {\n\treturn obj.root\n}", "func RootCmd(version string) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"manala\",\n\t\tShort: \"Let your project's plumbing up to date\",\n\t\tLong: `Manala synchronize some boring parts of your projects,\nsuch as makefile targets, 
virtualization and provisioning files...\n\nRecipes are pulled from a git repository, or a local directory.`,\n\t\tSilenceErrors: true,\n\t\tSilenceUsage: true,\n\t\tVersion: version,\n\t\tDisableAutoGenTag: true,\n\t}\n\n\tcmd.PersistentFlags().StringP(\"cache-dir\", \"c\", viper.GetString(\"cache_dir\"), \"cache directory\")\n\t_ = viper.BindPFlag(\"cache_dir\", cmd.PersistentFlags().Lookup(\"cache-dir\"))\n\n\tcmd.PersistentFlags().BoolP(\"debug\", \"d\", viper.GetBool(\"debug\"), \"debug mode\")\n\t_ = viper.BindPFlag(\"debug\", cmd.PersistentFlags().Lookup(\"debug\"))\n\n\treturn cmd\n}", "func NewRootCommand(c2ClientFactory cli.APIClientFactory) cli.Command {\n\ttopicListCommand := NewListCommand(c2ClientFactory)\n\ttopicCreateCommand := NewCreateCommand(c2ClientFactory)\n\ttopicRemoveCommand := NewRemoveCommand(c2ClientFactory)\n\tlistClientsCommand := NewListClientsCommand(c2ClientFactory)\n\n\tcmd := &rootCommand{}\n\tcobraCmd := &cobra.Command{\n\t\tUse: \"topic\",\n\t\tShort: \"group commands to interact with c2 topics\",\n\t}\n\n\tcobraCmd.AddCommand(\n\t\ttopicListCommand.CobraCmd(),\n\t\ttopicCreateCommand.CobraCmd(),\n\t\ttopicRemoveCommand.CobraCmd(),\n\t\tlistClientsCommand.CobraCmd(),\n\t)\n\n\tcmd.cobraCmd = cobraCmd\n\n\treturn cmd\n}", "func RootCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"passgen\",\n\t\tShort: \"XKCD inspired CLI password generator\",\n\t\tLong: `A command line tool for generating\npasswords inspired by an XKCD commic:\nhttps://xkcd.com/936/`,\n\t}\n\tcmd.AddCommand(\n\t\tgenerateCmd(),\n\t)\n\treturn cmd\n}", "func RootCommand() *cobra.Command {\n\trootCmd := cobra.Command{\n\t\tUse: \"example\",\n\t\tRun: run,\n\t}\n\n\t// This is where we will configure everything!\n\trootCmd.Flags().IntP(\"port\", \"p\", 0, \"the port to do things on\")\n\n\treturn &rootCmd\n}", "func NewRootCmd() (*cobra.Command, app.EncodingConfig) {\n\tencodingConfig := app.MakeEncodingConfig()\n\n\tconfig := 
sdk.GetConfig()\n\tconfig.SetBech32PrefixForAccount(app.Bech32PrefixAccAddr, app.Bech32PrefixAccPub)\n\tconfig.SetBech32PrefixForValidator(app.Bech32PrefixValAddr, app.Bech32PrefixValPub)\n\tconfig.SetBech32PrefixForConsensusNode(app.Bech32PrefixConsAddr, app.Bech32PrefixConsPub)\n\tconfig.Seal()\n\n\tinitClientCtx := client.Context{}.\n\t\tWithJSONMarshaler(encodingConfig.Marshaler).\n\t\tWithInterfaceRegistry(encodingConfig.InterfaceRegistry).\n\t\tWithTxConfig(encodingConfig.TxConfig).\n\t\tWithLegacyAmino(encodingConfig.Amino).\n\t\tWithInput(os.Stdin).\n\t\tWithAccountRetriever(authtypes.AccountRetriever{}).\n\t\tWithBroadcastMode(flags.BroadcastBlock).\n\t\tWithHomeDir(app.DefaultNodeHome)\n\n\trootCmd := &cobra.Command{\n\t\tUse: version.AppName,\n\t\tShort: \"Orai Daemon (server)\",\n\t\tPersistentPreRunE: func(cmd *cobra.Command, _ []string) error {\n\t\t\tif err := client.SetCmdClientContextHandler(initClientCtx, cmd); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn server.InterceptConfigsPreRunHandler(cmd)\n\t\t},\n\t}\n\n\tinitRootCmd(rootCmd, encodingConfig)\n\n\treturn rootCmd, encodingConfig\n}", "func NewRootCmd() *cobra.Command {\r\n\treturn rootCmd\r\n}", "func NewRootCmd(version string) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"analyzer\",\n\t\tVersion: version,\n\t}\n\n\tcmd.AddCommand(NewAllCmd())\n\tcmd.AddCommand(NewUsersCmd())\n\tcmd.AddCommand(NewReposCmd())\n\n\treturn cmd\n}", "func NewRootCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"keto\",\n\t\tShort: \"Global and consistent permission and authorization server\",\n\t}\n\n\tconfigx.RegisterConfigFlag(cmd.PersistentFlags(), []string{filepath.Join(userHomeDir(), 
\"keto.yml\")})\n\n\trelationtuple.RegisterCommandsRecursive(cmd)\n\tnamespace.RegisterCommandsRecursive(cmd)\n\tmigrate.RegisterCommandsRecursive(cmd)\n\tserver.RegisterCommandsRecursive(cmd)\n\tcheck.RegisterCommandsRecursive(cmd)\n\texpand.RegisterCommandsRecursive(cmd)\n\tstatus.RegisterCommandRecursive(cmd)\n\n\tcmd.AddCommand(cmdx.Version(&config.Version, &config.Commit, &config.Date))\n\n\treturn cmd\n}", "func NewRootCmd(sl service.CommandServicer) (*cobra.Command, error) {\n\trootCmd := &cobra.Command{\n\t\tUse: \"versions\",\n\t\tShort: \"a group of actions for working with versions\",\n\t\tTraverseChildren: true,\n\t}\n\n\tcmdFuncs := []func(locator service.CommandServicer) (*cobra.Command, error){\n\t\tnewListCommand,\n\t\tnewShowCommand,\n\t\tnewPutCommand,\n\t}\n\n\tfor _, f := range cmdFuncs {\n\t\tcmd, err := f(sl)\n\t\tif err != nil {\n\t\t\treturn rootCmd, err\n\t\t}\n\t\trootCmd.AddCommand(cmd)\n\t}\n\n\treturn rootCmd, nil\n}", "func (fs *FS) Root() (fspkg.Node, error) {\n\tte, ok := fs.r.Lookup(\"\")\n\tif !ok {\n\t\treturn nil, errors.New(\"failed to find root in stargz\")\n\t}\n\treturn &node{fs, te}, nil\n}", "func NewCmdRoot(outWriter, errWriter io.Writer) (*cobra.Command, *RootOptions) {\n\toptions := &RootOptions{}\n\n\tcmd := &cobra.Command{\n\t\tSilenceUsage: true,\n\t\tSilenceErrors: true,\n\t\tVersion: version.String(),\n\t}\n\n\tcmd.SetOut(outWriter)\n\tcmd.SetErr(errWriter)\n\n\t// GateClient Flags\n\tcmd.PersistentFlags().StringVar(&options.configPath, \"config\", \"\", \"path to config file (default $HOME/.spin/config)\")\n\tcmd.PersistentFlags().StringVar(&options.gateEndpoint, \"gate-endpoint\", \"\", \"Gate (API server) endpoint (default http://localhost:8084)\")\n\tcmd.PersistentFlags().BoolVarP(&options.ignoreCertErrors, \"insecure\", \"k\", false, \"ignore certificate errors\")\n\tcmd.PersistentFlags().BoolVarP(&options.ignoreRedirects, \"ignore-redirects\", \"\", false, \"ignore 
redirects\")\n\tcmd.PersistentFlags().StringVar(&options.defaultHeaders, \"default-headers\", \"\", \"configure default headers for gate client as comma separated list (e.g. key1=value1,key2=value2)\")\n\tcmd.PersistentFlags().IntVar(&options.retryTimeout, \"retry-timeout\", 0, \"maximum time to wait for tasks to complete in seconds (default 60)\")\n\n\t// UI Flags\n\tcmd.PersistentFlags().BoolVarP(&options.quiet, \"quiet\", \"q\", false, \"squelch non-essential output\")\n\tcmd.PersistentFlags().BoolVar(&options.color, \"no-color\", true, \"disable color\")\n\tcmd.PersistentFlags().StringVarP(&options.outputFormat, \"output\", \"o\", \"\", \"configure output formatting\")\n\n\t// Initialize UI & GateClient\n\toutw := outWriter\n\terrw := errWriter\n\tcmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {\n\t\toutputFormater, err := output.ParseOutputFormat(options.outputFormat)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\toptions.Ui = output.NewUI(options.quiet, options.color, outputFormater, outw, errw)\n\n\t\tgateClient, err := gateclient.NewGateClient(\n\t\t\toptions.Ui,\n\t\t\toptions.gateEndpoint,\n\t\t\toptions.defaultHeaders,\n\t\t\toptions.configPath,\n\t\t\toptions.ignoreCertErrors,\n\t\t\toptions.ignoreRedirects,\n\t\t\toptions.retryTimeout,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\toptions.GateClient = gateClient\n\n\t\treturn nil\n\t}\n\n\treturn cmd, options\n}", "func RootCmd() *cobra.Command {\n\trootCmd := &cobra.Command{\n\t\tUse: \"capi-yaml\",\n\t\tShort: \"Devtool to help with YAML for CAPI and CAPI providers\",\n\t\tLong: \"Devtool to help with YAML for CAPI and CAPI providers\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tif err := cmd.Help(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\trootCmd.AddCommand(getGenerateCommand())\n\n\treturn rootCmd\n}", "func RootCommand() *cobra.Command {\n\trootCmd := cobra.Command{\n\t\tUse: \"exemplar\",\n\t\tLong: 
`exemplar is a golang boilerplate for LambdaTest microservices`,\n\t\tVersion: global.BINARY_VERSION,\n\t\tRun: run,\n\t}\n\n\t// define flags used for this command\n\tAttachCLIFlags(&rootCmd)\n\n\treturn &rootCmd\n}", "func RootCmdName() string {\n\treturn path.Base(os.Args[0])\n}", "func rootCmd() *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"ipv\",\n\t\tShort: \"IPVanish CLI utility\",\n\t\tLong: utils.HelpText(`IPVanish is a VPN provider\n\t\tThis command lists the servers and connects to the\n\t\tselected server in a particular country.\n\t\tComplete documentation is available at http://ipvanish.com/.`),\n\t\tVersion: \"0.1\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\t_ = cmd.Help()\n\t\t},\n\t}\n}", "func (dfs *DaosFileSystem) Root() *DaosNode {\n\treturn dfs.root\n}", "func (s *Session) Root() Rootable {\n\n\treturn s.root\n}", "func NewRootCmd(args []string) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"cain\",\n\t\tShort: \"\",\n\t\tLong: ``,\n\t}\n\n\tout := cmd.OutOrStdout()\n\n\tcmd.AddCommand(NewBackupCmd(out))\n\tcmd.AddCommand(NewRestoreCmd(out))\n\tcmd.AddCommand(NewSchemaCmd(out))\n\n\treturn cmd\n}", "func Execute() {\n if err := rootCmd.Execute(); err != nil {\n fmt.Println(err)\n os.Exit(1)\n }\n}", "func Execute() {\n if err := rootCmd.Execute(); err != nil {\n fmt.Println(err)\n os.Exit(1)\n }\n}", "func Execute() {\n if err := rootCmd.Execute(); err != nil {\n fmt.Println(err)\n os.Exit(1)\n }\n}", "func Root() *Descriptor {\n\treturn &root\n}", "func (r *RPCClient) Root() (t *RPCClientRoot) {\n\treturn &RPCClientRoot{r}\n}", "func NewRootCommand(name string) *appcmd.Command {\n\tbuilder := appflag.NewBuilder(\n\t\tname,\n\t\tappflag.BuilderWithTimeout(120*time.Second),\n\t\tappflag.BuilderWithTracing(),\n\t)\n\tglobalFlags := bufcli.NewGlobalFlags()\n\treturn &appcmd.Command{\n\t\tUse: name,\n\t\tVersion: bufcli.Version,\n\t\tBindPersistentFlags: appcmd.BindMultiple(builder.BindRoot, 
globalFlags.BindRoot),\n\t\tSubCommands: []*appcmd.Command{\n\t\t\tbuild.NewCommand(\"build\", builder),\n\t\t\texport.NewCommand(\"export\", builder),\n\t\t\tlint.NewCommand(\"lint\", builder),\n\t\t\tbreaking.NewCommand(\"breaking\", builder),\n\t\t\tgenerate.NewCommand(\"generate\", builder),\n\t\t\tprotoc.NewCommand(\"protoc\", builder),\n\t\t\tlsfiles.NewCommand(\"ls-files\", builder),\n\t\t\tpush.NewCommand(\"push\", builder),\n\t\t\t{\n\t\t\t\tUse: \"mod\",\n\t\t\t\tShort: \"Configure and update buf modules.\",\n\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\tappcmd.NewDeletedCommand(\"init\", modInitDeprecationMessage),\n\t\t\t\t\tmodprune.NewCommand(\"prune\", builder),\n\t\t\t\t\tmodupdate.NewCommand(\"update\", builder),\n\t\t\t\t\tmodclearcache.NewCommand(\"clear-cache\", builder, \"cc\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUse: \"config\",\n\t\t\t\tShort: \"Interact with the configuration of Buf.\",\n\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\tconfiginit.NewCommand(\"init\", builder),\n\t\t\t\t\tconfiglslintrules.NewCommand(\"ls-lint-rules\", builder),\n\t\t\t\t\tconfiglsbreakingrules.NewCommand(\"ls-breaking-rules\", builder),\n\t\t\t\t\tconfigmigratev1beta1.NewCommand(\"migrate-v1beta1\", builder),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUse: \"registry\",\n\t\t\t\tShort: \"Interact with the Buf Schema Registry.\",\n\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\tregistrylogin.NewCommand(\"login\", builder),\n\t\t\t\t\tregistrylogout.NewCommand(\"logout\", builder),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUse: \"beta\",\n\t\t\t\tShort: \"Beta commands. 
Unstable and will likely change.\",\n\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t{\n\t\t\t\t\t\tUse: \"registry\",\n\t\t\t\t\t\tShort: \"Interact with the Buf Schema Registry.\",\n\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tUse: \"organization\",\n\t\t\t\t\t\t\t\tShort: \"Organization commands.\",\n\t\t\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t\t\torganizationcreate.NewCommand(\"create\", builder),\n\t\t\t\t\t\t\t\t\torganizationget.NewCommand(\"get\", builder),\n\t\t\t\t\t\t\t\t\torganizationdelete.NewCommand(\"delete\", builder),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tUse: \"repository\",\n\t\t\t\t\t\t\t\tShort: \"Repository commands.\",\n\t\t\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t\t\trepositorycreate.NewCommand(\"create\", builder),\n\t\t\t\t\t\t\t\t\trepositoryget.NewCommand(\"get\", builder),\n\t\t\t\t\t\t\t\t\trepositorylist.NewCommand(\"list\", builder),\n\t\t\t\t\t\t\t\t\trepositorydelete.NewCommand(\"delete\", builder),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t//{\n\t\t\t\t\t\t\t//\tUse: \"branch\",\n\t\t\t\t\t\t\t//\tShort: \"Repository branch commands.\",\n\t\t\t\t\t\t\t//\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t//\t\tbranchcreate.NewCommand(\"create\", builder),\n\t\t\t\t\t\t\t//\t\tbranchlist.NewCommand(\"list\", builder),\n\t\t\t\t\t\t\t//\t},\n\t\t\t\t\t\t\t//},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tUse: \"tag\",\n\t\t\t\t\t\t\t\tShort: \"Repository tag commands.\",\n\t\t\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t\t\ttagcreate.NewCommand(\"create\", builder),\n\t\t\t\t\t\t\t\t\ttaglist.NewCommand(\"list\", builder),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tUse: \"commit\",\n\t\t\t\t\t\t\t\tShort: \"Repository commit commands.\",\n\t\t\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t\t\tcommitget.NewCommand(\"get\", 
builder),\n\t\t\t\t\t\t\t\t\tcommitlist.NewCommand(\"list\", builder),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tUse: \"plugin\",\n\t\t\t\t\t\t\t\tShort: \"Plugin commands.\",\n\t\t\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t\t\tplugincreate.NewCommand(\"create\", builder),\n\t\t\t\t\t\t\t\t\tpluginlist.NewCommand(\"list\", builder),\n\t\t\t\t\t\t\t\t\tplugindelete.NewCommand(\"delete\", builder),\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tUse: \"version\",\n\t\t\t\t\t\t\t\t\t\tShort: \"Plugin version commands.\",\n\t\t\t\t\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t\t\t\t\tpluginversionlist.NewCommand(\"list\", builder),\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tUse: \"template\",\n\t\t\t\t\t\t\t\tShort: \"Template commands.\",\n\t\t\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t\t\ttemplatecreate.NewCommand(\"create\", builder),\n\t\t\t\t\t\t\t\t\ttemplatelist.NewCommand(\"list\", builder),\n\t\t\t\t\t\t\t\t\ttemplatedelete.NewCommand(\"delete\", builder),\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tUse: \"version\",\n\t\t\t\t\t\t\t\t\t\tShort: \"Template version commands.\",\n\t\t\t\t\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t\t\t\t\ttemplateversioncreate.NewCommand(\"create\", builder),\n\t\t\t\t\t\t\t\t\t\t\ttemplateversionlist.NewCommand(\"list\", builder),\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tUse: \"config\",\n\t\t\t\t\t\tShort: \"Interact with the configuration of Buf.\",\n\t\t\t\t\t\tDeprecated: betaConfigDeprecationMessage,\n\t\t\t\t\t\tHidden: true,\n\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\tappcmd.NewDeletedCommand(\"init\", betaConfigInitDeprecationMessage),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tUse: \"image\",\n\t\t\t\t\t\tShort: \"Work with Images and 
FileDescriptorSets.\",\n\t\t\t\t\t\tDeprecated: imageDeprecationMessage,\n\t\t\t\t\t\tHidden: true,\n\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\tappcmd.NewDeletedCommand(\"convert\", betaImageConvertDeprecationMessage),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tUse: \"mod\",\n\t\t\t\t\t\tShort: \"Configure and update buf modules.\",\n\t\t\t\t\t\tDeprecated: betaModDeprecationMessage,\n\t\t\t\t\t\tHidden: true,\n\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\tappcmd.NewDeletedCommand(\"init\", betaModInitDeprecationMessage),\n\t\t\t\t\t\t\tappcmd.NewDeletedCommand(\"update\", betaModUpdateDeprecationMessage),\n\t\t\t\t\t\t\tappcmd.NewDeletedCommand(\"export\", betaModExportDeprecationMessage),\n\t\t\t\t\t\t\tappcmd.NewDeletedCommand(\"clear-cache\", betaModClearCacheDeprecationMessage, \"cc\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tappcmd.NewDeletedCommand(\"push\", betaPushDeprecationMessage),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUse: \"alpha\",\n\t\t\t\tShort: \"Alpha commands. 
These are so early in development that they should not be used except in development.\",\n\t\t\t\tHidden: true,\n\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t{\n\t\t\t\t\t\tUse: \"registry\",\n\t\t\t\t\t\tShort: \"Interact with the Buf Schema Registry.\",\n\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tUse: \"token\",\n\t\t\t\t\t\t\t\tShort: \"Token commands.\",\n\t\t\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\t\t\ttokencreate.NewCommand(\"create\", builder),\n\t\t\t\t\t\t\t\t\ttokenget.NewCommand(\"get\", builder),\n\t\t\t\t\t\t\t\t\ttokenlist.NewCommand(\"list\", builder),\n\t\t\t\t\t\t\t\t\ttokendelete.NewCommand(\"delete\", builder),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tappcmd.NewDeletedCommand(\"login\", loginDeprecationMessage),\n\t\t\tappcmd.NewDeletedCommand(\"logout\", logoutDeprecationMessage),\n\t\t\tappcmd.NewDeletedCommand(\"push\", pushDeprecationMessage),\n\t\t\t{\n\t\t\t\tUse: \"image\",\n\t\t\t\tShort: \"Work with Images and FileDescriptorSets.\",\n\t\t\t\tDeprecated: imageDeprecationMessage,\n\t\t\t\tHidden: true,\n\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\tappcmd.NewDeletedCommand(\"build\", imageBuildDeprecationMessage),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUse: \"check\",\n\t\t\t\tShort: \"Run linting or breaking change detection.\",\n\t\t\t\tDeprecated: checkDeprecationMessage,\n\t\t\t\tHidden: true,\n\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\tappcmd.NewDeletedCommand(\"lint\", checkLintDeprecationMessage),\n\t\t\t\t\tappcmd.NewDeletedCommand(\"breaking\", checkBreakingDeprecationMessage),\n\t\t\t\t\tappcmd.NewDeletedCommand(\"ls-lint-checkers\", checkLsLintCheckersDeprecationMessage),\n\t\t\t\t\tappcmd.NewDeletedCommand(\"ls-breaking-checkers\", checkLsBreakingCheckersDeprecationMessage),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUse: \"experimental\",\n\t\t\t\tShort: \"Experimental commands. 
Unstable and will likely change.\",\n\t\t\t\tDeprecated: experimentalDeprecationMessage,\n\t\t\t\tHidden: true,\n\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t{\n\t\t\t\t\t\tUse: \"image\",\n\t\t\t\t\t\tShort: \"Work with Images and FileDescriptorSets.\",\n\t\t\t\t\t\tDeprecated: imageDeprecationMessage,\n\t\t\t\t\t\tHidden: true,\n\t\t\t\t\t\tSubCommands: []*appcmd.Command{\n\t\t\t\t\t\t\tappcmd.NewDeletedCommand(\"convert\", experimentalImageConvertDeprecationMessage),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func NewCmdRoot(in io.Reader, out, err io.Writer) (cmd *cobra.Command) {\n\t// Create CLI\n\tcmd = &cobra.Command{\n\t\tUse: \"rabdis\",\n\t\tShort: \"rabdis is a program that help to delete Redis keys from RabbitMQ messages\",\n\t\tVersion: ver.Version,\n\t\tRun: RunCmdRoot,\n\t}\n\n\tcmd.AddCommand(version.NewCmdVersion(in, out, err))\n\n\treturn\n}", "func RootCMD() *cobra.Command {\n\tviperSetup, v := aduuapp.SetupViper(\"mylsm\", &aduuapp.SetupViperConfig{})\n\t_ = v\n\n\tcmd := &cobra.Command{\n\t\tUse: \"mylsm\",\n\t\tShort: \"runs its own lsm\",\n\t\tSilenceUsage: true,\n\t}\n\n\tviperSetup.SetupFlags(cmd, &aduuapp.SetupFlagsConfig{})\n\n\tcmd.RunE = func(cmd *cobra.Command, args []string) (err error) {\n\t\tif viperSetup.IsWriteConfigSet() {\n\t\t\treturn viperSetup.WriteConfig()\n\t\t}\n\n\t\treturn run()\n\t}\n\tcmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) (err error) {\n\t\treturn nil\n\t}\n\n\tcmd.SetOut(os.Stdout)\n\tcmd.SetErr(os.Stderr)\n\tcmd.AddCommand()\n\treturn cmd\n}", "func (t *tree) Root() *node {\n\treturn t.root\n}", "func getRootCmd(env map[string]string) *cobra.Command {\n\tin := runInput{}\n\troot := &cobra.Command{\n\t\tUse: \"sonolark\",\n\t\tShort: \"Sonolark is a tool which allows users to easily build scripts using the Starlark language on top of our library of useful functions including assertions, Kubernetes API access, and more.\",\n\t\tRunE: func(cmd *cobra.Command, 
args []string) error {\n\n\t\t\tthread := &starlark.Thread{}\n\t\t\tshared.SetGoCtx(thread, context.Background())\n\n\t\t\t// Automatically start/end suite.\n\t\t\tsonobuoy.StartSuite(thread, -1)\n\t\t\tdefer sonobuoy.Done(thread)\n\n\t\t\tpredeclared, err := getLibraryFuncs(in.KubeConfigPath, env)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_, err = starlark.ExecFile(thread, in.Filename, nil, *predeclared)\n\t\t\tif err != nil {\n\t\t\t\tif evalErr, ok := err.(*starlark.EvalError); ok {\n\t\t\t\t\tsonobuoy.FailTest(thread, evalErr.Backtrace())\n\t\t\t\t\treturn errors.New(evalErr.Backtrace())\n\t\t\t\t}\n\t\t\t\tsonobuoy.FailTest(thread, err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\troot.Flags().StringVarP(&in.Filename, \"file\", \"f\", getDefaultScriptName(env), \"The name of the script to run\")\n\troot.Flags().Var(&in.LogLevel, \"level\", \"The Log level. One of {panic, fatal, error, warn, info, debug, trace}\")\n\tif home := homedir.HomeDir(); home != \"\" {\n\t\troot.Flags().StringVar(&in.KubeConfigPath, \"kubeconfig\", filepath.Join(home, \".kube\", \"config\"), \"(optional) absolute path to the kubeconfig file\")\n\t} else {\n\t\troot.Flags().StringVar(&in.KubeConfigPath, \"kubeconfig\", \"\", \"absolute path to the kubeconfig file\")\n\t}\n\n\troot.AddCommand(NewCmdVersion())\n\treturn root\n}", "func (fs *FS) Root() (fs.Node, error) {\n\tfs.μ.RLock()\n\tdefer fs.μ.RUnlock()\n\treturn fs.rnode, nil\n}", "func RootFrom(root *RootCommand) (opt *RootCmdOpt) {\n\toptCtx = &optContext{current: &root.Command, root: root, workingFlag: nil}\n\n\topt = &RootCmdOpt{optCommandImpl: optCommandImpl{working: optCtx.current}}\n\topt.parent = opt\n\treturn\n}", "func RootFrom(root *RootCommand) (opt *RootCmdOpt) {\n\toptCtx = &optContext{current: &root.Command, root: root, workingFlag: nil}\n\n\topt = &RootCmdOpt{optCommandImpl: optCommandImpl{working: optCtx.current}}\n\topt.parent = opt\n\treturn\n}", "func (r *router) 
Root() *OpenAPI {\n\treturn r.root\n}", "func NewRootCommand(clients *pkg.Clients) *cobra.Command {\n\tresult := cobra.Command{\n\t\tUse: \"kn-vsphere\",\n\t\tShort: \"Knative plugin to create Knative compatible Event Sources for VSphere events,\\nand Bindings to access the vSphere API\",\n\t}\n\tresult.AddCommand(NewLoginCommand(clients))\n\tresult.AddCommand(NewSourceCommand(clients))\n\tresult.AddCommand(NewBindingCommand(clients))\n\tresult.AddCommand(NewVersionCommand())\n\treturn &result\n}", "func (agent *MerkleAgent) Root() []byte {\n\treturn agent.root\n}", "func Execute() {\n\tif err := RootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func (l *Loader) Root() *ecsgen.Root {\n\treturn l.root\n}", "func (r *Root) Root() (fs.Node, error) {\n\tdebug.Log(\"Root()\")\n\treturn r, nil\n}", "func (db *Database) Root() *Group {\n\treturn db.root\n}", "func NewRootCmd() *cobra.Command {\n\treturn rootCmd\n}", "func (w *RootWalker) Root() *Root {\n\treturn w.r\n}", "func (n *NodeBuilder) Root() *TagNode {\n\treturn n.root\n}", "func (t *Tree) Root() *node {\n\treturn t.root\n}", "func (c *Client) GetRoot() (*RootOutput, *ErrorOutput) {\n\toutput := new(RootOutput)\n\terrorOutput := c.sendAPIRequest(\"GET\", \"\", nil, output)\n\treturn output, errorOutput\n}", "func NewRootCommand() *cobra.Command {\n\n\tvar rootCmd = &cobra.Command{\n\t\tUse: \"kn-sample\",\n\t\tShort: \"Sample kn plugin printing out a nice message\",\n\t\tLong: `Longer description of this fantastic plugin that can go over several lines.`,\n\t}\n\n\trootCmd.AddCommand(command.NewPrintCommand())\n\trootCmd.AddCommand(command.NewVersionCommand())\n\n\treturn rootCmd\n}", "func (c *Container) Root() *Root {\n\treturn c.root\n}", "func (k *KeyTransactions) Root() []byte {\n\treturn k.root\n}", "func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func 
Execute() error {\n\treturn RootCmd.Execute()\n}", "func NewRootCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"hydra\",\n\t\tShort: \"Run and manage ORY Hydra\",\n\t}\n\tRegisterCommandRecursive(cmd)\n\treturn cmd\n}", "func (t *Tree) Root() *TreeNode {\n\treturn t.root\n}", "func Root() *cobra.Command {\n\tvar verbosity string\n\tcmd := &cobra.Command{\n\t\tUse: \"zipspy\",\n\t\tShort: \"Interface with remote ZIP archives\",\n\t\tLong: ` \n ____ __ ____ ____ ____ _ _ \n(__ )( )( _ \\/ ___)( _ \\( \\/ )\n / _/ )( ) __/\\___ \\ ) __/ ) / \n(____)(__)(__) (____/(__) (__/ \n\nZipspy allows you interact with ZIP archives stored in remote locations without\nrequiring a local copy. For example, you can list the filenames in an S3 ZIP archive, \ndownload a subset of files, search and retrieve files with regular expressions, and more!`,\n\t}\n\tcmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {\n\t\tif err := cfg.initProvider(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to initialize provider: %v\", err)\n\t\t}\n\t\tif err := setupLogger(verbosity); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to initialize logger: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\tcmd.PersistentFlags().BoolVar(&cfg.development, \"development\", false, \"whether or not to use development settings\")\n\tcmd.PersistentFlags().StringVar(&cfg.archiveLocation, \"location\", \"\", `(required) protocol and address of your ZIP archive (\"file://archive.zip\", \"s3://<bucket_name>/archive.zip\")`)\n\tcmd.LocalFlags().StringVar(&verbosity, \"verbosity\", logrus.WarnLevel.String(), \"global log level (trace, debug, info, warn, error, fatal, panic)\")\n\tmust(cmd.MarkPersistentFlagRequired(\"location\"))\n\n\tcmd.AddCommand(List())\n\tcmd.AddCommand(Extract())\n\n\treturn cmd\n}", "func (p *Parser) Root() *FileNode {\n\treturn p.nod\n}", "func NewRootCmd() *RootCmd {\n\tconfigOptions = NewConfigOptions()\n\trootOpts := NewRootOptions()\n\trootCmd := 
&cobra.Command{\n\t\tUse: \"driverkit\",\n\t\tShort: \"A command line tool to build Falco kernel modules and eBPF probes.\",\n\t\tValidArgs: validProcessors,\n\t\tArgs: cobra.OnlyValidArgs,\n\t\tDisableFlagsInUseLine: true,\n\t\tDisableAutoGenTag: true,\n\t\tRun: func(c *cobra.Command, args []string) {\n\t\t\tif len(args) == 0 {\n\t\t\t\tlogger.WithField(\"processors\", validProcessors).Info(\"specify the processor\")\n\t\t\t}\n\t\t\t// Fallback to help\n\t\t\tc.Help()\n\t\t},\n\t}\n\trootCmd.PersistentPreRunE = persistentValidateFunc(rootCmd, rootOpts)\n\n\tflags := rootCmd.PersistentFlags()\n\n\tflags.StringVarP(&configOptions.ConfigFile, \"config\", \"c\", configOptions.ConfigFile, \"config file path (default $HOME/.driverkit.yaml if exists)\")\n\tflags.StringVarP(&configOptions.LogLevel, \"loglevel\", \"l\", configOptions.LogLevel, \"log level\")\n\tflags.IntVar(&configOptions.Timeout, \"timeout\", configOptions.Timeout, \"timeout in seconds\")\n\tflags.BoolVar(&configOptions.DryRun, \"dryrun\", configOptions.DryRun, \"do not actually perform the action\")\n\n\tflags.StringVar(&rootOpts.Output.Module, \"output-module\", rootOpts.Output.Module, \"filepath where to save the resulting kernel module\")\n\tflags.StringVar(&rootOpts.Output.Probe, \"output-probe\", rootOpts.Output.Probe, \"filepath where to save the resulting eBPF probe\")\n\tflags.StringVar(&rootOpts.DriverVersion, \"driverversion\", rootOpts.DriverVersion, \"driver version as a git commit hash or as a git tag\")\n\tflags.Uint16Var(&rootOpts.KernelVersion, \"kernelversion\", rootOpts.KernelVersion, \"kernel version to build the module for, it's the numeric value after the hash when you execute 'uname -v'\")\n\tflags.StringVar(&rootOpts.KernelRelease, \"kernelrelease\", rootOpts.KernelRelease, \"kernel release to build the module for, it can be found by executing 'uname -v'\")\n\tflags.StringVarP(&rootOpts.Target, \"target\", \"t\", rootOpts.Target, \"the system to target the build 
for\")\n\tflags.StringVar(&rootOpts.KernelConfigData, \"kernelconfigdata\", rootOpts.KernelConfigData, \"base64 encoded kernel config data: in some systems it can be found under the /boot directory, in other it is gzip compressed under /proc\")\n\n\tviper.BindPFlags(flags)\n\n\t// Subcommands\n\trootCmd.AddCommand(NewKubernetesCmd(rootOpts))\n\trootCmd.AddCommand(NewDockerCmd(rootOpts))\n\n\treturn &RootCmd{\n\t\tc: rootCmd,\n\t}\n}", "func (db *Database) Root() *doltdb.RootValue {\n\treturn db.root\n}", "func NewRootCmd() *cobra.Command {\n\tctx := context.Background()\n\n\tcmd := &cobra.Command{\n\t\tUse: \"grpc-sample\",\n\t\tShort: \"Go gRPC sample is a sample to realize micro service with grpc.\",\n\t\tLong: `\nGo gRPC sample is a sample to realize micro service with grpc.`,\n\t}\n\n\tcmd.AddCommand(NewServerStartCmd(ctx))\n\tcmd.AddCommand(NewServiceStartCmd(ctx))\n\n\treturn cmd\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func 
Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}", "func Execute() error {\n\treturn rootCmd.Execute()\n}" ]
[ "0.74413633", "0.74413633", "0.72362185", "0.6880526", "0.68118465", "0.67302173", "0.6711436", "0.66773975", "0.66617537", "0.6641351", "0.6569897", "0.6499735", "0.6418801", "0.6369525", "0.63511467", "0.62980586", "0.6211899", "0.62098473", "0.6208147", "0.61993384", "0.61674803", "0.61263293", "0.6123744", "0.6116324", "0.61146235", "0.6114447", "0.6073171", "0.6062865", "0.60514575", "0.60271966", "0.6025328", "0.6011941", "0.6011099", "0.6007754", "0.6005688", "0.6001508", "0.5998109", "0.5994262", "0.5955698", "0.59426427", "0.5936548", "0.5936548", "0.5936548", "0.5928921", "0.59252363", "0.59189385", "0.5902352", "0.5894022", "0.5890908", "0.5887404", "0.5882961", "0.58817875", "0.58817875", "0.5859533", "0.5853941", "0.5846692", "0.5832115", "0.58308876", "0.58272207", "0.58209527", "0.5813765", "0.5808893", "0.5802516", "0.58013713", "0.57813674", "0.5777403", "0.57746375", "0.5725441", "0.57218224", "0.57218224", "0.571996", "0.57185334", "0.5717652", "0.569445", "0.5693565", "0.56853795", "0.5673101", "0.566766", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283", "0.5662283" ]
0.80711883
0
IntToBinary int covert to binary
func IntToBinary(number int32) string { tempn := number if number < 0 { //求补码,因为负数是以补码的方式存储的 number = ((-number) ^ Int32Max) + 1 } s := "" var i int32 i = 0 for i = 31; i >= 0; i = i - 1 { //处理最高位 if i == 31 { if tempn >= 0 { s = s + "0" } else { s = s + "1" } } else { //逻辑与位运算 bit := int32(0) if (int32(1)<<i)&number > 0 { bit = int32(1) } s = s + fmt.Sprintf("%d", bit) } } return s }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ConvertIntToBinary(inum int, n int) []byte {\n\tbinary := make([]byte, n)\n\n\tfor i := 0; i < n; i++ {\n\t\tbinary[i] = byte(inum % 2)\n\t\tinum = inum / 2\n\t}\n\n\treturn binary\n}", "func numToBinary(v int64) string {\n\ts := strconv.FormatInt(v, 2)\n\tw := (len(s) + 7) / 8\n\tif w == 0 {\n\t\tw = 8\n\t}\n\treturn pad(s, w, '0')\n}", "func convertToBinary(num int) string {\n\treturn \"\"\n}", "func IntToBytes(i int) ([]byte, error) {\n\n\t// Tiny Int\n\tif i < 0x80 {\n\t\treturn []byte{byte(uint(i))}, nil\n\t}\n\n\t// Regular Int\n\tbuf := new(bytes.Buffer)\n\tif i < 0x100 {\n\t\tbuf.Write([]byte{0xc8, byte(uint(i))})\n\t} else if i < 0x10000 {\n\t\tbuf.WriteByte(byte(0xc9))\n\t\terr := binary.Write(buf, binary.BigEndian, uint16(i))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if i < 0x100000000 {\n\t\tbuf.WriteByte(byte(0xca))\n\t\terr := binary.Write(buf, binary.BigEndian, uint32(i))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tbuf.WriteByte(byte(0xcb))\n\t\terr := binary.Write(buf, binary.BigEndian, uint64(i))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func intToBytes(i int) []byte {\n\tvar il = strconv.IntSize / 8\n\tb := make([]byte, il)\n\tb[0] = byte(i)\n\tb[1] = byte(i >> 8)\n\tb[2] = byte(i >> 16)\n\tb[3] = byte(i >> 24)\n\tif il == 8 {\n\t\tb[4] = byte(i >> 32)\n\t\tb[5] = byte(i >> 40)\n\t\tb[6] = byte(i >> 48)\n\t\tb[7] = byte(i >> 56)\n\t}\n\treturn b\n}", "func IntBinConverter(str string, target reflect.Value) (ok bool) {\n\ti, err := strconv.ParseInt(str, 2, 64)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttarget.SetInt(i)\n\treturn true\n}", "func (n *Uint256) toBin() []byte {\n\tif n.IsZero() {\n\t\treturn []byte(\"0\")\n\t}\n\n\t// Create space for the max possible number of output digits.\n\tmaxOutDigits := n.BitLen()\n\tresult := make([]byte, maxOutDigits)\n\n\t// Convert each internal base 2^64 word to base 2 from least to most\n\t// significant. 
Since the value is guaranteed to be non-zero per a previous\n\t// check, there will always be a nonzero most-significant word. Also, note\n\t// that no partial digit handling is needed in this case because the shift\n\t// amount evenly divides the bits per internal word.\n\tconst shift = 1\n\tconst mask = 1<<shift - 1\n\tconst digitsPerInternalWord = bitsPerInternalWord\n\toutputIdx := maxOutDigits - 1\n\tnumInputWords := n.numDigits()\n\tinputWord := n.n[0]\n\tfor inputIdx := 1; inputIdx < numInputWords; inputIdx++ {\n\t\tfor i := 0; i < digitsPerInternalWord; i++ {\n\t\t\tresult[outputIdx] = '0' + byte(inputWord&mask)\n\t\t\tinputWord >>= shift\n\t\t\toutputIdx--\n\t\t}\n\t\tinputWord = n.n[inputIdx]\n\t}\n\tfor inputWord != 0 {\n\t\tresult[outputIdx] = '0' + byte(inputWord&mask)\n\t\tinputWord >>= shift\n\t\toutputIdx--\n\t}\n\n\treturn result[outputIdx+1:]\n}", "func int_to_bytes(s int) []byte {\n\tt := uint32(s)\n\tbt := make([]byte, 4)\n\tbt[0] = byte(t)\n\tbt[1] = byte(t >> 8)\n\tbt[2] = byte(t >> 16)\n\tbt[3] = byte(t >> 24)\n\treturn bt\n}", "func int2bytes(val uint64) []byte {\n\tdata, j := make([]byte, 8), -1\n\tfor i := 0; i < 8; i++ {\n\t\tshift := uint64((7 - i) * 8)\n\t\tdata[i] = byte((val & (0xff << shift)) >> shift)\n\n\t\tif j == -1 && data[i] != 0 {\n\t\t\tj = i\n\t\t}\n\t}\n\n\tif j != -1 {\n\t\treturn data[j:]\n\t}\n\treturn data[:1]\n}", "func intToBytes(n int) []byte {\n\tbuf := make([]byte, 3)\n\tbuf[0] = byte((n >> 16) & 0xFF)\n\tbuf[1] = byte((n >> 8) & 0xFF)\n\tbuf[2] = byte(n & 0xFF)\n\treturn buf\n}", "func bcd2bin(n int) int {\n\treturn (n & 0xF) + ((n & 0xF0) >> 4 * 10)\n}", "func (d *DataItem) ToIntBytes() ([]byte, error) {\n\tswitch v := d.raw.(type) {\n\tcase int64:\n\t\treturn prvEncodeInt(v), nil\n\t}\n\treturn nil, typeError\n}", "func Int2Byte(data uint32) (ret []byte) {\n\tvar len uint32 = 4\n\tret = make([]byte, len)\n\tvar tmp uint32 = 0xff\n\tvar index uint32 = 0\n\tfor index = 0; index < len; index++ {\n\t\tret[index] = 
byte((tmp << (index * 8) & data) >> (index * 8))\n\t}\n\treturn ret\n}", "func intToByte(v, l int) (out []byte) {\n\tconst (\n\t\tspace\t= string(0x20)\n\t\tnul\t= string(0x00)\n\t)\n\ttabl := map[string]int{\t\" \": 0, \"0\": 0, \"1\": 1, \"2\": 2, \"3\": 3, \"4\": 4,\n\t\t\"5\": 5, \"6\": 6, \"7\": 7, \"8\": 8, \"9\": 9, \"a\": 0x0a,\n\t\t\"b\": 0x0b, \"c\": 0x0c, \"d\": 0x0d,\"e\": 0x0e, \"f\": 0x0f}\n\tif l%2 != 0 {\n\t\tl++\n\t}\n\ts := fmt.Sprintf(\"%\"+strconv.Itoa(l)+\"x\", v)\n\ts = strings.Replace(s, space, nul, -1)\n\tout = make([]byte, l/2)\n\n\tfor k := 1; k <= l-1; k += 2 {\n\t\tout[(k-1)/2] = byte(tabl[string(s[k-1])]<<4 + tabl[string(s[k])])\n\t}\n\treturn out\n}", "func IntToBytes(dst []byte, val interface{}) ([]byte, error) {\n\tvar i int64\n\tswitch val.(type) {\n\tcase int:\n\t\ti = int64(val.(int))\n\tcase *int:\n\t\ti = int64(*val.(*int))\n\tcase int8:\n\t\ti = int64(val.(int8))\n\tcase *int8:\n\t\ti = int64(*val.(*int8))\n\tcase int16:\n\t\ti = int64(val.(int16))\n\tcase *int16:\n\t\ti = int64(*val.(*int16))\n\tcase int32:\n\t\ti = int64(val.(int32))\n\tcase *int32:\n\t\ti = int64(*val.(*int32))\n\tcase int64:\n\t\ti = val.(int64)\n\tcase *int64:\n\t\ti = *val.(*int64)\n\tdefault:\n\t\treturn dst, ErrUnknownType\n\t}\n\n\tdst = strconv.AppendInt(dst, i, 10)\n\treturn dst, nil\n}", "func itob(v int) []byte {\n b := make([]byte, 8)\n binary.BigEndian.PutUint64(b, uint64(v))\n return b\n}", "func itob(v int) []byte {\n b := make([]byte, 8)\n binary.BigEndian.PutUint64(b, uint64(v))\n return b\n}", "func toBInteger(n int) string {\n\n\treturn fmt.Sprintf(\"i%de\", n)\n}", "func ConvertUint64ToBinary(number uint64, n int) []*operation.Scalar {\n\tif number == 0 {\n\t\tres := make([]*operation.Scalar, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tres[i] = new(operation.Scalar).FromUint64(0)\n\t\t}\n\t\treturn res\n\t}\n\n\tbinary := make([]*operation.Scalar, n)\n\n\tfor i := 0; i < n; i++ {\n\t\tbinary[i] = new(operation.Scalar).FromUint64(number % 
2)\n\t\tnumber = number / 2\n\t}\n\treturn binary\n}", "func ConvertUint64ToBinary(number uint64, n int) []*operation.Scalar {\n\tif number == 0 {\n\t\tres := make([]*operation.Scalar, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tres[i] = new(operation.Scalar).FromUint64(0)\n\t\t}\n\t\treturn res\n\t}\n\n\tbinary := make([]*operation.Scalar, n)\n\n\tfor i := 0; i < n; i++ {\n\t\tbinary[i] = new(operation.Scalar).FromUint64(number % 2)\n\t\tnumber = number / 2\n\t}\n\treturn binary\n}", "func integerToBytes(log log.T, input int32) (result []byte, err error) {\n\tbuf := new(bytes.Buffer)\n\tbinary.Write(buf, binary.BigEndian, input)\n\tif buf.Len() != 4 {\n\t\tlog.Error(\"integerToBytes failed: buffer output length is not equal to 4.\")\n\t\treturn make([]byte, 4), errors.New(\"Input array size is not equal to 4.\")\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func Bin(n int) string {\n\treturn fmt.Sprintf(\"0b%b\", n)\n}", "func intToBase64(n *big.Int) string {\n\tvar result string\n\tand := big.NewInt(0x3f)\n\tvar tmp, nn big.Int\n\tnn.Set(n)\n\n\tfor nn.Cmp(big.NewInt(0)) > 0 {\n\t\tbit := tmp.And(&nn, and).Uint64()\n\t\tresult += string(base64en[bit])\n\t\tnn.Rsh(&nn, 6)\n\t}\n\treturn result + string(base64en[0]*byte(86-len(result)))\n}", "func IntBytes(x *big.Int,) []byte", "func IntBit(x *big.Int, i int) uint", "func intBytes(i int) []byte {\n\tb := make([]byte, il)\n\tb[0] = byte(i)\n\tb[1] = byte(i >> 8)\n\tb[2] = byte(i >> 16)\n\tb[3] = byte(i >> 24)\n\tif il == 8 {\n\t\tb[4] = byte(i >> 32)\n\t\tb[5] = byte(i >> 40)\n\t\tb[6] = byte(i >> 48)\n\t\tb[7] = byte(i >> 56)\n\t}\n\treturn b\n}", "func EncodedInt(n int) []byte {\n\tvar b [5]byte\n\ti := 0\n\te := uint32(n)\n\tif e == 0 {\n\t\treturn []byte{0}\n\t}\n\n\tfor e != 0 {\n\t\tb[i] = byte(e)\n\t\te >>= 7\n\t\tif e != 0 {\n\t\t\tb[i] |= 0x80\n\t\t}\n\t\ti++\n\t}\n\n\treturn b[:i]\n}", "func n2b(n int) []byte {\n\tvar b [8]byte\n\tbinary.BigEndian.PutUint64(b[:], uint64(n))\n\treturn b[:]\n}", "func flippingBits(n 
int64) int64 {\n\tn_binary := strconv.FormatInt(n, 2)\n\tn_binary_len := len(n_binary)\n\tif n_binary_len < 32 {\n\t\tfor i := 0; i < 32 - n_binary_len; i++ {\n\t\t\tn_binary = \"0\" + n_binary \n\t\t}\n\t}\n\t\n\tn_binary_slice := []byte(n_binary)\n\tfor i, v := range n_binary_slice {\n\t\tif v == '1' {\n\t\t\tn_binary_slice[i] = '0'\n\t\t} else {\n\t\t\tn_binary_slice[i] = '1'\n\t\t}\n\t}\n\t\n\tres_int, _ := strconv.ParseInt(string(n_binary_slice), 2, 64)\n\treturn res_int\n}", "func IntBits(x *big.Int,) []big.Word", "func itob(v int) []byte {\n\treturn []byte(strconv.FormatInt(int64(v), 10))\n}", "func (e *Encoder) Binary(v []byte) (int, error) {\n\ts, err := e.Int32(int32(len(v)))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tn, err := e.buf.Write(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn s + n, nil\n}", "func (buff *Bytes) ToInt() int {\r\n\treturn (int)(binary.LittleEndian.Uint32(*buff))\r\n}", "func bin2bcd(n int) int {\n\tunits := n % 10\n\ttens := (n - units) / 10\n\n\treturn (tens << 4) | units\n}", "func btoi(b []byte) int {\n\tv := int(binary.BigEndian.Uint64(b))\n return v\n}", "func IntMarshalText(x *big.Int,) ([]byte, error)", "func itob(v int) []byte {\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, uint64(v))\n\treturn b\n}", "func itob(v int) []byte {\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, uint64(v))\n\treturn b\n}", "func itob(v int) []byte {\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, uint64(v))\n\treturn b\n}", "func itob(v int) []byte {\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, uint64(v))\n\treturn b\n}", "func itob(v int) []byte {\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, uint64(v))\n\treturn b\n}", "func itob(v int) []byte {\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, uint64(v))\n\treturn b\n}", "func itob(v int) []byte {\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, uint64(v))\n\treturn b\n}", "func itob(v int) []byte {\n\tb := 
make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, uint64(v))\n\treturn b\n}", "func itob(v int) []byte {\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, uint64(v))\n\treturn b\n}", "func encodeInt(b []byte, v int64) []byte {\n\tvar data [8]byte\n\tu := encodeIntToCmpUint(v)\n\tbinary.BigEndian.PutUint64(data[:], u)\n\treturn append(b, data[:]...)\n}", "func canonicalizeInt(val *big.Int) []byte {\n\tb := val.Bytes()\n\tif len(b) == 0 {\n\t\tb = []byte{0x00}\n\t}\n\tif b[0]&0x80 != 0 {\n\t\tpaddedBytes := make([]byte, len(b)+1)\n\t\tcopy(paddedBytes[1:], b)\n\t\tb = paddedBytes\n\t}\n\treturn b\n}", "func intToIP(ipInt *big.Int, bits int) net.IP {\n\tipBytes := ipInt.Bytes()\n\tval := make([]byte, bits/8)\n\n\t// big.Int.Bytes() removes front zero padding.\n\t// IP bytes packed at the end of the return array,\n\tfor i := 1; i <= len(ipBytes); i++ {\n\t\tval[len(val)-i] = ipBytes[len(ipBytes)-i]\n\t}\n\n\treturn net.IP(val)\n}", "func (sb *Builder) WriteInt(i int64, base int) {\n\t// if 0 <= i && i < nSmalls && base == 10 {\n\t// \tsb.WriteString(smallInt(int(i)))\n\t// }\n\tsb.formatBits(uint64(i), base, i < 0)\n}", "func btoi(b []byte) int {\n\treturn int(binary.BigEndian.Uint64(b))\n}", "func btoi(b []byte) int {\n\treturn int(binary.BigEndian.Uint64(b))\n}", "func btoi(b []byte) int {\n\treturn int(binary.BigEndian.Uint64(b))\n}", "func bin(x uint64) string {\n\ts := fmt.Sprintf(\"/%016x/%d\", x, x)\n\tfor i := 0; i < 64; i++ {\n\t\tthisBit := \"0\"\n\t\tif x&1 == 1 {\n\t\t\tthisBit = \"1\"\n\t\t}\n\t\ts = thisBit + s\n\t\tx >>= 1\n\t}\n\n\treturn s\n}", "func IntGobEncode(x *big.Int,) ([]byte, error)", "func IntegerToByteCode(i int64) (uint32, uint32) {\n\treturn uint32(i & 0xFFFFFFFF), uint32(i >> 32)\n}", "func RtoB(r int) uint64", "func EncodeInteger(toEncode int64) []byte {\n\t// Calculate the length we'll need for the encoded value.\n\tvar l int64 = 1\n\tif toEncode > 0 {\n\t\tfor i := toEncode; i > 255; i >>= 8 {\n\t\t\tl++\n\t\t}\n\t} else 
{\n\t\tfor i := -toEncode; i > 255; i >>= 8 {\n\t\t\tl++\n\t\t}\n\t\t// Ensure room for the sign if necessary.\n\t\tif toEncode < 0 {\n\t\t\tl++\n\t\t}\n\t}\n\n\t// Now create a byte array of the correct length and copy the value into it.\n\tresult := make([]byte, l)\n\tfor i := int64(0); i < l; i++ {\n\t\tresult[i] = byte(toEncode >> uint(8*(l-i-1)))\n\t}\n\tif result[0] > 127 && toEncode > 0 {\n\t\tresult = append([]byte{0}, result...)\n\t}\n\t/*\n\t\t// Chop off superfluous 0xff's.\n\t\ts := 0\n\t\tfor ; s+1 < len(result) && result[s] == 0xff && result[s+1] == 0xff; s++ {\n\t\t}\n\t\treturn result[s:]*/\n\treturn result\n}", "func (x *Int) Bytes() []byte {}", "func (n *Uint256) toOctal() []byte {\n\tif n.IsZero() {\n\t\treturn []byte(\"0\")\n\t}\n\n\t// Create space for the max possible number of output digits using the fact\n\t// that 3 bits converts directly to a single octal digit.\n\tmaxOutDigits := (n.BitLen() + 2) / 3\n\tresult := make([]byte, maxOutDigits)\n\n\t// Convert each internal base 2^64 word to base 8 from least to most\n\t// significant. Since the value is guaranteed to be non-zero per a previous\n\t// check, there will always be a nonzero most-significant word. 
Also, note\n\t// that partial digit handling is needed in this case because the shift\n\t// amount does not evenly divide the bits per internal word.\n\tconst shift = 3\n\tconst mask = 1<<shift - 1\n\tunconvertedBits := bitsPerInternalWord\n\toutputIdx := maxOutDigits - 1\n\tnumInputWords := n.numDigits()\n\tinputWord := n.n[0]\n\tfor inputIdx := 1; inputIdx < numInputWords; inputIdx++ {\n\t\t// Convert full digits.\n\t\tfor ; unconvertedBits >= shift; unconvertedBits -= shift {\n\t\t\tresult[outputIdx] = '0' + byte(inputWord&mask)\n\t\t\tinputWord >>= shift\n\t\t\toutputIdx--\n\t\t}\n\n\t\t// Move to the next input word when there are not any remaining\n\t\t// unconverted bits that need to be handled.\n\t\tif unconvertedBits == 0 {\n\t\t\tinputWord = n.n[inputIdx]\n\t\t\tunconvertedBits = bitsPerInternalWord\n\t\t\tcontinue\n\t\t}\n\n\t\t// Account for the remaining unconverted bits from the current word and\n\t\t// the bits needed from the next word to form a full digit for the next\n\t\t// digit.\n\t\tinputWord |= n.n[inputIdx] << unconvertedBits\n\t\tresult[outputIdx] = '0' + byte(inputWord&mask)\n\t\toutputIdx--\n\n\t\t// Move to the next input word while accounting for the bits already\n\t\t// consumed above by shifting it and updating the unconverted bits\n\t\t// accordingly.\n\t\tinputWord = n.n[inputIdx] >> (shift - unconvertedBits)\n\t\tunconvertedBits = bitsPerInternalWord - (shift - unconvertedBits)\n\t}\n\tfor inputWord != 0 {\n\t\tresult[outputIdx] = '0' + byte(inputWord&mask)\n\t\tinputWord >>= shift\n\t\toutputIdx--\n\t}\n\n\treturn result[outputIdx+1:]\n}", "func Int64ToBytes(i int64) []byte {\n\treturn Uint64ToBytes(uint64(i))\n}", "func btoi(b []byte) int {\n\ti, err := strconv.Atoi(string(b))\n\tassert(err == nil, \"btoi error: %s\", err)\n\treturn i\n}", "func appendInt(dst []byte, n uint8, nn uint64) []byte {\n\tnu := uint64(1<<n - 1)\n\tm := len(dst) - 1\n\tif m == -1 {\n\t\tdst = append(dst, 0)\n\t\tm++\n\t}\n\n\tif nn < nu {\n\t\tdst[m] |= 
byte(nn)\n\t} else {\n\t\tnn -= nu\n\t\tdst[m] |= byte(nu)\n\t\tm = len(dst)\n\t\tnu = 1 << (n + 1)\n\t\ti := 0\n\t\tfor nn > 0 {\n\t\t\ti++\n\t\t\tif i == m {\n\t\t\t\tdst = append(dst, 0)\n\t\t\t\tm++\n\t\t\t}\n\t\t\tdst[i] = byte(nn | 128)\n\t\t\tnn >>= 7\n\t\t}\n\t\tdst[i] &= 127\n\t}\n\treturn dst\n}", "func btoi(b []byte) uint64 {\n\treturn binary.BigEndian.Uint64(b)\n}", "func (x *Int) Bits() []Word {}", "func (i Int) Serialize() ([]byte, error) {\n\treturn []byte(strconv.Itoa(int(i))), nil\n}", "func bitsToBytes(bits uint) uint {\n\treturn (bits + 7) / 8\n}", "func relay2ic(in int) byte {\n\tvar v byte\n\tfor i := 0; i < 8; i++ {\n\t\tif (in & (1 << i)) != 0 {\n\t\t\tv |= 1 << relay2Addr[i]\n\t\t}\n\t}\n\treturn v\n}", "func VarInt(i uint64) []byte {\n\tb := make([]byte, 9)\n\tif i < 0xfd {\n\t\tb[0] = byte(i)\n\t\treturn b[:1]\n\t}\n\tif i < 0x10000 {\n\t\tb[0] = 0xfd\n\t\tbinary.LittleEndian.PutUint16(b[1:3], uint16(i))\n\t\treturn b[:3]\n\t}\n\tif i < 0x100000000 {\n\t\tb[0] = 0xfe\n\t\tbinary.LittleEndian.PutUint32(b[1:5], uint32(i))\n\t\treturn b[:5]\n\t}\n\tb[0] = 0xff\n\tbinary.LittleEndian.PutUint64(b[1:9], i)\n\treturn b\n}", "func btoi(b bool) int {\n\tif b {\n\t\treturn 1\n\t} else {\n\t\treturn 0\n\t}\n}", "func ExampleInt() {\n\n\tfmt.Println(conv.Uint(\"123.456\")) // 123\n\tfmt.Println(conv.Uint(\"-123.456\")) // 0\n\tfmt.Println(conv.Uint8(uint64(math.MaxUint64))) // 255\n\t// Output:\n\t// 123 <nil>\n\t// 0 <nil>\n\t// 255 <nil>\n}", "func btoi(b bool) int {\n\tif b {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func Int64ToBytes(n uint64) []byte {\n\tvar res [8]byte\n\tbinary.LittleEndian.PutUint64(res[:], n)\n\treturn res[:]\n}", "func Int32ToBytes(n uint32) []byte {\n\tvar res [4]byte\n\tbinary.LittleEndian.PutUint32(res[:], n)\n\treturn res[:]\n}", "func concatenatedBinary(n int) int {\n\tresult := 0\n\tfor i := 1; i <= n; i++ {\n\t\tshift := numToShift(i)\n\t\tresult = result << shift\n\t\tresult = result | i // we know the bits are cleared, 
so logical-or them into the result\n\t\t//for result > mod { // reduce if bigger. this is okay, becuase the biggest shift possible is for 10^5, which is less than 14 bits, with 10^9 being less than 20, 20+14, reduced by 20 bits, brings it back under. (19+ 14) = 33, -20 => 13.\n\t\t//\tresult = result - mod\n\t\t//}\n\t\tresult = result % mod\n\t}\n\treturn result\n}", "func Byte2Int(data []byte) uint32 {\n\tvar ret uint32 = 0\n\tvar len uint32 = 4\n\tvar i uint32 = 0\n\tfor i = 0; i < len; i++ {\n\t\tret = ret | (uint32(data[i]) << (i * 8))\n\t}\n\treturn ret\n}", "func bytes2int(data []byte) uint64 {\n\tn, val := len(data), uint64(0)\n\n\tfor i, b := range data {\n\t\tval += uint64(b) << uint64((n-i-1)*8)\n\t}\n\treturn val\n}", "func Int32ToBytes(i int32) []byte {\n\treturn Uint32ToBytes(uint32(i))\n}", "func Btoi(b bool) int {\n\tif b {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func binaryEncode(dst, src []byte) {\n\td := uint(0)\n\t_, _ = src[0], dst[7]\n\tfor i := 7; i >= 0; i-- {\n\t\tif src[0]&(1<<d) == 0 {\n\t\t\tdst[i] = '0'\n\t\t} else {\n\t\t\tdst[i] = '1'\n\t\t}\n\t\td++\n\t}\n}", "func Encode16BitsInt(n int) []byte {\n\tvar buf bytes.Buffer\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, uint64(n))\n\tbuf.WriteByte(b[6])\n\tbuf.WriteByte(b[7])\n\treturn buf.Bytes()\n}", "func encodeInt64(i int64) []byte {\n\tb := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(b, uint64(i))\n\treturn b\n}", "func toBase(bi *big.Int, destBase []string) string {\n\t// Hack in order to \"clone\" the big.Int and avoid changing it.\n\tsrc := big.NewInt(0)\n\tsrc.Add(bi, big.NewInt(0))\n\n\tif big.NewInt(0).Cmp(src) == 0 {\n\t\treturn destBase[0]\n\t}\n\n\tvar digits []string\n\tnumericBase := big.NewInt(int64(len(destBase)))\n\n\t// Keep going while bi is greater than 0.\n\tfor src.Cmp(big.NewInt(0)) > 0 {\n\t\tremainder := big.NewInt(0).Rem(src, numericBase)\n\t\tsrc.Div(src, numericBase)\n\t\tdigits = append(digits, destBase[remainder.Int64()])\n\t}\n\n\treturn 
strings.Join(digits, \" \")\n}", "func IntBitLen(x *big.Int,) int", "func packInt(Data int, pad *scratchpad) {\n\tif Data < 256 && Data > 0 {\n\t\t// We can pack as a small int.\n\t\tpad.endAppend('a', byte(Data))\n\t} else if 2147483647 > Data && Data > -2147483647 {\n\t\t// We should pack as a standard int.\n\t\ta := make([]byte, 5)\n\t\ta[0] = 'b'\n\t\ti32 := int32(Data)\n\t\tntohl32(*(*uint32)(unsafe.Pointer(&i32)), a, 1)\n\t\tpad.endAppend(a...)\n\t} else {\n\t\t// Call packInt64 (this will only ever get here on a 64-bit system).\n\t\tpackInt64(int64(Data), pad)\n\t}\n}", "func IntAppend(x *big.Int, buf []byte, base int) []byte", "func hexStringToBinary(s string) string {\n\tres := \"\"\n\tb, _ := hex.DecodeString(s)\n\tfor _, c := range b {\n\t\tbinary, _ := strconv.Atoi(fmt.Sprintf(\"%.b\", c))\n\t\tres = fmt.Sprintf(\"%s%s\", res, fmt.Sprintf(\"%08d\", binary))\n\t}\n\treturn res\n}", "func AppendInt(dst []byte, val int) []byte {\n\tmajor := majorTypeUnsignedInt\n\tcontentVal := val\n\tif val < 0 {\n\t\tmajor = majorTypeNegativeInt\n\t\tcontentVal = -val - 1\n\t}\n\tif contentVal <= additionalMax {\n\t\tlb := byte(contentVal)\n\t\tdst = append(dst, byte(major|lb))\n\t} else {\n\t\tdst = appendCborTypePrefix(dst, major, uint64(contentVal))\n\t}\n\treturn dst\n}", "func Int2String(v int) string {\n\treturn strconv.Itoa(v)\n}", "func (x *Int) Bit(i int) uint {}", "func ToInt8(i interface{}) int8 {\n\treturn cast.ToInt8(i)\n}", "func int2ip(binip uint32) net.IP {\n\tip := make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, binip)\n\treturn ip\n}", "func GibToByte(nGib int) int64 {\n\treturn int64(nGib) * Gib\n}", "func Int2Str(i int) string {\n\treturn strconv.Itoa(i)\n}", "func binarySolution() {\n seats := parseInput()\n ids := []int{}\n for _, seat := range seats {\n\tbinString := strings.\n\t NewReplacer(\"F\", \"0\", \"B\", \"1\", \"L\", \"0\", \"R\", \"1\").\n\t Replace(seat)\n\tid, _ := strconv.ParseUint(binString, 2, 10)\n\tids = append(ids, int(id))\n\n 
}\n sort.Ints(ids)\n}", "func binU(x uint) string {\n\treturn bin(uint64(x))\n}", "func bytesToInt(b []byte) int {\n\tresult := 0\n\tfor _, v := range b {\n\t\tresult = result<<8 + int(v)\n\t}\n\treturn result\n}", "func EncodeInt(b []byte, v int64) []byte {\n\tvar data [8]byte\n\tu := EncodeIntToCmpUint(v)\n\tendian.PutUint64(data[:], u)\n\treturn append(b, data[:]...)\n}", "func (r *Encoder) VarInt(u uint64) {\n\tbytes := make([]byte, 0, 1)\n\tfor {\n\t\tb := uint8(u & 0x7f)\n\t\tu = u >> 7\n\t\tif u > 0 {\n\t\t\tbytes = append(bytes, b|0x80)\n\t\t} else {\n\t\t\t// this is the most significant byte\n\t\t\tbytes = append(bytes, b)\n\t\t\tr.Bytes(bytes)\n\t\t\treturn\n\t\t}\n\t}\n}", "func EncodeInt(buf []byte, i int) []byte {\n\tvar u uint\n\tif i < 0 {\n\t\tu = uint(^(i << 1))\n\t} else {\n\t\tu = uint(i << 1)\n\t}\n\treturn EncodeUint(buf, u)\n}", "func IntToUint8(int_ int) (uint8_ uint8) {\n\tuint8_ = uint8(int_)\n\n\treturn\n}" ]
[ "0.85502505", "0.7415773", "0.69417083", "0.68668336", "0.6751084", "0.67082614", "0.66370434", "0.6554462", "0.65086925", "0.6468067", "0.6435964", "0.6381753", "0.6350033", "0.63494414", "0.63122267", "0.6281484", "0.6281484", "0.6257358", "0.62489444", "0.62489444", "0.6219783", "0.6146875", "0.6102298", "0.60918456", "0.60804045", "0.6050679", "0.6030509", "0.5989353", "0.5971366", "0.5956681", "0.5954705", "0.5941629", "0.59303576", "0.59251946", "0.5899197", "0.5881071", "0.5870467", "0.5870467", "0.5870467", "0.5870467", "0.5870467", "0.5870467", "0.5870467", "0.5870467", "0.5870467", "0.5818342", "0.5797025", "0.5793464", "0.57875085", "0.5786231", "0.5786231", "0.5786231", "0.57643783", "0.57484007", "0.573045", "0.5715209", "0.56956446", "0.5695091", "0.55908597", "0.55672896", "0.55619127", "0.5561746", "0.5561572", "0.5547724", "0.5545868", "0.5545047", "0.55268323", "0.55224895", "0.55128545", "0.54985976", "0.5479292", "0.5463894", "0.5456925", "0.54558843", "0.5448235", "0.5442503", "0.5441174", "0.5433084", "0.5416876", "0.5410173", "0.5409105", "0.54078525", "0.54011315", "0.53876704", "0.5386496", "0.53754216", "0.5367804", "0.53656447", "0.53512853", "0.53465146", "0.53313196", "0.5328449", "0.5327338", "0.53268903", "0.53212476", "0.52798", "0.5275585", "0.52722585", "0.52704763", "0.5261595" ]
0.789347
1
HTTPHandler implements the handler required for executing the graphql queries and mutations
func HTTPHandler(schema *graphql.Schema) http.Handler { return &httpHandler{ handler{ schema: schema, executor: &graphql.Executor{}, }, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func graphQLHandler(w http.ResponseWriter, r *http.Request) {\n\tctx := appengine.NewContext(r)\n\tvar stringOutput string\n\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tbody, err := ioutil.ReadAll(r.Body) // Read the query via the request body, assuming a POST request\n\t\tif err != nil {\n\t\t\tmiddleware.ResponseError(w, \"Invalid request body\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tstringOutput = string(body)\n\tcase \"GET\":\n\t\tstringOutput = r.URL.Query().Get(\"query\")\n\tdefault:\n\t\tstringOutput = \"\"\n\t}\n\n\tqueryParams := graphql.Params{ // compose the GraphQL query parameters\n\t\tSchema: schema,\n\t\tRequestString: stringOutput,\n\t\tContext: ctx,\n\t}\n\n\tresp := graphql.Do(queryParams) // execute the GraphQL request\n\n\tif len(resp.Errors) > 0 { // check for response errors\n\t\tmiddleware.ResponseError(w, fmt.Sprintf(\"%+v\", resp.Errors), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tmiddleware.ResponseJSON(w, resp) // return the query result\n}", "func GetHTTPHandler(r ResolverRoot, db *DB, migrations []*gormigrate.Migration, res http.ResponseWriter, req *http.Request) {\n\tif os.Getenv(\"DEBUG\") == \"true\" {\n\t\tlog.Debug().Msgf(\"Path base: %s\", path.Base(req.URL.Path))\n\t}\n\texecutableSchema := NewExecutableSchema(Config{Resolvers: r})\n\tgqlHandler := handler.New(executableSchema)\n\tgqlHandler.AddTransport(transport.Websocket{\n\t\tKeepAlivePingInterval: 10 * time.Second,\n\t})\n\tgqlHandler.AddTransport(transport.Options{})\n\tgqlHandler.AddTransport(transport.GET{})\n\tgqlHandler.AddTransport(transport.POST{})\n\tgqlHandler.AddTransport(transport.MultipartForm{})\n\tgqlHandler.Use(extension.FixedComplexityLimit(300))\n\tif os.Getenv(\"DEBUG\") == \"true\" {\n\t\tgqlHandler.Use(extension.Introspection{})\n\t}\n\tgqlHandler.Use(apollotracing.Tracer{})\n\tgqlHandler.Use(extension.AutomaticPersistedQuery{\n\t\tCache: lru.New(100),\n\t})\n\tloaders := GetLoaders(db)\n\tif os.Getenv(\"EXPOSE_MIGRATION_ENDPOINT\") == 
\"true\" {\n\t\tif path.Base(req.URL.Path) == \"migrate\" {\n\t\t\terr := db.Migrate(migrations)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(res, err.Error(), 400)\n\t\t\t}\n\t\t\tfmt.Fprintf(res, \"OK\")\n\t\t}\n\t\tif path.Base(req.URL.Path) == \"automigrate\" {\n\t\t\terr := db.AutoMigrate()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(res, err.Error(), 400)\n\t\t\t}\n\t\t\tfmt.Fprintf(res, \"OK\")\n\t\t}\n\t}\n\tgqlBasePath := os.Getenv(\"API_GRAPHQL_BASE_RESOURCE\")\n\tif gqlBasePath == \"\" {\n\t\tgqlBasePath = \"graphql\"\n\t}\n\tif path.Base(req.URL.Path) == gqlBasePath {\n\t\tctx := initContextWithJWTClaims(req)\n\t\tctx = context.WithValue(ctx, KeyLoaders, loaders)\n\t\tctx = context.WithValue(ctx, KeyExecutableSchema, executableSchema)\n\t\treq = req.WithContext(ctx)\n\t\tgqlHandler.ServeHTTP(res, req)\n\t}\n\n\tif os.Getenv(\"EXPOSE_PLAYGROUND_ENDPOINT\") == \"true\" && path.Base(req.URL.Path) == \"playground\" {\n\t\tplaygroundHandler := playground.Handler(\"GraphQL playground\", gqlBasePath)\n\t\tctx := initContextWithJWTClaims(req)\n\t\tctx = context.WithValue(ctx, KeyLoaders, loaders)\n\t\tctx = context.WithValue(ctx, KeyExecutableSchema, executableSchema)\n\t\treq = req.WithContext(ctx)\n\t\tif req.Method == \"GET\" {\n\t\t\tplaygroundHandler(res, req)\n\t\t}\n\t}\n}", "func GraphqlHandler(orm *orm.ORM, gqlConfig *utils.GQLConfig) gin.HandlerFunc {\n\t// NewExecutableSchema and Config are in the generated.go file\n\tc := gql.Config{\n\t\tResolvers: &resolvers.Resolver{\n\t\t\tORM: orm, // pass in the ORM instance in the resolvers to be used\n\t\t},\n\t\tDirectives: gql.DirectiveRoot{},\n\t\tComplexity: gql.ComplexityRoot{},\n\t}\n\n\t// setProjectComplexity(&c)\n\t// h := handler.GraphQL(gql.NewExecutableSchema(c), handler.ComplexityLimit(gqlConfig.ComplexityLimit))\n\n\tsrv := handler.New(gql.NewExecutableSchema(c))\n\tsrv.AddTransport(transport.Websocket{\n\t\tKeepAlivePingInterval: 10 * 
time.Second,\n\t})\n\tsrv.AddTransport(transport.Options{})\n\tsrv.AddTransport(transport.GET{})\n\tsrv.AddTransport(transport.POST{})\n\tsrv.AddTransport(transport.MultipartForm{})\n\tsrv.Use(extension.FixedComplexityLimit(gqlConfig.ComplexityLimit))\n\tif gqlConfig.IsIntrospectionEnabled {\n\t\tsrv.Use(extension.Introspection{})\n\t}\n\tsrv.Use(apollotracing.Tracer{})\n\tsrv.Use(extension.AutomaticPersistedQuery{\n\t\tCache: lru.New(100),\n\t})\n\treturn func(c *gin.Context) {\n\t\t// h.ServeHTTP(c.Writer, c.Request)\n\t\tsrv.ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func graphQLHandler(c echo.Context) error {\n\n\tgrq := new(GQLRequest)\n\tif err := c.Bind(grq); err != nil {\n\t\treturn err\n\t}\n\n\tquery := grq.Query\n\tvariables := grq.Variables\n\tgqlContext := map[string]interface{}{}\n\n\tresult, err := nap_executor.Execute(gqlContext, query, variables, \"\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn c.JSON(http.StatusOK, result)\n\n}", "func GraphqlHandler() gin.HandlerFunc {\n // NewExecutableSchema and Config are in the generated.go file\n // Resolver is in the resolver.go file\n h := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &resolvers.Resolver{}}))\n\n return func(c *gin.Context) {\n h.ServeHTTP(c.Writer, c.Request)\n }\n}", "func graphqlHandler() gin.HandlerFunc {\n\th := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{}}))\n\treturn func(c *gin.Context) {\n\t\th.ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func graphqlHandler() gin.HandlerFunc {\n\t// NewExecutableSchema and Config are in the generated.go file\n\t// Resolver is in the resolver.go file\n\tclient := database.NewDefaultDB()\n\th := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &resolver.Resolver{Client: client}}))\n\n\treturn func(c *gin.Context) {\n\t\th.ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func graphqlHandler() gin.HandlerFunc {\n\t// 
NewExecutableSchema and Config are in the generated.go file\n\t// Resolver is in the resolver.go file\n\th := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{}}))\n\n\treturn func(c *gin.Context) {\n\t\th.ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func Handler() func(ctx *routing.Context) error {\n\treturn func(ctx *routing.Context) error {\n\t\t// r.URL.Query().Get(\"query\")\n\t\tprint(ctx.Request.URI().QueryString())\n\t\tresult := executeQuery(string(ctx.Request.URI().QueryString()), schema)\n\t\tjson.NewEncoder(ctx.Response.BodyWriter()).Encode(result)\n\t\treturn nil\n\t}\n}", "func handleQuery(schema *graphql.Schema, w http.ResponseWriter, r *http.Request, db database.DB) {\n\tif r.Body == nil {\n\t\thttp.Error(w, \"Must provide graphql query in request body\", 400)\n\t\treturn\n\t}\n\n\t// Read and close JSON request body\n\tbody, err := ioutil.ReadAll(r.Body)\n\n\tdefer func() {\n\t\t_ = r.Body.Close()\n\t}()\n\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"%d error request: %v\", http.StatusBadRequest, err)\n\t\tlog.Error(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar req data\n\tif err := json.Unmarshal(body, &req); err != nil {\n\t\tmsg := fmt.Sprintf(\"Unmarshal request: %v\", err)\n\t\tlog.Error(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Execute graphql query\n\tresult := graphql.Do(graphql.Params{\n\t\tSchema: *schema,\n\t\tRequestString: req.Query,\n\t\tVariableValues: req.Variables,\n\t\tOperationName: req.Operation,\n\t\tContext: context.WithValue(context.Background(), \"database\", db), //nolint\n\t})\n\n\t//// Error check\n\t//if len(result.Errors) > 0 {\n\t//\tlog.\n\t//\t\tWithField(\"query\", req.Query).\n\t//\t\tWithField(\"variables\", req.Variables).\n\t//\t\tWithField(\"operation\", req.Operation).\n\t//\t\tWithField(\"errors\", result.Errors).Error(\"Execute query error(s)\")\n\t//}\n\n\trender.JSON(w, r, result)\n}", "func 
Handler() gin.HandlerFunc {\n\t// Creates a GraphQL-go HTTP handler with the defined schema\n\th := handler.GraphQL(gqlgen.NewExecutableSchema(gqlgen.Config{Resolvers: &gqlgen.Resolver{}}))\n\n\treturn func(c *gin.Context) {\n\t\th.ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func PlaygroundHandler(path string) gin.HandlerFunc {\n \n h := playground.Handler(\"GraphQL\", path)\n\n return func(c *gin.Context) {\n h.ServeHTTP(c.Writer, c.Request)\n }\n}", "func (s *Server) GraphQL() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// Check to ensure query was provided in the request body\n\t\tif r.Body == nil {\n\t\t\thttp.Error(w, \"Must provide graphql query in request body\", 400)\n\t\t\treturn\n\t\t}\n\n\t\tvar rBody reqBody\n\t\t// Decode the request body into rBody\n\t\terr := json.NewDecoder(r.Body).Decode(&rBody)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error parsing JSON request body\", 400)\n\t\t}\n\n\t\t// Execute graphql query\n\t\tresult := gql.ExecuteQuery(rBody.Query, *s.GqlSchema)\n\n\t\t// render.JSON comes from the chi/render package and handles\n\t\t// marshalling to json, automatically escaping HTML and setting\n\t\t// the Content-Type as application/json.\n\t\trender.JSON(w, r, result)\n\t}\n}", "func handleQuery() func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tresult := graphql.Do(graphql.Params{\n\t\t\tSchema: createSchema(),\n\t\t\tRequestString: r.URL.Query().Get(\"query\"),\n\t\t})\n\t\terr := json.NewEncoder(w).Encode(result)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error Serializing result\")\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func GraphqlHandlerFunc(w http.ResponseWriter, r *http.Request) {\n\n\t// get query\n\topts := handler.NewRequestOptions(r)\n\tbasicToken := r.Header.Get(\"Authorization\")\n\n\tctx := context.WithValue(r.Context(), authKey, basicToken)\n\n\t// execute graphql query\n\tparams := graphql.Params{\n\t\tSchema: 
Schema,\n\t\tRequestString: opts.Query,\n\t\tVariableValues: opts.Variables,\n\t\tOperationName: opts.OperationName,\n\t\tContext: ctx,\n\t}\n\n\tresult := graphql.Do(params)\n\n\tvar buff []byte\n\n\tw.WriteHeader(http.StatusOK)\n\n\tbuff, _ = json.MarshalIndent(result, \"\", \"\\t\")\n\n\tw.Write(buff)\n}", "func mutationsHandler(c *web.C, h http.Handler) http.Handler {\n\tmutConfig := MutationLogSpec()\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tif mutConfig.Httpstore != \"\" {\n\t\t\tbuf, err := ioutil.ReadAll(r.Body)\n\t\t\tif err != nil {\n\t\t\t\tBadRequest(w, r, \"unable to read POST for mirroring: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdup := make([]byte, len(buf))\n\t\t\tcopy(dup, buf)\n\t\t\tr.Body = ioutil.NopCloser(bytes.NewBuffer(dup))\n\n\t\t\tuuid, ok := c.Env[\"uuid\"].(dvid.UUID)\n\t\t\tif !ok {\n\t\t\t\tmsg := fmt.Sprintf(\"Bad format for UUID %q\\n\", c.Env[\"uuid\"])\n\t\t\t\tBadRequest(w, r, msg)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvar dataID dvid.UUID\n\t\t\tdataname := c.Env[\"dataname\"]\n\t\t\tif dataname != nil {\n\t\t\t\tinstancename, ok := dataname.(dvid.InstanceName)\n\t\t\t\tif !ok {\n\t\t\t\t\tBadRequest(w, r, \"bad data instance name\")\n\t\t\t\t}\n\t\t\t\tdata, err := datastore.GetDataByUUIDName(uuid, instancename)\n\t\t\t\tif err != nil {\n\t\t\t\t\tBadRequest(w, r, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdataID = data.DataUUID()\n\t\t\t}\n\t\t\tif err := LogHTTPMutation(uuid, dataID, r, buf); err != nil {\n\t\t\t\tBadRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\th.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func playgroundHandler() gin.HandlerFunc {\n\th := playground.Handler(\"GraphQL\", \"/query\")\n\n\treturn func(c *gin.Context) {\n\t\th.ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func (g *GraphQL) Do(ctx context.Context, command string, r io.Reader, response interface{}) error {\n\n\t// Want to capture the query being executed for logging.\n\t// The TeeReader will write the query to 
this buffer when\n\t// the request reads the query for the http call.\n\tvar query bytes.Buffer\n\tr = io.TeeReader(r, &query)\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, g.url+command, r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"graphql create request error: %w\", err)\n\t}\n\n\treq.Header.Set(\"Cache-Control\", \"no-cache\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Accept\", \"application/json\")\n\tif g.authToken != \"\" {\n\t\treq.Header.Set(g.authHeaderName, g.authToken)\n\t}\n\n\tresp, err := g.client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"graphql request error: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"graphql copy error: %w\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"graphql op error: status code: %s\", resp.Status)\n\t}\n\n\t// fmt.Println(\"*****graphql*******>\\n\", query.String(), \"\\n\", string(data))\n\n\tresult := struct {\n\t\tData interface{}\n\t\tErrors []struct {\n\t\t\tMessage string\n\t\t}\n\t}{\n\t\tData: response,\n\t}\n\tif err := json.Unmarshal(data, &result); err != nil {\n\t\treturn fmt.Errorf(\"graphql decoding error: %w response: %s\", err, string(data))\n\t}\n\n\tif len(result.Errors) > 0 {\n\t\treturn fmt.Errorf(\"graphql op error:\\nquery:\\n%sgraphql error:\\n%s\", query.String(), result.Errors[0].Message)\n\t}\n\n\treturn nil\n}", "func playgroundHandler() gin.HandlerFunc {\n\th := playground.Handler(\"AID-GraphQL\", \"/query\")\n\n\treturn func(c *gin.Context) {\n\t\th.ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func Handler(service e2e.Service, hooks *twirp.ServerHooks) *handler.Server {\n\tes := NewExecutableSchema(Config{Resolvers: &Resolver{service}})\n\tsrv := handler.New(es)\n\tsrv.AddTransport(transport.POST{})\n\tsrv.Use(extension.Introspection{})\n\tif hooks == nil {\n\t\treturn srv\n\t}\n\tsrv.AroundFields(func(ctx 
context.Context, next graphql.Resolver) (res interface{}, err error) {\n\t\tf := graphql.GetFieldContext(ctx)\n\t\tparent := f.Parent.Path().String()\n\t\tif parent != \"\" {\n\t\t\treturn next(ctx)\n\t\t}\n\t\tctx = ctxsetters.WithMethodName(ctx, f.Field.Name)\n\t\tif hooks.RequestRouted != nil {\n\t\t\tctx, err = hooks.RequestRouted(ctx)\n\t\t\tif err != nil {\n\t\t\t\tif terr, ok := err.(twirp.Error); ok && hooks.Error != nil {\n\t\t\t\t\tctx = hooks.Error(ctx, terr)\n\t\t\t\t}\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tres, err = next(ctx)\n\t\tif terr, ok := err.(twirp.Error); ok && hooks.Error != nil {\n\t\t\tctx = hooks.Error(ctx, terr)\n\t\t}\n\t\treturn res, err\n\t})\n\treturn srv\n}", "func MakeGraphQLHandler(\n\tendpoints Endpoints,\n\tproviderEndpoints ProviderEndpoints,\n\tserviceEndpoints ServiceEndpoints,\n\tregionEndpoints RegionEndpoints,\n\terrorHandler cloudinfo.ErrorHandler,\n) http.Handler {\n\t// nolint: staticcheck\n\treturn handler.GraphQL(graphql.NewExecutableSchema(graphql.Config{\n\t\tResolvers: &resolver{\n\t\t\tendpoints: endpoints,\n\t\t\tproviderEndpoints: providerEndpoints,\n\t\t\tserviceEndpoints: serviceEndpoints,\n\t\t\tregionEndpoints: regionEndpoints,\n\t\t\terrorHandler: errorHandler,\n\t\t},\n\t}))\n}", "func playgroundHandler() gin.HandlerFunc {\n\th := playground.Handler(\"GraphQL playground\", \"/query\")\n\treturn func(c *gin.Context) {\n\t\th.ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func QueryHandler(w http.ResponseWriter, r *http.Request) {\n\tdb := Connect()\n\tdefer db.Close()\n\n\tcanAccess, account := ValidateAuth(db, r, w)\n\tif !canAccess {\n\t\treturn\n\t}\n\n\tconnection, err := GetConnection(db, account.Id)\n\tif err != nil {\n\t\tif isBadConn(err, false) {\n\t\t\tpanic(err);\n\t\t\treturn;\n\t\t}\n\t\tstateResponse := &StateResponse{\n\t\t\tPeerId: 0,\n\t\t\tStatus: \"\",\n\t\t\tShouldFetch: false,\n\t\t\tShouldPeerFetch: false,\n\t\t}\n\t\tif err := json.NewEncoder(w).Encode(stateResponse); err != nil 
{\n\t\t\tpanic(err)\n\t\t}\n\t\treturn;\n\t}\n\n\tpeerId := connection.GetPeerId(account.Id)\n\tstatus := \"\"\n\tif connection.Status == PENDING {\n\t\tif connection.InviteeId == account.Id {\n\t\t\tstatus = \"pendingWithMe\"\n\t\t} else {\n\t\t\tstatus = \"pendingWithPeer\"\n\t\t}\n\t} else {\n\t\tstatus = \"connected\"\n\t}\n\n\tstateResponse := &StateResponse{\n\t\tPeerId: peerId,\n\t\tStatus: status,\n\t}\n\terr = CompleteFetchResponse(stateResponse, db, connection, account)\n\tif err != nil {\n\t\tlog.Printf(\"QueryPayload failed: %s\", err)\n\t\thttp.Error(w, \"could not query payload\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := json.NewEncoder(w).Encode(stateResponse); err != nil {\n\t\tpanic(err)\n\t}\n}", "func QueryHandler(w http.ResponseWriter, req *http.Request) {\n\tswitch req.Method {\n\tcase \"GET\":\n\n\t\tdata, err := v1.GetFileInfo(req.FormValue(\"fileName\"))\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to fetch the files , err:\", err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tbyteData, err := json.Marshal(data)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to marshal the data , err\", err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.Write(byteData)\n\tdefault:\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func visitHandler(p *pool.Pool) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tconn, err := p.Get()\n\t\tdefer p.Put(conn)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif req.Method == http.MethodGet {\n\t\t\tu := req.URL.Query()\n\t\t\tlog.Println(\"uri is\", u[\"q\"])\n\t\t\tres, err := conn.Cmd(\"HGET\", u[\"q\"], \"Survey\").Str()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tj, _ := json.Marshal(res)\n\t\t\tw.Write(j)\n\t\t}\n\t})\n}", "func (b 
*backend) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n}", "func (Executor) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tlogger.Logging(logger.DEBUG, \"receive msg\", req.Method, req.URL.Path)\n\tdefer logger.Logging(logger.DEBUG, \"OUT\")\n\n\tswitch reqUrl := req.URL.Path; {\n\tdefault:\n\t\tlogger.Logging(logger.DEBUG, \"Unknown URL\")\n\t\tcommon.MakeErrorResponse(w, errors.NotFoundURL{reqUrl})\n\n\tcase !(strings.Contains(reqUrl, (url.Base()+url.Management())) ||\n\t\tstrings.Contains(reqUrl, (url.Base()+url.Monitoring())) ||\n\t\tstrings.Contains(reqUrl, (url.Base()+url.Notification()))):\n\t\tlogger.Logging(logger.DEBUG, \"Unknown URL\")\n\t\tcommon.MakeErrorResponse(w, errors.NotFoundURL{reqUrl})\n\n\tcase strings.Contains(reqUrl, url.Unregister()):\n\t\thealthAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Management()) &&\n\t\tstrings.Contains(reqUrl, url.Apps()):\n\t\tdeploymentAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Resource()):\n\t\tresourceAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Configuration()):\n\t\tconfigurationAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Device()):\n\t\tdeviceAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Notification()):\n\t\tnotificationAPIExecutor.Handle(w, req)\n\t}\n}", "func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\t/* Only serve the one path */\n\tif h.endpoint != r.URL.Path {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\t/* Parse the query */\n\tif err := r.ParseForm(); nil != err {\n\t\th.Error(\n\t\t\tw,\n\t\t\tr,\n\t\t\t\"parse error: \"+err.Error(),\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t\treturn\n\t}\n\n\t/* Make sure we have a name and type */\n\tqname := r.Form.Get(QNAME) /* Query name */\n\tif \"\" == qname {\n\t\th.Error(w, r, \"no name provided\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tqtype := r.Form.Get(QTYPE) /* Query type 
*/\n\tif \"\" == qtype {\n\t\th.Error(w, r, \"no query type provided\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t/* Set the content-type in the response */\n\tctype := r.Form.Get(CTYPE) /* Content-type */\n\tif \"\" == ctype {\n\t\tctype = h.ctype\n\t}\n\tw.Header().Set(\"Content-Type\", ctype)\n\n\t/* Do the lookup */\n\tans, code, err := h.Lookup(qname, qtype)\n\tif nil != err {\n\t\th.Error(w, r, err.Error(), code)\n\t\treturn\n\t}\n\n\t/* If we're meant to make it pretty, do so */\n\tvar res []byte\n\tif _, ok := r.Form[PRETTY]; ok {\n\t\tres, err = json.MarshalIndent(ans, \"\", \"\\t\")\n\t} else {\n\t\tres, err = json.Marshal(ans)\n\t}\n\tif nil != err {\n\t\th.Error(w, r, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t/* Send it back */\n\tif h.verbose {\n\t\tlog.Printf(\n\t\t\t\"[%v] %v %v %v %v %q\",\n\t\t\tr.RemoteAddr,\n\t\t\thttp.StatusOK,\n\t\t\tr.Method,\n\t\t\tr.Host,\n\t\t\tr.URL,\n\t\t\tans.answerData(),\n\t\t)\n\t}\n\tw.Write(res)\n\tw.Write([]byte(\"\\n\"))\n}", "func (i instanceHandler) queryHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"instID\"]\n\tapiVersion := r.URL.Query().Get(\"ApiVersion\")\n\tkind := r.URL.Query().Get(\"Kind\")\n\tname := r.URL.Query().Get(\"Name\")\n\tlabels := r.URL.Query().Get(\"Labels\")\n\tif apiVersion == \"\" {\n\t\thttp.Error(w, \"Missing ApiVersion mandatory parameter\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif kind == \"\" {\n\t\thttp.Error(w, \"Missing Kind mandatory parameter\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tresp, err := i.client.Query(id, apiVersion, kind, name, labels)\n\tif err != nil {\n\t\tlog.Error(\"Error getting Query results\", log.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"id\": id,\n\t\t\t\"apiVersion\": apiVersion,\n\t\t\t\"kind\": kind,\n\t\t\t\"name\": name,\n\t\t\t\"labels\": labels,\n\t\t})\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\tw.Header().Set(\"Content-Type\", 
\"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Error(\"Error Marshaling Response\", log.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"response\": resp,\n\t\t})\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func (handler *WebHandler) Handle(w http.ResponseWriter, req *http.Request) {\n\tswitch req.Method {\n\tcase \"GET\":\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\tswitch req.URL.Path[len(\"/analysis/\"):] {\n\t\tcase \"totals\":\n\t\t\tparams, err := processParams(req.URL.Query())\n\t\t\tif err != nil {\n\t\t\t\twebhandler.ReturnError(err, w)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresults, err := totals(params, handler.rates, handler.backend.DB)\n\t\t\tif err != nil {\n\t\t\t\twebhandler.ReturnError(err, w)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tjson, _ := json.Marshal(results)\n\t\t\tfmt.Fprintln(w, string(json))\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tcase \"graph\":\n\t\t\tparams, err := processParams(req.URL.Query())\n\t\t\tif err != nil {\n\t\t\t\twebhandler.ReturnError(err, w)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgParams := gInitialise(params)\n\t\t\tresults, err := graph(gParams, handler.rates, handler.backend.DB)\n\t\t\tif err != nil {\n\t\t\t\twebhandler.ReturnError(err, w)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Fprintln(w, results)\n\t\t\tw.Header().Set(\"Content-Type\", \"image/svg+xml\")\n\t\tcase \"assets\":\n\t\t\tparams, err := processParams(req.URL.Query())\n\t\t\tif err != nil {\n\t\t\t\twebhandler.ReturnError(err, w)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresults, err := asts(params, handler.rates, handler.backend)\n\t\t\tif err != nil {\n\t\t\t\twebhandler.ReturnError(err, w)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tjson, err := json.Marshal(results)\n\t\t\tif err != nil {\n\t\t\t\twebhandler.ReturnError(err, w)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Fprintln(w, 
string(json))\n\t\tdefault:\n\t\t\thttp.Error(w, http.StatusText(404), 404)\n\t\t\treturn\n\t\t}\n\tcase \"OPTIONS\":\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"OPTIONS, GET\")\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\tw.Header().Set(\"Access-Control-Allow-Headers\", \"content-type\")\n\tdefault:\n\t\thttp.Error(w, http.StatusText(405), 405)\n\t}\n}", "func (h *handler) ServeHTTP(w stdhttp.ResponseWriter, r *stdhttp.Request) {\n\th.query = r.URL.Query()\n\th.header = r.Header\n\n\tif r.Method != stdhttp.MethodGet {\n\t\th.readBody(w, r)\n\t}\n\n\tw.WriteHeader(stdhttp.StatusOK)\n\t_, err := w.Write([]byte(\"message\"))\n\th.require.NoError(err)\n}", "func (h quorumHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// invoke the handler function in the struct\n\tstatus, err := h.H(h.quorumContext, w, r)\n\tif err != nil {\n\t\tlog.Printf(\"HTTP status %d: err %q\", status, err)\n\t\tswitch status {\n\t\tcase http.StatusNotFound:\n\t\t\thttp.NotFound(w, r)\n\t\tcase http.StatusInternalServerError:\n\t\t\thttp.Error(w, http.StatusText(status), status)\n\t\tdefault:\n\t\t\thttp.Error(w, http.StatusText(status), status)\n\t\t}\n\t}\n}", "func (s *Server) sqlHandler(w http.ResponseWriter, req *http.Request) {\n if(s.block) {\n time.Sleep(1000000* time.Second)\n }\n\n\tquery, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't read body: %s\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\n\tif s.leader != s.listen {\n\n\t\tcs, errLeader := transport.Encode(s.leader)\n\t\t\n\t\tif errLeader != nil {\n\t\t\thttp.Error(w, \"Only the primary can service queries, but this is a secondary\", http.StatusBadRequest)\t\n\t\t\tlog.Printf(\"Leader ain't present?: %s\", errLeader)\n\t\t\treturn\n\t\t}\n\n\t\t//_, errLeaderHealthCheck := s.client.SafeGet(cs, \"/healthcheck\") \n\n //if errLeaderHealthCheck != nil {\n // http.Error(w, \"Primary is down\", http.StatusBadRequest)\t\n // 
return\n //}\n\n\t\tbody, errLResp := s.client.SafePost(cs, \"/sql\", bytes.NewBufferString(string(query)))\n\t\tif errLResp != nil {\n s.block = true\n http.Error(w, \"Can't forward request to primary, gotta block now\", http.StatusBadRequest)\t\n return \n\t//\t log.Printf(\"Didn't get reply from leader: %s\", errLResp)\n\t\t}\n\n formatted := fmt.Sprintf(\"%s\", body)\n resp := []byte(formatted)\n\n\t\tw.Write(resp)\n\t\treturn\n\n\t} else {\n\n\t\tlog.Debugf(\"Primary Received query: %#v\", string(query))\n\t\tresp, err := s.execute(query)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t}\n\n\t\tw.Write(resp)\n\t\treturn\n\t}\n}", "func (o Options) GraphQLHandler() gin.HandlerFunc {\n\treturn gin.WrapH(&relay.Handler{Schema: NewSchema(o.DB)})\n}", "func NewHandler(config HandlerConfig) http.Handler {\n\tvar upgrader = websocket.Upgrader{\n\t\tCheckOrigin: func(r *http.Request) bool { return true },\n\t\tSubprotocols: []string{\"graphql-ws\"},\n\t}\n\n\tmgr := &ChanMgr{\n\t\tconns: make(map[string]map[string]*ResultChan),\n\t}\n\n\tif config.Logger == nil {\n\t\tconfig.Logger = &noopLogger{}\n\t}\n\n\treturn http.HandlerFunc(\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\t// Establish a WebSocket connection\n\t\t\tvar ws, err = upgrader.Upgrade(w, r, nil)\n\n\t\t\t// Bail out if the WebSocket connection could not be established\n\t\t\tif err != nil {\n\t\t\t\tconfig.Logger.Warnf(\"Failed to establish WebSocket connection\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Close the connection early if it doesn't implement the graphql-ws protocol\n\t\t\tif ws.Subprotocol() != \"graphql-ws\" {\n\t\t\t\tconfig.Logger.Warnf(\"Connection does not implement the GraphQL WS protocol\")\n\t\t\t\tws.Close()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Establish a GraphQL WebSocket connection\n\t\t\tNewConnection(ws, ConnectionConfig{\n\t\t\t\tAuthenticate: config.Authenticate,\n\t\t\t\tLogger: config.Logger,\n\t\t\t\tEventHandlers: 
ConnectionEventHandlers{\n\t\t\t\t\tClose: func(conn Connection) {\n\t\t\t\t\t\tconfig.Logger.Debugf(\"closing websocket: %s\", conn.ID)\n\t\t\t\t\t\tmgr.DelConn(conn.ID())\n\t\t\t\t\t},\n\t\t\t\t\tStartOperation: func(\n\t\t\t\t\t\tconn Connection,\n\t\t\t\t\t\topID string,\n\t\t\t\t\t\tdata *StartMessagePayload,\n\t\t\t\t\t) []error {\n\t\t\t\t\t\tconfig.Logger.Debugf(\"start operations %s on connection %s\", opID, conn.ID())\n\n\t\t\t\t\t\tctx := context.WithValue(context.Background(), ConnKey, conn)\n\t\t\t\t\t\tresultChannel := graphql.Subscribe(graphql.Params{\n\t\t\t\t\t\t\tSchema: config.Schema,\n\t\t\t\t\t\t\tRequestString: data.Query,\n\t\t\t\t\t\t\tVariableValues: data.Variables,\n\t\t\t\t\t\t\tOperationName: data.OperationName,\n\t\t\t\t\t\t\tContext: ctx,\n\t\t\t\t\t\t\tRootObject: config.RootValue,\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tmgr.Add(conn.ID(), opID, resultChannel)\n\n\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\t\tselect {\n\t\t\t\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\t\t\t\tmgr.Del(conn.ID(), opID)\n\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\tcase res, more := <-resultChannel:\n\t\t\t\t\t\t\t\t\tif !more {\n\t\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\terrs := []error{}\n\n\t\t\t\t\t\t\t\t\tif res.HasErrors() {\n\t\t\t\t\t\t\t\t\t\tfor _, err := range res.Errors {\n\t\t\t\t\t\t\t\t\t\t\tconfig.Logger.Debugf(\"subscription_error: %v\", err)\n\t\t\t\t\t\t\t\t\t\t\terrs = append(errs, err.OriginalError())\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\tconn.SendData(opID, &DataMessagePayload{\n\t\t\t\t\t\t\t\t\t\tData: res.Data,\n\t\t\t\t\t\t\t\t\t\tErrors: errs,\n\t\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t},\n\t\t\t\t\tStopOperation: func(conn Connection, opID string) {\n\t\t\t\t\t\tconfig.Logger.Debugf(\"stop operation %s on connection %s\", opID, conn.ID())\n\t\t\t\t\t\tmgr.Del(conn.ID(), 
opID)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t},\n\t)\n}", "func (mh *RootHandler) Handler(w http.ResponseWriter, r *http.Request) {\n ref := DatasetRefFromCtx(r.Context())\n if ref == nil {\n WebappHandler(w, r)\n return\n }\n if ref.IsPeerRef() {\n p := &core.PeerInfoParams{\n Peername: ref.Peername,\n }\n res := &profile.Profile{}\n err := mh.ph.Info(p, res)\n if err != nil {\n util.WriteErrResponse(w, http.StatusInternalServerError, err)\n return\n }\n if res.ID == \"\" {\n util.WriteErrResponse(w, http.StatusNotFound, errors.New(\"cannot find peer\"))\n return\n }\n util.WriteResponse(w, res)\n return\n }\n res := &repo.DatasetRef{}\n err := mh.dsh.Get(ref, res)\n if err != nil {\n util.WriteErrResponse(w, http.StatusInternalServerError, err)\n return\n }\n if res.Name == \"\" {\n util.WriteErrResponse(w, http.StatusNotFound, errors.New(\"cannot find peer dataset\"))\n return\n }\n if res == nil {\n util.WriteErrResponse(w, http.StatusNotFound, errors.New(\"cannot find peer dataset\"))\n return\n }\n util.WriteResponse(w, res)\n return\n}", "func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tparent := ID(r)\n\tr = WithID(r)\n\tid := ID(r)\n\tlog.WithFields(log.Fields{\n\t\t\"id\": id,\n\t\t\"type\": h.Type,\n\t\t\"parent\": parent,\n\t\t\"method\": r.Method,\n\t\t\"host\": r.Host,\n\t\t\"url\": r.URL.String(),\n\t\t\"content_length\": r.ContentLength,\n\t\t\"remote_addr\": r.RemoteAddr,\n\t\t\"header\": r.Header,\n\t}).Info(\"Started a transaction\")\n\n\trw := &responseWriter{\n\t\tResponseWriter: w,\n\t}\n\th.Next.ServeHTTP(rw, r)\n\n\tlog.WithFields(log.Fields{\n\t\t\"id\": id,\n\t\t\"status\": rw.status,\n\t\t\"content_length\": rw.contentLength,\n\t\t\"header\": rw.Header(),\n\t}).Info(\"Finished a transaction\")\n}", "func (c *Client) Do(ctx context.Context, req *Request, resp *Response) error {\n\tr := graphql.Response{}\n\tif resp != nil {\n\t\tr.Data = resp.Data\n\t\tr.Errors = resp.Errors\n\t\tr.Extensions = 
resp.Extensions\n\t}\n\treturn c.gql.MakeRequest(ctx, &graphql.Request{\n\t\tQuery: req.Query,\n\t\tVariables: req.Variables,\n\t\tOpName: req.OpName,\n\t}, &r)\n}", "func (network *Network) HTTPhandler(w http.ResponseWriter, r *http.Request){\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tbody, error := ioutil.ReadAll(r.Body) // Read Request\n\t\tdefer r.Body.Close() // Always CLOSE.\n\t\t// Check for errors or if body is empty.\n\t\tif error != nil || removeQuotationMarks(string(body)) == \"\" {\n\t\t\thttp.Error(w, \"ERROR\", http.StatusBadRequest)\n\t\t\tfmt.Println(\"Error when POST\")\n\t\t} else{\n\t\t\t// Same as in Cli.go Store\n\t\t\thashedFileString := NewKademliaIDFromData(string(body))\n\t\t\tnetwork.Store([]byte(body),hashedFileString)\n\t\t\thashSuffix := hashedFileString.String()\n\n\t\t\tmessage := map[string]string{ hashSuffix: string(body)} // JSON DATA FORMAT\n\t\t\tjsonValue,_ := json.Marshal(message)\n\n\t\t\tw.Header().Set(\"Location\", URLprefix+hashSuffix)\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\t\tw.WriteHeader(http.StatusCreated)\t// Status 201 as detailed\n\n\t\t\tw.Write(jsonValue)\n\t\t\tfmt.Println(\"HTTP Data Written. Hash = \", hashSuffix )\n\t\t}\n\tcase \"GET\":\n\t\t// Checks if there is something after the prefix. /objects/XXXXXXXXXXXXXX\n\t\tURLcomponents := strings.Split(r.URL.Path, \"/\")\t// [ \"\", \"objects\", \"hash\" ]\n\t\thashValue := URLcomponents[2]\n\t\t// Check if there is a hashvalue of correct size.\n\t\tif(len(hashValue) != 40){\n\t\t\thttp.Error(w, \"ERROR\", http.StatusLengthRequired)\n\t\t\tfmt.Println(\"Error when GET \", hashValue, \" is not of correct length. 
(40)\")\n\t\t}else{\n\t\t\t\t// Same as in Cli.go Get\n\t\t\t\thash := NewKademliaID(hashValue)\n\t\t\t\tdata, nodes := network.DataLookup(hash)\n\t\t\t\tif data != nil {\n\t\t\t\t\t// If data is not nil, send OK status and write.\n\t\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\t\tw.Write(data)\n\t\t\t\t\tfmt.Println(\"HTTP Data Read. Input was = \", string(data) )\n\t\t\t\t} else if len(nodes) > 0{\n\t\t\t\t\thttp.Error(w, \"ERROR\", http.StatusNotFound)\n\t\t\t\t\tfmt.Println(\"Error when GET - DataLookUP (Length)\")\n\t\t\t\t} else {\n\t\t\t\t\thttp.Error(w, \"ERROR\", http.StatusNoContent)\n\t\t\t\t\tfmt.Println(\"Error when GET - DataLookUP\")\n\t\t\t\t}\n\t\t}\n\tdefault:\n\t\thttp.Error(w, \"Wrong. Use POST or GET\", http.StatusMethodNotAllowed)\n\t}\n}", "func (s *Server) sqlHandler(w http.ResponseWriter, req *http.Request) {\n\tstate := s.cluster.State()\n\tif state != \"primary\" {\n\t\thttp.Error(w, \"Only the primary can service queries, but this is a \"+state, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tquery, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't read body: %s\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\n\tlog.Debugf(\"[%s] Received query: %#v\", s.cluster.State(), string(query))\n\tresp, err := s.execute(query)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\n\tr := &Replicate{\n\t\tSelf: s.cluster.self,\n\t\tQuery: query,\n\t}\n\tfor _, member := range s.cluster.members {\n\t\tb := util.JSONEncode(r)\n\t\t_, err := s.client.SafePost(member.ConnectionString, \"/replicate\", b)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Couldn't replicate query to %v: %s\", member, err)\n\t\t}\n\t}\n\n\tlog.Debugf(\"[%s] Returning response to %#v: %#v\", s.cluster.State(), string(query), string(resp))\n\tw.Write(resp)\n}", "func Init() http.HandlerFunc {\n\trootQuery := graphql.NewObject(graphql.ObjectConfig{\n\t\tName: \"Query\",\n\t\tFields: 
fields.HandleQuery(),\n\t})\n\n\trootMutation := graphql.NewObject(graphql.ObjectConfig{\n\t\tName: \"Mutation\",\n\t\tFields: fields.HandleMutation(),\n\t})\n\n\tschema, _ := graphql.NewSchema(graphql.SchemaConfig{\n\t\tQuery: rootQuery,\n\t\tMutation: rootMutation,\n\t})\n\n\th := handler.New(&handler.Config{\n\t\tSchema: &schema,\n\t\tPretty: true,\n\t})\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\th.ContextHandler(r.Context(), w, r)\n\t}\n}", "func (a *DefaultApiService) PostGraphQLExecute(r ApiPostGraphQLRequest) (GraphQLResponse, *_nethttp.Response, GenericOpenAPIError) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\texecutionError GenericOpenAPIError\n\t\tlocalVarReturnValue GraphQLResponse\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"DefaultApiService.PostGraphQL\")\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarPath := localBasePath + \"/graphql\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\tif r.graphQLRequest == nil {\n\t\texecutionError.error = \"graphQLRequest is required and must be specified\"\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif 
localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.graphQLRequest\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, executionError\n}", "func PlaygroundHandler(path string) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tplayground.Handler(\"GraphQL-Server\", path).ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func httpHandler(w http.ResponseWriter, r *http.Request) {\n\tb, err := ioutil.ReadAll(r.Body)\n\tdefer r.Body.Close()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 
500)\n\t\treturn\n\t}\n\n\treq := json.GetRPCRequestFromJSON(string(b))\n\trespBody, statusCode := handler(req)\n\tw.WriteHeader(statusCode)\n\tw.Write([]byte(respBody))\n}", "func (this *Route) HTTPHandler(ctx *Context, done Next) {\n\tthis.dispatch(ctx, done)\n}", "func main() {\n\tclient, err := database.NewClient(\"\")\n\tif err != nil {\n\t\tlog.Fatal(\"problem connecting to the database\")\n\t}\n\tpathHandlers := handlers.NewHandler(client)\n\tr := mux.NewRouter()\n\t// Routes consist of a path and a handler function.\n\tr.HandleFunc(\"/\", pathHandlers.HelloWorld)\n\tr.HandleFunc(\"/health\", pathHandlers.HealthCheckHandler)\n\tr.HandleFunc(\"/v1/metrics/node/{nodename}\", pathHandlers.NodeHandler).Methods(\"POST\")\n\tr.HandleFunc(\"/v1/metrics/nodes{nodename}/process/{processname}\", pathHandlers.NodeHandler).Methods(\"POST\")\n\tr.HandleFunc(\"/v1/analytics/nodes/average\", pathHandlers.AnalyticsNodesHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/v1/analytics/processes\", pathHandlers.AnalyticProcessesHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/v1/analytics/processes/{processname}\", pathHandlers.AnalyticSpecificProcessHandler).Methods(\"GET\")\n\n\t// Bind to a port and pass our router in\n\tlog.Fatal(http.ListenAndServe(\":8000\", r))\n}", "func (d *MetadataAPI) Handle(w http.ResponseWriter, r *http.Request) {\n\tql, err := api.GetParamsFromRequest(\"sql\", r, \"\", true)\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\tmetaQuery, err := parseSQLFunc(ql)\n\tif err != nil {\n\t\tapi.Error(w, err)\n\t\treturn\n\t}\n\tswitch metaQuery.Type {\n\tcase stmt.Database:\n\t\td.showDatabases(w)\n\tcase stmt.Namespace, stmt.Metric, stmt.Field, stmt.TagKey, stmt.TagValue:\n\t\tdb, err := api.GetParamsFromRequest(\"db\", r, \"\", true)\n\t\tif err != nil {\n\t\t\tapi.Error(w, err)\n\t\t\treturn\n\t\t}\n\t\td.suggest(w, db, metaQuery)\n\tdefault:\n\t\tapi.Error(w, errUnknownMetadataStmt)\n\t}\n}", "func (e *Engine) Do(ctx context.Context, query string, 
response interface{}) error {\n\tpayload := GQLRequest{\n\t\tQuery: query,\n\t\tVariables: map[string]interface{}{},\n\t}\n\n\tbody, err := e.Request(ctx, \"POST\", \"/\", &payload)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Request failed: %w\", err)\n\t}\n\n\t// TODO temporary hack, actually parse the response\n\tif str := string(body); strings.Contains(str, \"errors: \\\"[{\\\"error\") {\n\t\treturn fmt.Errorf(\"pql error: %s\", str)\n\t}\n\n\terr = json.Unmarshal(body, response)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"json unmarshal: %w\", err)\n\t}\n\n\treturn nil\n}", "func PlaygroundHandler(path string) gin.HandlerFunc {\n\th := playground.Handler(\"Go GraphQL Server\", path)\n\treturn func(c *gin.Context) {\n\t\th.ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func (r rigis) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\n\t// if filter match then bad request\n\tif !r.executeFilter(req) {\n\t\tlogrus.WithFields(formatErrorLog(req)).Error(\"Request Forbidden.\")\n\t\tresponseError(rw, req, getErrorEntity(http.StatusForbidden))\n\t\treturn\n\t}\n\n\tfor i := 0; i < len(r.nodes); i++ {\n\t\t// if rule unmatch then skip this node\n\t\tif !r.nodes[i].executeRule(req) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// if filter match then bad request\n\t\tif !r.nodes[i].executeFilter(req) {\n\t\t\tlogrus.WithFields(formatErrorLog(req)).Error(\"Request Forbidden.\")\n\t\t\tresponseError(rw, req, getErrorEntity(http.StatusForbidden))\n\t\t\treturn\n\t\t}\n\n\t\tr.nodes[i].serveHTTP(rw, req)\n\t\treturn\n\t}\n\n\t// not match all rule\n\tlogrus.WithFields(formatErrorLog(req)).Error(\"Contnts Not Found.\")\n\tresponseError(rw, req, getErrorEntity(http.StatusNotFound))\n}", "func NewHTTPHandler(db *sql.DB) HandlerInterface {\n\treturn &Handler{\n\t\tps: NewService(db),\n\t}\n}", "func (rLoader *V1RouterLoader) routerGraphQL(router *gin.Engine, handler *graphQL.GraphQlController) {\n\tgroup := router.Group(\"v1/graphql\")\n\tgroup.POST(\"\", handler.GraphQLControll)\n}", 
"func AtomicPersistenceQueryHandler(options ...option) func(http.Handler) http.Handler {\n\toption := NewOptions(options...)\n\toption.ValidAPQ()\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tvar (\n\t\t\t\tqueryHash string\n\t\t\t\terr error\n\t\t\t)\n\t\t\tif !(r.Method == http.MethodPost || r.Method == http.MethodGet) {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !checkRouter(r, option.routers) {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif queryHash, err = ComputeHttpRequestQueryHash(r); err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvar item string\n\t\t\t// if cache is miss , store the newest data to cache\n\t\t\tif v, err := option.cache.Get(redisKey(option.serviceName, queryHash), &item); err != nil || v == nil {\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t}\n\t\t\t\tresponseBuf := bytes.NewBuffer(nil)\n\t\t\t\tcrw := newCacheResponseWrite(w, responseBuf)\n\t\t\t\tnext.ServeHTTP(crw, r)\n\t\t\t\tif err := option.cache.Set(redisKey(option.serviceName, queryHash), model.NewItem(responseBuf.String(), option.expire)); err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// 此处不能提前设置状态,否则beego内部框架会识别response已被处理,导致content-type:text-plain(一直是)\n\t\t\t// 详见 :https://blog.csdn.net/yes169yes123/article/details/103126655\n\t\t\t// w.WriteHeader(http.StatusAccepted)\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\t\tw.Write([]byte(item))\n\t\t\treturn\n\t\t})\n\t}\n}", "func (s *KgSearchResult) ServeHTTP(\n\tw http.ResponseWriter, r *http.Request) {\n\n\t// qs := r.FormValue(\"q\")\n\tres, _ := knowledgegraph.GetResult(s.Kgs, 0)\n\tif res != nil {\n\t\t// TODO 移到vie中\n\n\t\tt, err := template.ParseFiles(\"view/template/3.html\")\n\t\t// TODO 统一的错误处理\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"internal 
error\",\n\t\t\t\thttp.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tif t.Execute(w, res) != nil {\n\t\t\thttp.Error(w, \"internal error\",\n\t\t\t\thttp.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\thttp.NotFound(w, r)\n\t}\n\n}", "func main() {\n\t defaultPort := \"4040\"\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = defaultPort\n\t}\n\n\tDatabase := db.Connect()\n\n\tDatabase.AddQueryHook(db.Logs{})\n\n\troute := chi.NewRouter()\n\n\troute.Use(cors.New(cors.Options{\n\t\tAllowedOrigins: []string{\"http://localhost:4040\"},\n\t\tAllowCredentials: true,\n\t\tDebug: true,\n\t}).Handler)\n\troute.Use(middleware.Logger,\n\t\t\tmiddleware.RequestID,\n\t\t\tInternalMiddleWare.AuthMiddleware())\n\n\troute.Route(\"/graphql\", func(route chi.Router){\n\t\troute.Use(dataloaders.NewMiddleware(Database)...)\n\n\t\tschema := generated.NewExecutableSchema( generated.Config{\n\t\t\t\tResolvers: &resolvers.Resolver{\n\t\t\t\t\tDB: Database,\n\t\t\t\t},\n\t\t\t\tDirectives:generated.DirectiveRoot{},\n\t\t\t\tComplexity:generated.ComplexityRoot{},\n\t\t })\n\n\t\tvar serve = handler.NewDefaultServer(schema)\n\t\tserve.Use(extension.FixedComplexityLimit(300))\n\t\troute.Handle(\"/\", serve)\n\t})\n\n\tgraphiql := playground.Handler(\"api-gateway\" , \"/graphql\")\n\troute.Get(\"/\" , graphiql)\n\n\tlog.Printf(\"connect to http://localhost:%s/ for GraphQL playground\", port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, route))\n}", "func (h *Hub) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path == \"/\" {\n\t\t// just for health checks. 
AWS by default needs HTTP 200.\n\t\tfmt.Fprint(w, \"hello\")\n\t\treturn\n\t}\n\tif r.URL.Path != \"/ws\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\tconn, err := h.up.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Println(\"err ServeHTTP:\", err)\n\t\treturn\n\t}\n\th.tx <- NewClient(conn)\n}", "func QueriesHandler(db storage.DB) atreugo.View {\n\treturn func(ctx *atreugo.RequestCtx) error {\n\t\tqueries := queriesParam(ctx)\n\t\tworlds := storage.AcquireWorlds()[:queries]\n\n\t\tfor i := 0; i < queries; i++ {\n\t\t\tdb.GetOneRandomWorld(&worlds[i])\n\t\t}\n\n\t\terr := ctx.JSONResponse(worlds)\n\n\t\tstorage.ReleaseWorlds(worlds)\n\n\t\treturn err\n\t}\n}", "func (h *ConnectionHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\tswitch req.Method {\n\tcase \"GET\":\n\t\th.get(res, req)\n\tcase \"POST\":\n\t\th.post(res, req)\n\tdefault:\n\t\tpageNotFoundHandler(res, req)\n\t}\n}", "func (p *Plugin) ServeHTTP(c *plugin.Context, w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\tvar dfr IncomingRequest\n\tif err := json.NewDecoder(r.Body).Decode(&dfr); err != nil {\n\t\tp.API.LogError(\"Cannot decode\", \"err\", err.Error())\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tp.API.LogInfo(\"user\", \"t\", dfr.Intent.Params.Username)\n\tif dfr.Intent.Params.Username == nil || *dfr.Intent.Params.Username.Resolved == \"\" {\n\n\t}\n\n\tvar response *OutgoingResponse\n\tvalidateUser := func() string {\n\t\tif dfr.User.Params.UserName == nil || *dfr.User.Params.UserName == \"\" {\n\t\t\tresponse = getResponseWithText(\"Sorry, you didn't set your mattermost username!\")\n\t\t\treturn \"\"\n\t\t}\n\t\tidB, err := p.API.KVGet(*dfr.User.Params.UserName)\n\t\tif err != nil || idB == nil {\n\t\t\tresponse = getResponseWithText(\"Sorry, you didn't enable 
google assistant integration!\")\n\t\t\treturn \"\"\n\t\t}\n\t\tu, _ := p.API.GetUserByUsername(*dfr.User.Params.UserName)\n\n\t\treturn u.Id\n\t}\n\n\thandler := *dfr.Handler.Name\n\t// handler = \"set_username\"\n\tswitch handler {\n\tcase \"get_status\":\n\t\t{\n\t\t\tuserId := validateUser()\n\t\t\tif userId == \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar nErr error\n\t\t\tresponse, nErr = p.handleGetStatus(userId)\n\t\t\tif nErr != nil {\n\t\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\tcase \"read_direct_messages\":\n\t\t{\n\t\t\tuserId := validateUser()\n\t\t\tif userId == \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar nErr error\n\t\t\tresponse, nErr = p.handleReadMessages(userId)\n\t\t\tif nErr != nil {\n\t\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\tcase \"change_status\":\n\t\t{\n\t\t\tuserId := validateUser()\n\t\t\tif userId == \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar nErr error\n\t\t\tresponse, nErr = p.handleStatusChange(*dfr.Intent.Params.Status.Resolved, userId)\n\t\t\tif nErr != nil {\n\t\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\tcase \"set_username\":\n\t\t{\n\t\t\tresponse = &OutgoingResponse{\n\t\t\t\tUser: &gUser{\n\t\t\t\t\tParams: gUserParams{\n\t\t\t\t\t\tUserName: dfr.Intent.Params.Username.Resolved, //model.NewString(\"sysadmin\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tPrompt: &gPrompt{},\n\t\t\t}\n\t\t}\n\tcase \"send_message\":\n\t\t{\n\t\t\tuserId := validateUser()\n\t\t\tif userId == \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar nErr error\n\t\t\tresponse, nErr = p.handleSendDM(userId, dfr.Scene.Slots.Username.Value, *dfr.Intent.Params.Message.Resolved)\n\t\t\tif nErr != nil {\n\t\t\t\tresponse = getResponseWithText(\"Sorry, can't find that user!\")\n\t\t\t}\n\t\t}\n\tdefault:\n\t\t{\n\t\t\tresponse = getResponseWithText(\"Sorry, 
don't know what to do!\")\n\t\t}\n\t}\n\tsuggestions := []gSuggestions{\n\t\t{Title: \"Change status to away\"},\n\t\t{Title: \"Status Report\"},\n\t\t{Title: \"Read messages\"},\n\t\t{Title: \"Write message\"},\n\t}\n\tresponse.Prompt.Suggestions = &suggestions\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(response)\n\n}", "func (h *JSONRPCHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tconn := struct {\n\t\tio.Writer\n\t\tio.ReadCloser\n\t}{w, r.Body}\n\n\tif err := h.server.ServeRequest(jsonrpc.NewServerCodec(conn)); err != nil {\n\t\tlog.Errorf(\"could not handle json-rpc request: %s\", err)\n\t}\n}", "func (g *Gateway) PlaygroundHandler(w http.ResponseWriter, r *http.Request) {\n\t// on POSTs, we have to send the request to the graphqlHandler\n\tif r.Method == http.MethodPost {\n\t\tg.GraphQLHandler(w, r)\n\t\treturn\n\t}\n\n\t// we are not handling a POST request so we have to show the user the playground\n\terr := writePlayground(w, PlaygroundConfig{\n\t\tEndpoint: r.URL.String(),\n\t})\n\tif err != nil {\n\t\tg.logger.Warn(\"failed writing playground UI:\", err.Error())\n\t}\n}", "func HttpHandler(store *Store) HttpHandlerFunc {\r\n\treturn func(rw http.ResponseWriter, r *http.Request) {\r\n\t\tif r.Method == \"GET\" {\r\n\t\t\thandleListDishes(store, rw)\r\n\t\t} else if r.Method == \"POST\" {\r\n\t\t\thandleOrderCreate(r, rw, store)\r\n\t\t} else {\r\n\t\t\trw.WriteHeader(http.StatusMethodNotAllowed)\r\n\t\t}\r\n\t}\r\n}", "func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tctx, err := s.ops.NewContext(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// sumdb handler\n\tif strings.HasPrefix(r.URL.Path, \"/sumdb/\") {\n\t\tsumdb.Handler(w, r)\n\t\treturn\n\t}\n\n\ti := strings.Index(r.URL.Path, \"/@\")\n\tif i < 0 {\n\t\thttp.Error(w, \"no such path\", 
http.StatusNotFound)\n\t\treturn\n\t}\n\tmodPath, err := module.UnescapePath(strings.TrimPrefix(r.URL.Path[:i], \"/\"))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusNotFound)\n\t\treturn\n\t}\n\twhat := r.URL.Path[i+len(\"/@\"):]\n\tconst (\n\t\tcontentTypeJSON = \"application/json\"\n\t\tcontentTypeText = \"text/plain; charset=UTF-8\"\n\t\tcontentTypeBinary = \"application/octet-stream\"\n\t)\n\tvar ctype string\n\tvar f File\n\tvar openErr error\n\tswitch what {\n\tcase \"latest\":\n\t\tctype = contentTypeJSON\n\t\tf, openErr = s.ops.Latest(ctx, modPath)\n\tcase \"v/list\":\n\t\tctype = contentTypeText\n\t\tf, openErr = s.ops.List(ctx, modPath)\n\tdefault:\n\t\twhat = strings.TrimPrefix(what, \"v/\")\n\t\text := path.Ext(what)\n\t\tvers, err := module.UnescapeVersion(strings.TrimSuffix(what, ext))\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tm := module.Version{Path: modPath, Version: vers}\n\t\tif vers == \"latest\" {\n\t\t\t// The go command handles \"go get m@latest\" by fetching /m/@v/latest, not latest.info.\n\t\t\t// We should never see requests for \"latest.info\" and so on, so avoid confusion\n\t\t\t// by disallowing it early.\n\t\t\thttp.Error(w, \"version latest is disallowed\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\t// All requests require canonical versions except for info,\n\t\t// which accepts any revision identifier known to the underlying storage.\n\t\tif ext != \".info\" && vers != module.CanonicalVersion(vers) {\n\t\t\thttp.Error(w, \"version \"+vers+\" is not in canonical form\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tswitch ext {\n\t\tcase \".info\":\n\t\t\tctype = \"application/json\"\n\t\t\tf, openErr = s.ops.Info(ctx, m)\n\t\tcase \".mod\":\n\t\t\tctype = \"text/plain; charset=UTF-8\"\n\t\t\tf, openErr = s.ops.GoMod(ctx, m)\n\t\tcase \".zip\":\n\t\t\tctype = \"application/octet-stream\"\n\t\t\tf, openErr = s.ops.Zip(ctx, 
m)\n\t\tdefault:\n\t\t\thttp.Error(w, \"request not recognized\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t}\n\tif openErr != nil {\n\t\tcode := http.StatusNotFound\n\t\thttp.Error(w, openErr.Error(), code)\n\t\treturn\n\t}\n\tdefer f.Close()\n\tinfo, err := f.Stat()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusNotFound)\n\t\treturn\n\t}\n\tif info.IsDir() {\n\t\thttp.Error(w, \"unexpected directory\", http.StatusNotFound)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", ctype)\n\thttp.ServeContent(w, r, what, info.ModTime(), f)\n}", "func Handler(rc *Context, s storage.Store, storeExecutor kvs) {\r\n\r\n\t// Make sure all resources are cleanup after leaving\r\n\tcancelFunc := rc.CtxCancel\r\n\tdefer cancelFunc()\r\n\r\n\t// Err channel shared across\r\n\terrc := make(chan error, 1)\r\n\r\n\t// Execution call\r\n\tout := make(chan interface{}, 1)\r\n\r\n\tgo func() {\r\n\t\tdefer close(out)\r\n\t\tdefer close(errc)\r\n\r\n\t\tinb := <-httpExtractor(rc, errc)\r\n\r\n\t\tlogging.MsgFields(\r\n\t\t\trc.uID,\r\n\t\t\t\"HttpExtractor\",\r\n\t\t\tlogging.Input,\r\n\t\t\tlogging.Field{{logging.Payload, inb}})\r\n\r\n\t\tif rc.Cancelled() {\r\n\t\t\tlogging.Msg(\r\n\t\t\t\trc.uID,\r\n\t\t\t\t\"Handler\",\r\n\t\t\t\tlogging.Cancelled,\r\n\t\t\t\t\"kvs execution was cancelled from another goroutine\")\r\n\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\tres, err := storeExecutor(s, inb)\r\n\r\n\t\tif err != nil {\r\n\t\t\terrc <- err\r\n\t\t\tcancelFunc()\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\tselect {\r\n\t\tcase out <- res:\r\n\t\tcase <-rc.Ctx.Done():\r\n\t\t\treturn\r\n\t\t}\r\n\t}()\r\n\r\n\tHTTPResponse(rc, out, errc)\r\n}", "func Handler(_db *sql.DB) http.HandlerFunc {\n\trouter := mux.NewRouter()\n\n\tdb = _db\n\n\trouter.HandleFunc(\"/chain\", AddLink).\n\t\tMethods(http.MethodPost)\n\n\trouter.Handle(\"/invite/{key}\", NewInviteHandler()).\n\t\tMethods(http.MethodGet)\n\n\trouter.HandleFunc(\"/about\", 
About).\n\t\tMethods(http.MethodGet)\n\n\trouter.HandleFunc(\"/\", CORS).\n\t\tMethods(http.MethodOptions)\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\trouter.ServeHTTP(w, r)\n\t}\n}", "func (p Plugin) Handler(bot *linebot.Client) *http.ServeMux {\n\tmux := http.NewServeMux()\n\n\t// Setup HTTP Server for receiving requests from LINE platform\n\tmux.HandleFunc(\"/callback\", func(w http.ResponseWriter, req *http.Request) {\n\t\tevents, err := bot.ParseRequest(req)\n\t\tif err != nil {\n\t\t\tif err == linebot.ErrInvalidSignature {\n\t\t\t\tw.WriteHeader(400)\n\t\t\t} else {\n\t\t\t\tw.WriteHeader(500)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfor _, event := range events {\n\t\t\tif event.Type == linebot.EventTypeMessage {\n\t\t\t\tswitch message := event.Message.(type) {\n\t\t\t\tcase *linebot.TextMessage:\n\t\t\t\t\tlog.Printf(\"User ID is %v\\n\", event.Source.UserID)\n\t\t\t\t\tlog.Printf(\"Room ID is %v\\n\", event.Source.RoomID)\n\t\t\t\t\tlog.Printf(\"Group ID is %v\\n\", event.Source.GroupID)\n\n\t\t\t\t\tReceiveCount++\n\t\t\t\t\tif message.Text == \"test\" {\n\t\t\t\t\t\tSendCount++\n\t\t\t\t\t\tif _, err = bot.ReplyMessage(event.ReplyToken, linebot.NewTextMessage(\"count + 1\")).Do(); err != nil {\n\t\t\t\t\t\t\tlog.Print(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif _, err = bot.ReplyMessage(event.ReplyToken, linebot.NewTextMessage(message.Text)).Do(); err != nil {\n\t\t\t\t\t\tlog.Print(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\tmux.HandleFunc(\"/metrics\", func(w http.ResponseWriter, req *http.Request) {\n\t\tpromhttp.Handler().ServeHTTP(w, req)\n\t})\n\n\t// Setup HTTP Server for receiving requests from LINE platform\n\tmux.HandleFunc(\"/\", func(w http.ResponseWriter, req *http.Request) {\n\t\tfmt.Fprintln(w, \"Welcome to Line webhook page.\")\n\t})\n\n\treturn mux\n}", "func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request) {\n\t// TODO: 
Authentication.\n\n\t// Parse query from query string.\n\tvalues := r.URL.Query()\n\tqueries, err := parser.ParseQuery(values.Get(\"q\"))\n\tif err != nil {\n\t\th.error(w, \"parse error: \"+err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Retrieve database from server.\n\tdb := h.server.Database(values.Get(\":db\"))\n\tif db == nil {\n\t\th.error(w, ErrDatabaseNotFound.Error(), http.StatusNotFound)\n\t\treturn\n\t}\n\n\t// Parse the time precision from the query params.\n\tprecision, err := parseTimePrecision(values.Get(\"time_precision\"))\n\tif err != nil {\n\t\th.error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Create processor for writing data out.\n\tvar p engine.Processor\n\tif r.URL.Query().Get(\"chunked\") == \"true\" {\n\t\tp = &chunkWriterProcessor{w, precision, false, (values.Get(\"pretty\") == \"true\")}\n\t} else {\n\t\tp = &pointsWriterProcessor{make(map[string]*protocol.Series), w, precision, (values.Get(\"pretty\") == \"true\")}\n\t}\n\n\t// Execute query against the database.\n\tfor _, q := range queries {\n\t\tif err := db.ExecuteQuery(nil, q, p); err != nil {\n\t\t\th.error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Mark processor as complete. 
Print error, if applicable.\n\tif err := p.Close(); err != nil {\n\t\th.error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func GetHandler(c *gin.Context) {\n\t// The actual response goes here\n\tresp := QueryResponse{\n\t\tStatus: StatusOK,\n\t\tMessage: \"10-4 good buddy\",\n\t}\n\tc.JSON(http.StatusOK, resp)\n}", "func (g *GRPC) handler() http.Handler {\n\treturn http.HandlerFunc(\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tswitch r.Header.Get(\"Content-Type\") {\n\t\t\tcase \"application/grpc\":\n\t\t\t\tif r.ProtoMajor == 2 {\n\t\t\t\t\tg.serv.ServeHTTP(w, r)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thttp.Error(w, \"'application/grpc' content arrived, but HTTP protocol was not HTTP 2\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\tcase \"application/grpc-gateway\", \"application/jsonpb\":\n\t\t\t\tif g.gatewayHandler == nil {\n\t\t\t\t\thttp.Error(w, \"application/grpc-gateway received, but server is not setup for REST\", http.StatusBadRequest)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// g.httpRESTHandler(g.gatewayHandler).ServeHTTP(w, r)\n\t\t\t\tg.gatewayHandler.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\t// Special case where they are looking for the REST swagger docs.\n\t\t\t\tif strings.HasPrefix(r.URL.Path, \"/swagger-ui/\") {\n\t\t\t\t\tg.httpRESTHandler(g.gatewayHandler).ServeHTTP(w, r)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif g.httpMux == nil {\n\t\t\t\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tg.httpMux.ServeHTTP(w, r)\n\t\t\t}\n\t\t},\n\t)\n}", "func (d *Data) ServeHTTP(uuid dvid.UUID, ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request) {\n\ttimedLog := dvid.NewTimeLog()\n\t// versionID := ctx.VersionID()\n\n\t// Get the action (GET, POST)\n\taction := strings.ToLower(r.Method)\n\n\t// Break URL request into arguments\n\turl := r.URL.Path[len(server.WebAPIPath):]\n\tparts := strings.Split(url, \"/\")\n\tif len(parts[len(parts)-1]) == 0 {\n\t\tparts 
= parts[:len(parts)-1]\n\t}\n\n\t// Handle POST on data -> setting of configuration\n\tif len(parts) == 3 && action == \"put\" {\n\t\tconfig, err := server.DecodeJSON(r)\n\t\tif err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tif err := d.ModifyConfig(config); err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tif err := datastore.SaveDataByUUID(uuid, d); err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintf(w, \"Changed '%s' based on received configuration:\\n%s\\n\", d.DataName(), config)\n\t\treturn\n\t}\n\n\tif len(parts) < 4 {\n\t\tserver.BadRequest(w, r, \"Incomplete API request\")\n\t\treturn\n\t}\n\n\t// Process help and info.\n\tswitch parts[3] {\n\tcase \"help\":\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\tfmt.Fprintln(w, dtype.Help())\n\n\tcase \"info\":\n\t\tjsonBytes, err := d.MarshalJSON()\n\t\tif err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tfmt.Fprintf(w, string(jsonBytes))\n\n\tcase \"sync\":\n\t\tif action != \"post\" {\n\t\t\tserver.BadRequest(w, r, \"Only POST allowed to sync endpoint\")\n\t\t\treturn\n\t\t}\n\t\treplace := r.URL.Query().Get(\"replace\") == \"true\"\n\t\tif err := datastore.SetSyncByJSON(d, uuid, replace, r.Body); err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\n\tcase \"label\":\n\t\tif action != \"get\" {\n\t\t\tserver.BadRequest(w, r, \"Only GET action is available on 'label' endpoint.\")\n\t\t\treturn\n\t\t}\n\t\tif len(parts) < 5 {\n\t\t\tserver.BadRequest(w, r, \"Must include label after 'label' endpoint.\")\n\t\t\treturn\n\t\t}\n\t\tlabel, err := strconv.ParseUint(parts[4], 10, 64)\n\t\tif err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tif label == 0 {\n\t\t\tserver.BadRequest(w, r, \"Label 0 is protected background value and cannot be used for 
query.\")\n\t\t\treturn\n\t\t}\n\t\tqueryStrings := r.URL.Query()\n\t\tjsonBytes, err := d.GetLabelJSON(ctx, label, queryStrings.Get(\"relationships\") == \"true\")\n\t\tif err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tif _, err := w.Write(jsonBytes); err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\ttimedLog.Infof(\"HTTP %s: get synaptic elements for label %d (%s)\", r.Method, label, r.URL)\n\n\tcase \"tag\":\n\t\tif action != \"get\" {\n\t\t\tserver.BadRequest(w, r, \"Only GET action is available on 'tag' endpoint.\")\n\t\t\treturn\n\t\t}\n\t\tif len(parts) < 5 {\n\t\t\tserver.BadRequest(w, r, \"Must include tag string after 'tag' endpoint.\")\n\t\t\treturn\n\t\t}\n\t\ttag := Tag(parts[4])\n\t\tqueryStrings := r.URL.Query()\n\t\tjsonBytes, err := d.GetTagJSON(ctx, tag, queryStrings.Get(\"relationships\") == \"true\")\n\t\tif err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tif _, err := w.Write(jsonBytes); err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\ttimedLog.Infof(\"HTTP %s: get synaptic elements for tag %s (%s)\", r.Method, tag, r.URL)\n\n\tcase \"elements\":\n\t\tswitch action {\n\t\tcase \"get\":\n\t\t\t// GET <api URL>/node/<UUID>/<data name>/elements/<size>/<offset>\n\t\t\tif len(parts) < 6 {\n\t\t\t\tserver.BadRequest(w, r, \"Expect size and offset to follow 'elements' in GET request\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsizeStr, offsetStr := parts[4], parts[5]\n\t\t\text3d, err := dvid.NewExtents3dFromStrings(offsetStr, sizeStr, \"_\")\n\t\t\tif err != nil {\n\t\t\t\tserver.BadRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\telements, err := d.GetRegionSynapses(ctx, ext3d)\n\t\t\tif err != nil {\n\t\t\t\tserver.BadRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Header().Set(\"Content-type\", 
\"application/json\")\n\t\t\tjsonBytes, err := json.Marshal(elements)\n\t\t\tif err != nil {\n\t\t\t\tserver.BadRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, err := w.Write(jsonBytes); err != nil {\n\t\t\t\tserver.BadRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttimedLog.Infof(\"HTTP %s: synapse elements in subvolume (size %s, offset %s) (%s)\", r.Method, sizeStr, offsetStr, r.URL)\n\n\t\tcase \"post\":\n\t\t\tif err := d.StoreSynapses(ctx, r.Body); err != nil {\n\t\t\t\tserver.BadRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tserver.BadRequest(w, r, \"Only GET or POST action is available on 'elements' endpoint.\")\n\t\t\treturn\n\t\t}\n\n\tcase \"element\":\n\t\t// DELETE <api URL>/node/<UUID>/<data name>/element/<coord>\n\t\tif action != \"delete\" {\n\t\t\tserver.BadRequest(w, r, \"Only DELETE action is available on 'element' endpoint.\")\n\t\t\treturn\n\t\t}\n\t\tif len(parts) < 5 {\n\t\t\tserver.BadRequest(w, r, \"Must include coordinate after DELETE on 'element' endpoint.\")\n\t\t\treturn\n\t\t}\n\t\tpt, err := dvid.StringToPoint3d(parts[4], \"_\")\n\t\tif err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tif err := d.DeleteElement(ctx, pt); err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\ttimedLog.Infof(\"HTTP %s: delete synaptic element at %s (%s)\", r.Method, pt, r.URL)\n\n\tcase \"move\":\n\t\t// POST <api URL>/node/<UUID>/<data name>/move/<from_coord>/<to_coord>\n\t\tif action != \"post\" {\n\t\t\tserver.BadRequest(w, r, \"Only POST action is available on 'move' endpoint.\")\n\t\t\treturn\n\t\t}\n\t\tif len(parts) < 6 {\n\t\t\tserver.BadRequest(w, r, \"Must include 'from' and 'to' coordinate after 'move' endpoint.\")\n\t\t\treturn\n\t\t}\n\t\tfromPt, err := dvid.StringToPoint3d(parts[4], \"_\")\n\t\tif err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\ttoPt, err := dvid.StringToPoint3d(parts[5], \"_\")\n\t\tif err != nil 
{\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tif err := d.MoveElement(ctx, fromPt, toPt); err != nil {\n\t\t\tserver.BadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\ttimedLog.Infof(\"HTTP %s: move synaptic element from %s to %s (%s)\", r.Method, fromPt, toPt, r.URL)\n\n\tdefault:\n\t\tserver.BadAPIRequest(w, r, d)\n\t}\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\thandler.Submit(w, r)\n}", "func generateHandler(w http.ResponseWriter, r *http.Request) {\n\tpath := r.URL.Path[1:]\n\trequest := urlParser.ParseURL(path)\n\n\ttableName := request.TableName\n\tfields := request.Fields\n\n\t//only valid names are existing tables in db\n\tif !structs.ValidStruct[tableName] {\n\t\tfmt.Printf(\"\\\"%s\\\" table not found.\\n\", tableName)\n\n\t\thttp.NotFound(w, r)\n\t} else {\n\t\tfmt.Printf(\"\\\"%s\\\" table found.\\n\", tableName)\n\n\t\trows := sqlParser.GetRows(tableName, fields)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\tif fields == \"\" {\n\t\t\tfmt.Printf(\"No fields\\n\")\n\t\t\tstructs.MapTableToJson(tableName, rows, w)\n\t\t} else {\n\t\t\tfmt.Printf(\"%s\\n\", fields)\n\t\t\tfieldArray := strings.Split(fields, \",\")\n\t\t\tstructFilter.MapCustomTableToJson(tableName, rows, w, fieldArray)\n\t\t}\n\t}\n}", "func Handler(c *gin.Context) {\n\tvar req Request\n\tc.BindJSON(&req)\n\ts := service{\n\t\tcontext: c,\n\t\treq: req,\n\t}\n\tres, err := s.Execute()\n\tif err != nil {\n\t\tc.JSON(400, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\tc.JSON(200, gin.H{\"data\": res.Data})\n}", "func (p *stats) Handles(req *comm.Request) (res bool) {\n\treturn\n}", "func (h *Hub) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// only allow GET request\n\tif r.Method != \"GET\" {\n\t\thttp.Error(\n\t\t\tw,\n\t\t\thttp.StatusText(http.StatusMethodNotAllowed),\n\t\t\thttp.StatusMethodNotAllowed,\n\t\t)\n\t\treturn\n\t}\n\n\t// upgrade the connection\n\tws, err := h.wsConnFactory.Upgrade(w, r, nil)\n\tif err != 
nil {\n\t\th.log.Println(\"[ERROR] failed to upgrade connection:\", err)\n\t\treturn\n\t}\n\n\t// create the connection\n\tc := &connection{send: make(chan []byte, 256), ws: ws, hub: h}\n\t// registers the connection to the hub by prepping the hub for the connection\n\t// by setting it to nil\n\th.register <- c\n\n\tgo c.listenWrite()\n\tc.listenRead()\n}", "func (p *Plugin) ServeHTTP(c *plugin.Context, w http.ResponseWriter, r *http.Request) {\n\tuserID := r.Header.Get(\"Mattermost-User-Id\")\n\n\tif userID != \"\" {\n\t\tuser, err := p.API.GetUser(userID)\n\t\tif err != nil {\n\t\t\tmlog.Error(\"Error in authenticated user lookup\", mlog.Err(err))\n\t\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tif user.IsSystemAdmin() {\n\t\t\tr.Header.Set(\"Authorization\", fmt.Sprintf(\"bearer %s\", p.config.AdminToken))\n\t\t}\n\t}\n\tmlog.Info(fmt.Sprintf(\"Got HTTP request: %s: %s Headers: %+v\", r.Method, r.URL, r.Header))\n\n\t// TODO: Less naive URL filter\n\tif strings.HasSuffix(r.URL.String(), \"/_events\") {\n\t\tp.wsProxy(w, r)\n\t} else {\n\t\tp.httpProxy(w, r)\n\t}\n}", "func execmHandlerServeHTTP(_ int, p *gop.Context) {\n\targs := p.GetArgs(3)\n\targs[0].(*cgi.Handler).ServeHTTP(args[1].(http.ResponseWriter), args[2].(*http.Request))\n}", "func reportHandler(c echo.Context) error {\n\n\tgrq := new(GQLRequest)\n\tif err := c.Bind(grq); err != nil {\n\t\treturn err\n\t}\n\n\tquery := grq.Query\n\tvariables := grq.Variables\n\tgqlContext := map[string]interface{}{}\n\n\tresult, err := report_executor.Execute(gqlContext, query, variables, \"\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn c.JSON(http.StatusOK, result)\n\n}", "func NewHTTPHandler(bh *handler.BaseHTTPHandler, bu *usecase.BaseUsecase, br *repository.BaseRepository, s *infrastructure.SQL) *HTTPHandler {\n\t// user set.\n\tuserRepo := NewRepository(br, s.Master, s.Read)\n\tuserUsecase := NewUsecase(bu, s.Master, userRepo)\n\treturn 
&HTTPHandler{BaseHTTPHandler: *bh, usecase: userUsecase}\n}", "func (h *ChatHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tconn, err := h.Upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\th.Log.Error(err)\n\t\treturn\n\t}\n\tclient := NewClient(h.Log, conn, h.Broker)\n\th.Broker.Register(client)\n\tdefer func() {\n\t\th.Broker.Unregister(client)\n\t}()\n\tif len(r.URL.Query().Get(\"nogreet\")) == 0 {\n\t\tclient.outboundChan <- NewConnectionMessage(nil, client.ID, ConnectionMessagePayload{\n\t\t\tClientID: client.ID,\n\t\t})\n\t}\n\tclient.Run(h.Timeout)\n}", "func Handler(a adding.Service, l listing.Service, d deleting.Service) *gin.Engine {\n\trouter := gin.Default()\n\n\trouter.GET(\"/movies\", listMovies(l))\n\trouter.GET(\"/movies/:id\", getMovie(l))\n\trouter.POST(\"/movies\", addMovie(a))\n\trouter.DELETE(\"/movies/:id\", deleteMovie(d))\n\n\treturn router\n}", "func mainHandler(w http.ResponseWriter, r *http.Request) {\n\n table := InitDB()\n if (table == nil) {\n fmt.Println(\"ERROR: SQL is NIL\")\n return\n }\n\n requests := libEvents(table, \"Guildhall Public\", \"request\")\n hosts := libEvents(table, \"Guildhall Public\", \"host\")\n\n strContents := readHtml(\"../index.html\", requests, hosts)\n fmt.Println(strContents)\n fmt.Fprint(w, strContents)\n}", "func QueryGraphQL(ctx context.Context, endpoint, queryName string, token, query string, variables map[string]interface{}, target interface{}) error {\n\tbody, err := json.Marshal(map[string]interface{}{\n\t\t\"query\": query,\n\t\t\"variables\": variables,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif queryName != \"\" {\n\t\tqueryName = \"?\" + queryName\n\t}\n\treq, err := http.NewRequest(\"POST\", fmt.Sprintf(\"%s/.api/graphql%s\", endpoint, queryName), bytes.NewReader(body))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"token %s\", token))\n\n\t// Note: We do not use req.Context(ctx) here as it causes the frontend\n\t// to 
output long error logs, which is very noisy under high concurrency.\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn errors.Errorf(\"unexpected status code: %d\", resp.StatusCode)\n\t}\n\n\tcontents, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar errorPayload ErrorPayload\n\tif err := json.Unmarshal(contents, &errorPayload); err == nil && len(errorPayload.Errors) > 0 {\n\t\tvar combined error\n\t\tfor _, err := range errorPayload.Errors {\n\t\t\tcombined = multierror.Append(combined, errors.Errorf(\"%s\", err.Message))\n\t\t}\n\n\t\treturn combined\n\t}\n\n\treturn json.Unmarshal(contents, &target)\n}", "func Handler(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// Log body and pass to the DAO\n\tfmt.Printf(\"Received body: %v\\n\", req)\n\n\trequest := new(vm.GeneralRequest)\n\tresponse := request.Validate(req.Body)\n\tif response.Code != 0 {\n\t\treturn events.APIGatewayProxyResponse{Body: response.Marshal(), StatusCode: 500}, nil\n\t}\n\n\trequest.Date = time.Now().Unix()\n\n\tvar mainTable = \"main\"\n\tif value, ok := os.LookupEnv(\"dynamodb_table_main\"); ok {\n\t\tmainTable = value\n\t}\n\n\t// insert data into the DB\n\tdal.Insert(mainTable, request)\n\n\t// Log and return result\n\tfmt.Println(\"Wrote item: \", request)\n\treturn events.APIGatewayProxyResponse{Body: response.Marshal(), StatusCode: 200}, nil\n}", "func httpHandler(c conn.Conn, proto string) {\n\tdefer c.Close()\n\tdefer func() {\n\t\t// recover from failures\n\t\tif r := recover(); r != nil {\n\t\t\tc.Warn(\"httpHandler failed with error %v\", r)\n\t\t}\n\t}()\n\n\t// Make sure we detect dead connections while we decide how to multiplex\n\tc.SetDeadline(time.Now().Add(connReadTimeout))\n\n\t// multiplex by extracting the Host header, the vhost library\n\tvhostConn, err := vhost.HTTP(c)\n\tif err != nil 
{\n\t\tc.Warn(\"Failed to read valid %s request: %v\", proto, err)\n\t\tc.Write([]byte(BadRequest))\n\t\treturn\n\t}\n\n\t// read out the Host header and auth from the request\n\thost := strings.ToLower(vhostConn.Host())\n\tauth := vhostConn.Request.Header.Get(\"Authorization\")\n\n\t// done reading mux data, free up the request memory\n\tvhostConn.Free()\n\n\t// We need to read from the vhost conn now since it mucked around reading the stream\n\tc = conn.Wrap(vhostConn, \"pub\")\n\n\t// multiplex to find the right backend host\n\tc.Debug(\"Found hostname %s in request\", host)\n\ttunnel := tunnelRegistry.Get(fmt.Sprintf(\"%s://%s\", proto, host))\n\tif tunnel == nil {\n\t\tc.Info(\"No tunnel found for hostname %s\", host)\n\t\tc.Write([]byte(fmt.Sprintf(NotFound, len(host)+18, host)))\n\t\treturn\n\t}\n\n\t// If the client specified http auth and it doesn't match this request's auth\n\t// then fail the request with 401 Not Authorized and request the client reissue the\n\t// request with basic authdeny the request\n\tif tunnel.req.HttpAuth != \"\" && auth != tunnel.req.HttpAuth {\n\t\tc.Info(\"Authentication failed: %s\", auth)\n\t\tc.Write([]byte(NotAuthorized))\n\t\treturn\n\t}\n\n\t// dead connections will now be handled by tunnel heartbeating and the client\n\tc.SetDeadline(time.Time{})\n\n\t// let the tunnel handle the connection now\n\ttunnel.HandlePublicConnection(c)\n}", "func (p *Plugin) ServeHTTP(c *plugin.Context, w http.ResponseWriter, r *http.Request) {\n\tswitch r.URL.Path {\n\tcase \"/status\":\n\t\tp.handleStatus(w, r)\n\tcase \"/hello\":\n\t\tp.handleHello(c,w, r)\n\tdefault:\n\t\thttp.NotFound(w, r)\n\t}\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\tvar (\n\t\tc = appengine.NewContext(r)\n\t\tdb = data.NewRequest(c)\n\t\tvineAPI = vine.NewRequest(urlfetch.Client(c))\n\t)\n\tif user.Current(c) == nil {\n\t\turl, _ := user.LoginURL(c, \"/admin/dashboard\")\n\t\thttp.Redirect(w, r, url, 301)\n\t\treturn\n\t} else if !user.IsAdmin(c) 
{\n\t\tw.WriteHeader(401)\n\t\treturn\n\t}\n\n\tif r.Method == \"GET\" {\n\t\tenterprise, reports, _ := getAppUsers(c)\n\t\tvar (\n\t\t\tp = page.New(\"admin.html\")\n\t\t\tdata = map[string]interface{}{\n\t\t\t\t\"title\": \"Admin Dashboard\",\n\t\t\t\t\"config\": config.Load(c),\n\t\t\t\t\"enterpriseUsers\": enterprise,\n\t\t\t\t\"reportUsers\": reports,\n\t\t\t}\n\t\t)\n\t\tp.LoadData(data)\n\t\tp.Write(w)\n\t} else if r.Method == \"POST\" {\n\t\tswitch r.FormValue(\"op\") {\n\t\tcase \"TaskUsers\":\n\t\t\tt := NewTask(c)\n\t\t\tt.LoadCtx(r)\n\t\t\tt.BatchTaskUsers(strings.Split(r.FormValue(\"v\"), \"\\n\")...)\n\t\tcase \"TaskImport\":\n\t\t\tlog.Infof(c, \"Tasking %s for import\", r.FormValue(\"v\"))\n\t\t\tvar tasks []*taskqueue.Task\n\t\t\tfor k, v := range strings.Split(r.FormValue(\"v\"), \"\\n\") {\n\t\t\t\tt := taskqueue.NewPOSTTask(\"/cron/import\", map[string][]string{\n\t\t\t\t\t\"file\": {strings.TrimSpace(v)},\n\t\t\t\t})\n\t\t\t\tt.Name = fmt.Sprintf(\"%s-%s\", strings.Split(v, \".\")[0], utils.GenSlug())\n\t\t\t\tt.Delay = time.Minute * time.Duration(k) * 4\n\t\t\t\tt.RetryOptions = &taskqueue.RetryOptions{RetryLimit: 0, AgeLimit: t.Delay + (2 * time.Second)}\n\t\t\t\ttasks = append(tasks, t)\n\t\t\t}\n\t\t\tif _, err := taskqueue.AddMulti(c, tasks, \"\"); err != nil {\n\t\t\t\tlog.Errorf(c, \"error adding import tasks: %v\", err)\n\t\t\t}\n\t\tcase \"UnqueueUser\":\n\t\t\tlog.Infof(c, \"unqueuing %v\", r.FormValue(\"v\"))\n\t\t\tdb.UnqueueUser(r.FormValue(\"v\"))\n\t\tcase \"BatchUsers\":\n\t\t\tusers := strings.Split(r.FormValue(\"v\"), \",\")\n\t\t\tlog.Infof(c, \"queueing users: %v\", users)\n\t\t\tfor _, v := range users {\n\t\t\t\tdb.QueueUser(strings.TrimSpace(v))\n\t\t\t}\n\t\tcase \"FeaturedUser\":\n\t\t\tif user, err := vineAPI.GetUser(r.FormValue(\"user\")); err == nil {\n\t\t\t\tkey := datastore.NewKey(c, \"_featuredUser_\", \"featuredUser\", 0, nil)\n\t\t\t\tfeaturedUser := &featuredUser{UserID: user.UserIdStr, PostID: 
r.FormValue(\"vine\")}\n\t\t\t\tif _, err := nds.Put(c, key, featuredUser); err != nil {\n\t\t\t\t\tlog.Errorf(c, \"error setting featured user: %v\", err)\n\t\t\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"DumpKind\":\n\t\t\tt := NewTask(c)\n\t\t\tt.LoadCtx(r)\n\t\t\tt.DumpData(r.FormValue(\"v\"), w)\n\t\t\treturn\n\t\tcase \"PurgeData\":\n\t\t\tt := taskqueue.NewPOSTTask(\"/cron/purge\", map[string][]string{\n\t\t\t\t\"v\": {strings.TrimSpace(r.FormValue(\"v\"))},\n\t\t\t})\n\t\t\tt.Delay = 45 * time.Second\n\t\t\tt.Name = \"purge-\" + r.FormValue(\"v\") + \"-\" + utils.GenSlug()\n\t\t\tif _, err := taskqueue.Add(c, t, \"\"); err != nil {\n\t\t\t\tlog.Errorf(c, \"error adding purge task: %v\", err)\n\t\t\t}\n\t\tcase \"LoadData\":\n\t\t\tfile, _, err := r.FormFile(\"file\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(c, \"error loading file: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt := NewTask(c)\n\t\t\tt.LoadCtx(r)\n\t\t\tif err := t.LoadData(r.FormValue(\"v\"), file); err != nil {\n\t\t\t\tlog.Errorf(c, \"Error loading data: %v\", err)\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintf(w, \"{\\\"op\\\":\\\"%v\\\",\\\"success\\\":true}\", r.FormValue(\"op\"))\n\t}\n}", "func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tsignature := r.Header.Get(\"X-Hub-Signature\")\n\tif signature == \"\" {\n\t\twh.l.Println(\"signature is missing\")\n\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\twh.l.Println(\"fail to read body\")\n\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif !wh.isValidSignature(signature, body) {\n\t\twh.l.Println(\"invalid signature:\", 
signature)\n\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tevent := r.Header.Get(\"X-Github-Event\")\n\tswitch event {\n\tcase \"ping\":\n\t\t// return with code 200\n\t\treturn\n\tcase \"push\":\n\t\tpush := &Push{}\n\t\terr = json.Unmarshal(body, push)\n\t\tif err != nil {\n\t\t\twh.l.Println(\"fail to unmarshal json:\", err)\n\t\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\t}\n\t\terr = wh.pushCallback(push)\n\t\tif err != nil {\n\t\t\twh.l.Println(\"fail to execute push callback:\", err)\n\t\t\thttp.Error(w, \"internal server error\", http.StatusInternalServerError)\n\t\t}\n\tcase \"release\":\n\t\trelease := &Release{}\n\t\terr = json.Unmarshal(body, release)\n\t\tif err != nil {\n\t\t\twh.l.Println(\"fail to unmarshal json:\", err)\n\t\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\t}\n\t\terr := wh.releaseCallback(release)\n\t\tif err != nil {\n\t\t\twh.l.Println(\"fail to execute push callback:\", err)\n\t\t\thttp.Error(w, \"internal server error\", http.StatusInternalServerError)\n\t\t}\n\tdefault:\n\t\t// return 400 if we do not handle the event type.\n\t\t// This is to visually show the user a configuration error in the GH ui.\n\t\twh.l.Println(\"unexpected event:\", event)\n\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n}", "func (h *App) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\tvar head string\n\n\tswitch req.URL.Path {\n\tcase \"/\", \"/orgs\", \"/users\", \"/devices\", \"/sign-in\", \"/groups\", \"/msg\":\n\t\th.IndexHandler.ServeHTTP(res, req)\n\n\tdefault:\n\t\thead, req.URL.Path = ShiftPath(req.URL.Path)\n\t\tswitch head {\n\t\tcase \"public\":\n\t\t\th.PublicHandler.ServeHTTP(res, req)\n\t\tcase \"v1\":\n\t\t\th.V1ApiHandler.ServeHTTP(res, req)\n\t\tdefault:\n\t\t\thttp.Error(res, \"Not Found\", http.StatusNotFound)\n\t\t}\n\t}\n}", "func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\ttmpl, err 
:= h.tmpl.Render()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tsession, err := h.store.Get(r, h.config.SessionName)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tid, ok := vars[\"id\"]\n\tif !ok {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdata := h.tmpl.Data()\n\tif flashes := session.Flashes(); len(flashes) > 0 {\n\t\tdata.SetFlashes(flashes)\n\t}\n\n\tintid, _ := strconv.Atoi(id)\n\n\tposts, err := h.db.GetPostsByUser(intid)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdata.Posts = posts\n\n\tuserCookie, ok := session.Values[h.config.UserCookieKey].(db.PublicUserData)\n\tif !ok {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdata.SetUserData(userCookie)\n\n\trequestedUser, err := h.db.GetUserByID(intid)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdata.SetTargetUser(requestedUser)\n\tdata.Title = requestedUser.Username\n\n\terr = session.Save(r, w)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err = tmpl.ExecuteTemplate(w, blog.TemplatesBase, data); err != nil {\n\t\tlog.Fatal(\"Could not execute register templates.\")\n\t}\n}", "func (r *Router) executeHandler(w http.ResponseWriter, req *http.Request) {\n\n\tvar url string\n\t// dont jump away from function if not necessary\n\tif r.prefix != \"\" {\n\t\turl = strings.TrimPrefix(req.URL.Path, r.prefix)\n\t} else {\n\t\turl = req.URL.Path\n\t}\n\n\tmethod := methodToInt(req.Method)\n\tvar parameters *ParameterList\n\n\tif method == -1 {\n\t\tr.notAllowedMethod(w, req, nil)\n\t\tlog.Println(\"Method not allowed: \" + req.Method)\n\t\treturn\n\t}\n\n\tcurrentNode := r.pathTrees[method]\n\t// check if there is an handler for the request method\n\tif currentNode == nil {\n\t\tr.notAllowedMethod(w, 
req, nil)\n\t\tlog.Println(\"Method not allowed, no handler set for: \" + req.Method)\n\t\treturn\n\t}\n\n\t// return index page\n\tif len(url) == 0 || url == \"/\" {\n\t\tstaticNode := currentNode.staticRoutes[1]\n\n\t\tif staticNode != nil {\n\t\t\tex := staticNode.get(\"/\")\n\t\t\tif ex != nil {\n\t\t\t\tex.handler(w, req, nil)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else if pn := currentNode.parameterHandler; pn != nil { // paramter node is set\n\t\t\tpn.handler(w, req, nil)\n\t\t\treturn\n\t\t}\n\t\tr.notFound(w, req, nil)\n\t\treturn\n\t}\n\n\tlastSlash := 0\n\t// start from one to skip the first /\n\tsize := len(url)\n\tvar sch string\n\n\tfor i := 1; i < size; i++ {\n\t\t// do something only when a / is found (or end of url is reached)\n\t\tif url[i] == '/' || i == size-1 {\n\t\t\t// grab the paramter from the url with a slice\n\t\t\tif i != size-1 {\n\t\t\t\tsch = url[lastSlash:i]\n\t\t\t} else {\n\t\t\t\tsch = url[lastSlash:]\n\t\t\t}\n\n\t\t\t// first search static nodes\n\t\t\tvar staticNode *pathNode\n\t\t\tif currentNode.staticRoutes != nil {\n\t\t\t\tsz := len(sch)\n\t\t\t\tif sz < len(currentNode.staticRoutes) {\n\t\t\t\t\tstaticNode = currentNode.staticRoutes[sz].get(sch)\n\t\t\t\t} else {\n\t\t\t\t\tstaticNode = nil\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tstaticNode = nil\n\t\t\t}\n\n\t\t\tif staticNode != nil {\n\t\t\t\tcurrentNode = staticNode\n\t\t\t} else if currentNode.parameterHandler != nil { // then check if the value could be a paramter\n\t\t\t\tcurrentNode = currentNode.parameterHandler\n\t\t\t\tif parameters == nil {\n\t\t\t\t\tparameters = r.paramPool.Get()\n\t\t\t\t}\n\t\t\t\tif currentNode.name == \"*\" { // * parameter require that everything is matched\n\t\t\t\t\tparameters.Set(currentNode.name, url[lastSlash:])\n\t\t\t\t\tbreak // they also must stop the cycle and jump to handlers execution\n\t\t\t\t} else {\n\t\t\t\t\tparameters.Set(currentNode.name, sch[1:])\n\t\t\t\t}\n\t\t\t} else { // in nothing is found then we can print a not found 
message\n\t\t\t\tr.paramPool.Push(parameters)\n\t\t\t\tr.notFound(w, req, nil) // not found any possible match\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlastSlash = i // update last slash pos after operations\n\t\t}\n\t}\n\n\t// when we are here we are in the last node of the url so we can execute the action\n\tif currentNode.handler != nil {\n\t\tcurrentNode.handler(w, req, parameters)\n\t} else {\n\t\tr.notFound(w, req, nil)\n\t}\n\tr.paramPool.Push(parameters)\n}", "func (a AdminHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdata := LayoutData(w, r)\n\tdata, err := serveAdminPage(a.Database, data)\n\tif err != \"\" {\n\t\tdata = data.MergeKV(\"error\", err)\n\t}\n\n\tmustRender(w, r, \"admin\", data)\n}", "func (p *Plugin) ServeHTTP(c *plugin.Context, w http.ResponseWriter, r *http.Request) {\n\tswitch r.URL.Path {\n\tcase \"/create\":\n\t\tp.handleCreate(w, r)\n\tdefault:\n\t\thttp.NotFound(w, r)\n\t}\n}", "func (r *Runner) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tr.rpc.ServeHTTP(w, req)\n}", "func Handler(gf func() restful.Injector, ls logSet) http.Handler {\n\thandler := mux(gf, ls)\n\taddMetrics(handler, ls)\n\treturn handler\n}", "func (cmh *CreateMessageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tvar newMsg types.Message\n\n\tif err := render.Bind(r, &newMsg); err != nil {\n\t\tif err.Error() == \"EOF\" {\n\t\t\t// message body was blank\n\t\t\terr = errors.New(\"missing message parameters\")\n\t\t}\n\t\thandlers.RespondBadRequest(w, err)\n\t\treturn\n\t}\n\n\texecutedMsg, err := cmh.Callback(&newMsg)\n\thandlers.Respond(w, executedMsg, err)\n}", "func (eh *executorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// Preprocess\n\tif eh.preprocessHost != \"\" {\n\t\tif err := eh.preprocess(r); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Predict\n\tb, err := eh.predict(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 
http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Postprocess\n\tif eh.postprocessHost != \"\" {\n\t\tb, err = eh.postprocess(r, b)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Write final response\n\t_, err = w.Write(b)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func (h *handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tif !h.tracer.Recording() || h.requestIgnorer(req) {\n\t\th.handler.ServeHTTP(w, req)\n\t\treturn\n\t}\n\ttx, body, req := StartTransactionWithBody(h.tracer, h.requestName(req), req)\n\tdefer tx.End()\n\n\tw, resp := WrapResponseWriter(w)\n\n\tdefer func() {\n\t\tif v := recover(); v != nil {\n\t\t\tif h.panicPropagation {\n\t\t\t\tdefer panic(v)\n\t\t\t\t// 500 status code will be set only for APM transaction\n\t\t\t\t// to allow other middleware to choose a different response code\n\t\t\t\tif resp.StatusCode == 0 {\n\t\t\t\t\tresp.StatusCode = http.StatusInternalServerError\n\t\t\t\t}\n\t\t\t} else if resp.StatusCode == 0 {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t}\n\t\t\th.recovery(w, req, resp, body, tx, v)\n\t\t}\n\t\tSetTransactionContext(tx, req, resp, body)\n\t\tbody.Discard()\n\t}()\n\th.handler.ServeHTTP(w, req)\n\tif resp.StatusCode == 0 {\n\t\tresp.StatusCode = http.StatusOK\n\t}\n}", "func anyHTTPHandler(ctx *fasthttp.RequestCtx) {\n\treqHelper := countPool.Get().(RequestPack)\n\n\treqHelper.ModRequest.Headers = parseHeaders(ctx)\n\terr := recover()\n\tif err != nil {\n\t\tctx.SetStatusCode(400)\n\t\tctx.SetBodyString(\"Invalid request\")\n\t\tcountPool.Put(reqHelper)\n\t\treturn\n\t}\n\n\treqHelper.ModRequest.Method = string(ctx.Method())\n\treqHelper.ModRequest.Remote = ctx.RemoteAddr().String()\n\treqHelper.ModRequest.Path = string(ctx.Path())\n\treqHelper.ModRequest.Version = \"HTTP/1.1\"\n\treqHelper.ModRequest.Body = 
string(ctx.PostBody())\n\treqHelper.ModRequest.Query = ctx.QueryArgs().String()\n\n\texists := shardManager.SubmitToShard(reqHelper.ShardId, &reqHelper.ModRequest, reqHelper.RecvChannel)\n\n\tif !exists {\n\t\tctx.SetStatusCode(503)\n\t\t_, _ = fmt.Fprintf(\n\t\t\tctx, \"Internal Server error: Shard with Id: %v does not exist.\", &reqHelper.ShardId)\n\t\treturn\n\t}\n\n\tresponse := <-reqHelper.RecvChannel\n\n\tcountPool.Put(reqHelper)\n\n\tctx.SetStatusCode(response.Status)\n\tctx.SetBodyString(response.Body)\n\n\tvar head []string\n\tfor _, head = range response.Headers {\n\t\tctx.Response.Header.Set(head[0], head[1])\n\t}\n}", "func queryHandler(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tif r.Header.Get(\"Content-Type\") != \"application/json\" {\r\n\t\tw.WriteHeader(http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\t//To allocate slice for request body\r\n\tlength, err := strconv.Atoi(r.Header.Get(\"Content-Length\"))\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\t//Read body data to parse json\r\n\tbody := make([]byte, length)\r\n\tlength, err = r.Body.Read(body)\r\n\tif err != nil && err != io.EOF {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\t//parse json\r\n\tvar jsonBody map[string]interface{}\r\n\terr = json.Unmarshal(body[:length], &jsonBody)\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\tvar time_from,time_to time.Time\r\n\tif time_from, err = getTimeFromReq(jsonBody, \"from\"); err != nil{\r\n\t\tfmt.Printf(\"ERR: %v\\n\", err)\r\n\t\tw.WriteHeader(http.StatusBadRequest)\t\t\r\n\t\treturn \r\n\t}\r\n\r\n\tif time_to, err = getTimeFromReq(jsonBody, \"to\"); err != nil{\r\n\t\tfmt.Printf(\"ERR: %v\\n\", err)\r\n\t\tw.WriteHeader(http.StatusBadRequest)\t\t\r\n\t\treturn \r\n\t}\r\n\r\n\tvar targets []string\r\n\tif targets, err = getTargetFromReq(jsonBody); err != nil {\r\n\t\tfmt.Printf(\"ERR: 
%v\\n\", err)\r\n\t}\r\n\r\n\tjsonOut := getRedisVal(*redisHost,\r\n\t\ttargets,\r\n\t\tstrconv.FormatInt(time_from.Unix(), 10),\r\n\t\tstrconv.FormatInt(time_to.Unix(), 10),\r\n\t\tint(jsonBody[\"maxDataPoints\"].(float64)))\r\n\r\n\tw.Header().Set(\"Content-Type\", \"application/json\")\r\n\tfmt.Fprintf(w, jsonOut)\r\n\treturn\r\n}" ]
[ "0.73312646", "0.72953224", "0.71947956", "0.7187551", "0.7050148", "0.70441633", "0.6992783", "0.6937332", "0.6732775", "0.6642417", "0.6604141", "0.64036214", "0.6345075", "0.63396084", "0.6274692", "0.6235615", "0.61169827", "0.6094849", "0.60695696", "0.6055218", "0.59856784", "0.5869056", "0.5866543", "0.5856031", "0.57917553", "0.5779997", "0.57791454", "0.5775843", "0.5760648", "0.5756712", "0.57521933", "0.5733476", "0.57309705", "0.57308567", "0.57226866", "0.57190317", "0.57083", "0.5686549", "0.56728387", "0.5648286", "0.56388146", "0.5635525", "0.56117547", "0.5553432", "0.55485946", "0.55441505", "0.5524319", "0.5499048", "0.5495776", "0.54943", "0.5489103", "0.5480738", "0.54681706", "0.5462634", "0.5456205", "0.54552525", "0.5446442", "0.5430333", "0.54098344", "0.54032254", "0.5398722", "0.53971165", "0.5375121", "0.53675365", "0.5344114", "0.5335649", "0.53256893", "0.5315543", "0.5312703", "0.5298856", "0.52961475", "0.5286084", "0.5278037", "0.5272735", "0.52695984", "0.52678716", "0.5257112", "0.5247626", "0.5245156", "0.52450585", "0.52440643", "0.5240692", "0.52366936", "0.5236211", "0.5229309", "0.5222401", "0.52155304", "0.5204562", "0.51915115", "0.51830935", "0.5180662", "0.5176202", "0.5172079", "0.5170899", "0.5170252", "0.51639557", "0.5162242", "0.51611966", "0.5159538", "0.5154815" ]
0.6761708
8
1 infuture, 0 inprogress 1 ended
func JudgeContestStatus(cst *models.Contest, t time.Time) int { if t.Before(cst.StartTime) { return -1 } if t.After(cst.EndTime) { return 1 } return 0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (j *Job) done() { j.isDone <- true }", "func (m *Manager) InProgress(result int64, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tm.LastProcess = int64(result)\n}", "func (t *ProgressTracker) proceed() {\n\tatomic.AddUint64(&t.progress, 1)\n}", "func (s *status) ended() error { return s.set(\"ended\") }", "func (px *Paxos) Done(seq int) {\n // Your code here.\n px.mu.Lock()\n defer px.mu.Unlock()\n\n px.decisions[px.me] = seq\n}", "func (b *Bar) InProgress() bool {\n\treturn !isClosed(b.done)\n}", "func (s *testSignaler) isDone() bool {\n\tselect {\n\tcase status := <-s.status:\n\t\ts.nonBlockingStatus <- status\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (px *Paxos) Done(seq int) {\n\t// Your code here.\n\tpx.localDoneMin = seq\n}", "func (cs cmdStatus) isDone() bool {\n\treturn cs&(1<<12 /*busy*/) == 0\n}", "func (async *async) isCompleted() bool {\n\tstate := atomic.LoadUint32(&async.state)\n\treturn state == completed\n}", "func (m *Master) Done() bool {\n\n\t// Your code here.\n\n\treturn m.end\n}", "func (px *Paxos) Done(seq int) {\n // Your code here.\n px.mu.Lock()\n defer px.mu.Unlock()\n px.doneMap[px.me] = max(px.doneMap[px.me], seq)\n px.cleanDoneValues()\n return\n}", "func (p *Pool) done() {\n\tatomic.AddInt32(&p.counter, -1)\n\n\tif p.counter <= 0 {\n\t\t// unlock wait\n\t\tp.wait <- struct{}{}\n\t}\n}", "func (w *WaitTask) startInner(taskContext *TaskContext) {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\n\tklog.V(3).Infof(\"wait task progress: %d/%d\", 0, len(w.Ids))\n\n\tpending := object.ObjMetadataSet{}\n\tfor _, id := range w.Ids {\n\t\tswitch {\n\t\tcase w.skipped(taskContext, id):\n\t\t\terr := taskContext.InventoryManager().SetSkippedReconcile(id)\n\t\t\tif err != nil {\n\t\t\t\t// Object never applied or deleted!\n\t\t\t\tklog.Errorf(\"Failed to mark object as skipped reconcile: %v\", err)\n\t\t\t}\n\t\t\tw.sendEvent(taskContext, id, event.ReconcileSkipped)\n\t\tcase w.changedUID(taskContext, id):\n\t\t\t// 
replaced\n\t\t\tw.handleChangedUID(taskContext, id)\n\t\tcase w.reconciledByID(taskContext, id):\n\t\t\terr := taskContext.InventoryManager().SetSuccessfulReconcile(id)\n\t\t\tif err != nil {\n\t\t\t\t// Object never applied or deleted!\n\t\t\t\tklog.Errorf(\"Failed to mark object as successful reconcile: %v\", err)\n\t\t\t}\n\t\t\tw.sendEvent(taskContext, id, event.ReconcileSuccessful)\n\t\tdefault:\n\t\t\terr := taskContext.InventoryManager().SetPendingReconcile(id)\n\t\t\tif err != nil {\n\t\t\t\t// Object never applied or deleted!\n\t\t\t\tklog.Errorf(\"Failed to mark object as pending reconcile: %v\", err)\n\t\t\t}\n\t\t\tpending = append(pending, id)\n\t\t\tw.sendEvent(taskContext, id, event.ReconcilePending)\n\t\t}\n\t}\n\tw.pending = pending\n\n\tklog.V(3).Infof(\"wait task progress: %d/%d\", len(w.Ids)-len(w.pending), len(w.Ids))\n\n\tif len(pending) == 0 {\n\t\t// all reconciled - clear pending and exit\n\t\tklog.V(3).Infof(\"all objects reconciled or skipped (name: %q)\", w.TaskName)\n\t\tw.cancelFunc()\n\t}\n}", "func (t *Task) MarkComplete() {\n t.Complete = true\n}", "func (o Outcome) IsFinished() bool { return o.Reason != notCompleted }", "func (TransferStatus) Started() TransferStatus { return TransferStatus(1) }", "func (j *Job) Wait() { <-j.isDone }", "func (b *Body) InfectionProgress() {\n\tfor _, oNum := range b.infectionPattern[b.infectionSet] {\n\t\tif b.graph[oNum].DiseaseLevel() != 0 && b.graph[oNum].DiseaseLevel() != 1 {\n\t\t\treturn\n\t\t}\n\t}\n\tb.infectionSet++\n\tif b.infectionSet >= len(b.infectionPattern) {\n\t\tb.infectionSet--\n\n\t\t<-time.After(2 * time.Second)\n\t\tthisBody.complete = true\n\t\treturn\n\t}\n\tfor _, oNum := range b.infectionPattern[b.infectionSet] {\n\t\tb.Infect(oNum)\n\t}\n}", "func (m *Master) Done() bool {\n\tret := false\n\t// Your code here.\n\tm.mutex.Lock()\n\tret = m.isFinished_\n\tm.mutex.Unlock()\n\treturn ret\n}", "func (async *async) isPending() bool {\n\tstate := 
atomic.LoadUint32(&async.state)\n\treturn state == pending\n}", "func (r *WaitingCounter) Done() {\n\tif nv := (*atomic.Int32)(r).Add(-1); nv < 0 {\n\t\tpanic(\"negative counter value\")\n\t}\n}", "func (TransferStatus) NotStarted() TransferStatus { return TransferStatus(0) }", "func (m *Master) Done() bool {\n\n\t// Your code here.\n\treturn m.ReduceFinish\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tm.Mutex.Lock()\n\tdefer m.Mutex.Unlock()\n\tif m.Phase == Reduce && len(m.Undone) == 0 && len(m.Doing) == 0 {\n\t\tret = true\n//\t\tlog.Printf(\"all tasks finished\")\n\t}\n\n\treturn ret\n}", "func (pid pidcontroller) isDone() bool {\n\treturn pid.atTarget\n}", "func (m *Master) Done() bool {\n\t// ret := (JobDown == m.phase)\n\n\t// Your code here.\n\treturn JobDown == m.phase\n}", "func (gq *Dispatch) next() {\n for true {\n // Attempt to start processing the file.\n gq.pLock.Lock()\n if gq.processing >= gq.MaxGo {\n gq.waitingToRun = true\n gq.nextWait.Add(1)\n gq.pLock.Unlock()\n gq.nextWait.Wait()\n continue\n }\n // Keep the books and reset wait time before unlocking.\n gq.processing++\n gq.pLock.Unlock()\n\n // Get an element from the queue.\n gq.qLock.Lock()\n var wrapper = gq.queue.Dequeue().(queues.RegisteredTask)\n gq.qLock.Unlock()\n\n // Begin processing and asyncronously return.\n //var task = taskelm.Value.(dispatchTaskWrapper)\n var task = wrapper.Func()\n go task(wrapper.Id())\n return\n }\n}", "func (m *Master) haveDone() bool {\n\tret := true\n\tt := time.Now().Unix()\n\tj := 0\n\tfor j < len(m.reduceTasks) {\n\t\tif m.reduceTasks[j].state == 1 {\n\t\t\tif t-m.reduceTasks[j].emittime >= TIMEOUT {\n\t\t\t\tm.reduceTasks[j].state = 0\n\t\t\t}\n\t\t}\n\t\tj++\n\t}\n\ti := 0\n\tfor _, reduceTask := range m.reduceTasks {\n\t\tif reduceTask.state == 0 {\n\t\t\tm.nextreducetask = i\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\tfor _, reduceTask := range m.reduceTasks {\n\t\tif reduceTask.state != 2 {\n\t\t\tret = 
false\n\t\t\tbreak\n\t\t}\n\t}\n\tif ret {\n\t\tm.done = true\n\t}\n\treturn ret\n}", "func (px *Paxos) Done(seq int) {\n\t// Your code here.\n\tif seq > px.doneSeq {\n\t\tpx.doneSeq = seq\n\t}\n}", "func WaitForNotify() {\n\tcount++\n\t<-done\n}", "func (this *DeployLock) finish() {\n\tthis.mutex.Lock()\n\tthis.numFinished++\n\tthis.mutex.Unlock()\n}", "func f2() {\n\tc := make(chan int, 4)\n\tgo func(c chan int) {\n\t\tvar i = -1\n\t\tfor {\n\t\t\ti += 1\n\t\t\tctx, _ := context.WithTimeout(context.Background(), time.Duration(5*time.Second))\n\n\t\t\tfmt.Printf(\"\\ncurrent num is : %d\\n\", i)\n\t\t\tselect {\n\t\t\tcase c <- i:\n\t\t\t\tfmt.Printf(\"sent success : %d\\n\", i)\n\t\t\tcase <-ctx.Done():\n\t\t\t\tfmt.Printf(\"sent failed : %d, since timeout\\n\", i)\n\t\t\t}\n\t\t}\n\t}(c)\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif m.remainReduceCount == 0 {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (t *task) done(r interface{}, err error) {\n\tfor i := 0; i < t.Ctr; i++ {\n\t\tt.C <- &TaskState{\n\t\t\tR: r,\n\t\t\tE: err,\n\t\t}\n\t}\n}", "func (q *Q) Complete() {\n\tif q.Status != PROGRESS {\n\t\treturn\n\t}\n\tq.Status = SUCCESS\n\tq.History = append(q.History, \"Quest Achieved\")\n}", "func (m *Master) Done() bool {\n\t// Your code here.\n\n\treturn m.done\n}", "func (f *fakeProgressbar) Wait() {}", "func (px *Paxos) Done(seq int) {\n\t// Your code here.\n\tif px.dones[px.me] < seq {\n\t\tpx.dones[px.me] = seq\n\t}\n}", "func completedLoop(instance *v2alpha2.Experiment) (int, bool) {\n\tif instance.Status.GetCompletedIterations()%instance.Spec.GetIterationsPerLoop() == 0 {\n\t\treturn int(instance.Status.GetCompletedIterations() / instance.Spec.GetIterationsPerLoop()), true\n\t}\n\treturn -1, false\n}", "func TestInProgress(t *testing.T) {\n\ttestCase := func(c chan<- fs.Event) {\n\t\tevents.Default.Log(events.ItemStarted, map[string]string{\n\t\t\t\"item\": \"inprogress\",\n\t\t})\n\t\tsleepMs(100)\n\t\tc <- 
fs.Event{Name: \"inprogress\", Type: fs.NonRemove}\n\t\tsleepMs(1000)\n\t\tevents.Default.Log(events.ItemFinished, map[string]interface{}{\n\t\t\t\"item\": \"inprogress\",\n\t\t})\n\t\tsleepMs(100)\n\t\tc <- fs.Event{Name: \"notinprogress\", Type: fs.NonRemove}\n\t\tsleepMs(800)\n\t}\n\n\texpectedBatches := []expectedBatch{\n\t\t{[][]string{{\"notinprogress\"}}, 2000, 3500},\n\t}\n\n\ttestScenario(t, \"InProgress\", testCase, expectedBatches)\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\n\tif (m.taskPhase == ExitPhase) {\n\t\tret = true\n\t}\n\t// Your code here.\n\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tsuccNum := atomic.LoadInt64(&m.JobManager.SuccNum)\n\tif succNum == int64(m.Rnum) {\n\t\tclose(m.JobManager.Jobs) // 安全关闭\n\t\tclose(m.JobManager.RShuffleChan)\n\t\treturn true\n\t}\n\treturn false\n}", "func (m *Master) Done() bool {\n\t// Your code here.\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\treturn m.done\n}", "func (px *Paxos) Done(seq int) {\n\t// Your code here.\n\tpx.peerDones[px.me] = seq\n}", "func (m *Master) SignalTaskStatus(args *model.TaskStatus, reply *bool) error {\n\tif !args.Success {\n\t\treturn nil\n\t}\n\n\tif m.phase == model.Map {\n\t\tlog.Infof(\"map phase for %s completed\", args.File)\n\t\tm.mutex.Lock()\n\t\tdefer m.mutex.Unlock()\n\t\tf := path.Base(args.File)\n\t\tif t, ok := m.mapTasks[f]; ok {\n\t\t\tif t.Status == inprogress {\n\t\t\t\tt.Status = completed\n\t\t\t\tt.Files = append(t.Files, args.OutFiles...)\n\t\t\t\tm.mapTasks[f] = t\n\t\t\t}\n\n\t\t\t// Build up reduce tasks.\n\t\t\tfor i, v := range args.OutFiles {\n\t\t\t\tkey := toString(i + 1)\n\t\t\t\tt := m.reduceTasks[key]\n\t\t\t\tt.Files = append(t.Files, v)\n\t\t\t\tm.reduceTasks[key] = t\n\t\t\t}\n\t\t}\n\t} else if m.phase == model.Reduce {\n\t\tlog.Infof(\"reduce phase %s completed\", args.File)\n\t\ti, _ := strconv.ParseInt(args.File, 10, 32)\n\t\tkey := toString(i + 1)\n\t\tm.mutex.Lock()\n\t\tdefer m.mutex.Unlock()\n\t\tif t, ok := 
m.reduceTasks[key]; ok {\n\t\t\tif t.Status == inprogress {\n\t\t\t\tt.Status = completed\n\t\t\t\tm.reduceTasks[key] = t\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif len(m.completedTasks[0])==m.M && len(m.completedTasks[1])==m.R {\n\t\tret = true\n\t}\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tif m.State == Success {\n\t\treturn true\n\t}\n\n\t// Your code here.\n\tif m.State == Map {\n\t\treturn false\n\t}\n\tfor _, v := range m.ReduceTask {\n\t\tif v.Status != Finish {\n\t\t\treturn false\n\t\t}\n\t}\n\tm.State = Success\n\treturn true\n}", "func (m *Master) Done() bool {\n\t// Your code here.\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tnum_active_client := len(m.clients)\n\tfmt.Println(\"Active clients: \" + strconv.Itoa(num_active_client))\n\n\tdone := true\n\tfor job, job_status := range m.jobs {\n\t\tfmt.Println(job + \": \" + job_status)\n\t\tif job_status != \"done\" {\n\t\t\tdone = false\n\t\t}\n\t}\n\treturn done\n}", "func (s *State) finished() bool {\n\tfor _, state := range s.states {\n\t\tif state != mesos.TASK_FINISHED {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (inv *Invocation) Done() bool {\n\tinv.mutex.Lock()\n\tdefer inv.mutex.Unlock()\n\n\treturn inv.done()\n}", "func (px *Paxos) Done(done int) {\n\t// Your code here.\n\tpx.Lock()\n\tdefer px.Unlock()\n\n\tif px.done >= done {\n\t\treturn\n\t}\n\n\tvar i int\n\tfor i = px.done; i <= done; i++ {\n\t\tif i < 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsi := px.seqInstances[i]\n\t\tif si == nil || si.Status() != Decided {\n\t\t\tbreak\n\t\t}\n\t}\n\tpx.done = i - 1\n}", "func (m *Master) Done() bool {\n\n\t// Your code here.\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\treturn m.state == TearDown\n}", "func (r errorReporter) futureWork(n ast.Node, msg string, args ...interface{}) {\n\tr.prefixed(\"future\", n, msg, args...)\n}", "func (t *task) reportNonFinalStageChange() 
{\n\tclose(t.stageChangeWakeup)\n\tt.stageChangeWakeup = make(chan struct{})\n}", "func (px *Paxos) Done(seq int) {\n px.mu.Lock()\n defer px.mu.Unlock()\n\n px.done[px.me] = seq\n px.freeMemory()\n}", "func (px *Paxos) status(Seq int) (Fate, interface{}) {\n\t// Your code here.\n\tpx.mu.Lock()\n\tdefer px.mu.Unlock()\n\tVal, ok := px.Stati[Seq]\n\n\tif !ok {\n\t\treturn Pending, nil\n\t}\n\n\tif Val == Decided {\n\t\t// return Decided, px.result[Seq]\n\t\treturn Decided, px.Val[Seq]\n\t}\n\treturn Val, nil\n}", "func (t *task) complete(bq *InMemoryBuildQueue, executeResponse *remoteexecution.ExecuteResponse, completedByWorker bool) {\n\tswitch t.getStage() {\n\tcase remoteexecution.ExecutionStage_QUEUED:\n\t\t// The task isn't executing. Create a temporary worker\n\t\t// on which we start the task, so that we can go through\n\t\t// the regular completion code below.\n\t\tvar w worker\n\t\tw.assignQueuedTask(bq, t)\n\tcase remoteexecution.ExecutionStage_EXECUTING:\n\t\t// Task is executing on a worker. Make sure to preserve\n\t\t// worker.lastInvocations.\n\t\tif completedByWorker {\n\t\t\tw := t.currentWorker\n\t\t\tif len(w.lastInvocations) != 0 {\n\t\t\t\tpanic(\"Executing worker cannot have invocations associated with it\")\n\t\t\t}\n\t\t\tif len(t.operations) == 0 {\n\t\t\t\tpanic(\"Task no longer has an invocation associated with it\")\n\t\t\t}\n\t\t\tfor i := range t.operations {\n\t\t\t\tw.lastInvocations = append(w.lastInvocations, i)\n\t\t\t\ti.idleWorkersCount++\n\t\t\t}\n\t\t}\n\tcase remoteexecution.ExecutionStage_COMPLETED:\n\t\t// Task is already completed. 
Nothing to do.\n\t\treturn\n\t}\n\n\tcurrentSCQ := t.getSizeClassQueue()\n\tfor i := range t.operations {\n\t\ti.decrementExecutingWorkersCount(bq)\n\t}\n\tt.currentWorker.currentTask = nil\n\tt.currentWorker = nil\n\tresult, grpcCode := getResultAndGRPCCodeFromExecuteResponse(executeResponse)\n\tt.registerExecutingStageFinished(bq, result, grpcCode)\n\n\t// Communicate the results to the initial size class learner,\n\t// which may request that the task is re-executed.\n\tpq := currentSCQ.platformQueue\n\tvar timeout time.Duration\n\tif code, actionResult := status.FromProto(executeResponse.Status).Code(), executeResponse.Result; code == codes.OK && actionResult.GetExitCode() == 0 {\n\t\t// The task succeeded, but we're still getting\n\t\t// instructed to run the task again for training\n\t\t// purposes. If that happens, create a new task that\n\t\t// runs in the background. The user does not need to be\n\t\t// blocked on this.\n\t\texecutionMetadata := actionResult.GetExecutionMetadata()\n\t\tbackgroundSizeClassIndex, backgroundTimeout, backgroundInitialSizeClassLearner := t.initialSizeClassLearner.Succeeded(\n\t\t\texecutionMetadata.GetExecutionCompletedTimestamp().AsTime().Sub(\n\t\t\t\texecutionMetadata.GetExecutionStartTimestamp().AsTime()),\n\t\t\tpq.sizeClasses)\n\t\tt.initialSizeClassLearner = nil\n\t\tif backgroundInitialSizeClassLearner != nil {\n\t\t\tif pq.maximumQueuedBackgroundLearningOperations == 0 {\n\t\t\t\t// No background learning permitted.\n\t\t\t\tbackgroundInitialSizeClassLearner.Abandoned()\n\t\t\t} else {\n\t\t\t\tbackgroundSCQ := pq.sizeClassQueues[backgroundSizeClassIndex]\n\t\t\t\tbackgroundInvocation := backgroundSCQ.getOrCreateInvocation(backgroundLearningInvocationKey)\n\t\t\t\tif backgroundInvocation.queuedOperations.Len() >= pq.maximumQueuedBackgroundLearningOperations {\n\t\t\t\t\t// Already running too many background tasks.\n\t\t\t\t\tbackgroundInitialSizeClassLearner.Abandoned()\n\t\t\t\t} else {\n\t\t\t\t\tbackgroundAction := 
*t.desiredState.Action\n\t\t\t\t\tbackgroundAction.DoNotCache = true\n\t\t\t\t\tbackgroundAction.Timeout = durationpb.New(backgroundTimeout)\n\t\t\t\t\tbackgroundTask := &task{\n\t\t\t\t\t\toperations: map[*invocation]*operation{},\n\t\t\t\t\t\tactionDigest: t.actionDigest,\n\t\t\t\t\t\tdesiredState: t.desiredState,\n\t\t\t\t\t\ttargetID: t.targetID,\n\t\t\t\t\t\tinitialSizeClassLearner: backgroundInitialSizeClassLearner,\n\t\t\t\t\t\tstageChangeWakeup: make(chan struct{}),\n\t\t\t\t\t}\n\t\t\t\t\tbackgroundTask.desiredState.Action = &backgroundAction\n\t\t\t\t\tbackgroundTask.newOperation(bq, pq.backgroundLearningOperationPriority, backgroundInvocation, true)\n\t\t\t\t\tbackgroundTask.schedule(bq)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else if completedByWorker {\n\t\t// The worker communicated that the task failed. Attempt\n\t\t// to run it on another size class.\n\t\ttimeout, t.initialSizeClassLearner = t.initialSizeClassLearner.Failed(code == codes.DeadlineExceeded)\n\t} else {\n\t\t// The task was completed, but this was not done by the\n\t\t// worker. Treat is as a regular failure.\n\t\tt.initialSizeClassLearner.Abandoned()\n\t\tt.initialSizeClassLearner = nil\n\t}\n\n\tif t.initialSizeClassLearner != nil {\n\t\t// Re-execution against the largest size class is\n\t\t// requested, using the original timeout value.\n\t\t// Transplant all operations to the other size class\n\t\t// queue and reschedule.\n\t\tt.desiredState.Action.Timeout = durationpb.New(timeout)\n\t\tt.registerCompletedStageFinished(bq)\n\t\tlargestSCQ := pq.sizeClassQueues[len(pq.sizeClassQueues)-1]\n\t\toperations := t.operations\n\t\tt.operations = make(map[*invocation]*operation, len(operations))\n\t\tfor oldI, o := range operations {\n\t\t\ti := largestSCQ.getOrCreateInvocation(oldI.invocationKey)\n\t\t\tt.operations[i] = o\n\t\t\to.invocation = i\n\t\t}\n\t\tt.schedule(bq)\n\t\tt.reportNonFinalStageChange()\n\t} else {\n\t\t// The task succeeded or it failed on the largest size\n\t\t// class. 
Let's just complete it.\n\t\t//\n\t\t// Scrub data from the task that are no longer needed\n\t\t// after completion. This reduces memory usage\n\t\t// significantly. Keep the Action digest, so that\n\t\t// there's still a way to figure out what the task was.\n\t\tdelete(bq.inFlightDeduplicationMap, t.actionDigest)\n\t\tt.executeResponse = executeResponse\n\t\tt.desiredState.Action = nil\n\t\tclose(t.stageChangeWakeup)\n\t\tt.stageChangeWakeup = nil\n\n\t\t// Background learning tasks may continue to exist, even\n\t\t// if no clients wait for the results. Now that this\n\t\t// task is completed, it must go through the regular\n\t\t// cleanup process.\n\t\tfor _, o := range t.operations {\n\t\t\tif o.mayExistWithoutWaiters {\n\t\t\t\to.mayExistWithoutWaiters = false\n\t\t\t\to.maybeStartCleanup(bq)\n\t\t\t}\n\t\t}\n\t}\n}", "func (px *Paxos) Done(seq int) {\n\t//DPrintf(\"Done(%d)\\n\", seq)\n\tpx.done[px.me] = seq\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n\t// Your code here.\n\tif seq < px.Min() {\n\t\treturn Forgotten, nil\n\t}\n\n\tnode, ok := px.prepareStatus.Find(seq)\n\tif ok && node.State.Done {\n\t\treturn Decided, node.State.VA\n\t}\n\treturn Pending, nil\n}", "func (w *Watcher) isDone() bool {\n\tvar done bool\n\tselect {\n\tcase done = <-w.done:\n\t\tw.finish()\n\tdefault:\n\t}\n\treturn done\n}", "func StatusWorkOutRespCount() int { return statusWorkOutRespCount }", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif m.reduceFinished == m.reduceTasks {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func WaitStatusContinued() WaitStatus {\n\treturn WaitStatus(0xffff)\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\treturn ret\n}", "func (t *Transport) finish(stream *Stream, out []byte) (n int) {\n\tatomic.AddUint64(&t.nTxfin, 1)\n\tvar scratch [16]byte\n\tn = tag2cbor(tagCborPrefix, out) // prefix\n\tout[n] = 0xc8 // 0xc8 (end stream, 0b110_01000 <tag,8>)\n\tn++ //\n\tm := tag2cbor(stream.opaque, 
scratch[:]) // tag-opaque\n\tscratch[m] = 0x40 // zero-len byte-string\n\tm++\n\tn += valbytes2cbor(scratch[:m], out[n:]) // packet\n\tout[n] = 0xff // 0xff CBOR indefinite end.\n\tn++\n\treturn n\n}", "func (r *OperationReqReconciler) isCompleted(request *userv1.Operationrequest) bool {\n\treturn request.Status.Phase == userv1.RequestCompleted\n}", "func (e *Opener) Success(now time.Time, duration time.Duration) {\n\te.legitimateAttemptsCount.Inc(now)\n}", "func (t *task) registerExecutingStageFinished(bq *InMemoryBuildQueue, result, grpcCode string) {\n\tscq := t.getSizeClassQueue()\n\tscq.tasksExecutingDurationSeconds.WithLabelValues(result, grpcCode).Observe(bq.now.Sub(t.currentStageStartTime).Seconds())\n\tscq.tasksExecutingRetries.WithLabelValues(result, grpcCode).Observe(float64(t.retryCount))\n\tt.currentStageStartTime = bq.now\n}", "func (bi *blockImporter) statusHandler(resultsChan chan *importResults) {\n\tselect {\n\t// An error from either of the goroutines means we're done so signal\n\t// caller with the error and signal all goroutines to quit.\n\tcase err := <-bi.errChan:\n\t\tresultsChan <- &importResults{\n\t\t\tblocksProcessed: bi.blocksProcessed,\n\t\t\tblocksImported: bi.blocksImported,\n\t\t\terr: err,\n\t\t}\n\t\tclose(bi.quit)\n\n\t// The import finished normally.\n\tcase <-bi.doneChan:\n\t\tresultsChan <- &importResults{\n\t\t\tblocksProcessed: bi.blocksProcessed,\n\t\t\tblocksImported: bi.blocksImported,\n\t\t\terr: nil,\n\t\t}\n\t}\n}", "func (c *Client) doWaitForStatus(eniID string, checkNum, checkInterval int, finalStatus string) error {\n\tfor i := 0; i < checkNum; i++ {\n\t\ttime.Sleep(time.Second * time.Duration(checkInterval))\n\t\tenis, err := c.queryENI(eniID, \"\", \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, eni := range enis {\n\t\t\tif *eni.NetworkInterfaceId == eniID {\n\t\t\t\tswitch *eni.State {\n\t\t\t\tcase ENI_STATUS_AVAILABLE:\n\t\t\t\t\tswitch finalStatus {\n\t\t\t\t\tcase 
ENI_STATUS_ATTACHED:\n\t\t\t\t\t\tif eni.Attachment != nil && eni.Attachment.InstanceId != nil {\n\t\t\t\t\t\t\tblog.Infof(\"eni %s is attached\", eniID)\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tblog.Infof(\"eni %s is not attached\", eniID)\n\t\t\t\t\tcase ENI_STATUS_DETACHED:\n\t\t\t\t\t\tif eni.Attachment == nil {\n\t\t\t\t\t\t\tblog.Infof(\"eni %s is detached\", eniID)\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tblog.Infof(\"eni %s is not detached\", eniID)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tblog.Infof(\"eni %s is %s now\", eniID, *eni.State)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\tcase ENI_STATUS_PENDING, ENI_STATUS_ATTACHING, ENI_STATUS_DETACHING, ENI_STATUS_DELETING:\n\t\t\t\t\tblog.Infof(\"eni %s is %s\", eniID, *eni.State)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tblog.Errorf(\"timeout when wait for eni %s\", eniID)\n\treturn fmt.Errorf(\"timeout when wait for eni %s\", eniID)\n}", "func (d *Data)IsBusy() bool {\n return d.inProgressCount > 0\n}", "func (s *stream) isDone() bool {\n\tif s.limit == 0 {\n\t\treturn s.done\n\t}\n\n\treturn s.done || s.sent >= s.limit\n}", "func start_jobs(done chan<- doneStatus, jobs <-chan job) {\n num_processed := 0\n for job := range jobs {\n success := job.run()\n if success {\n num_processed++\n }\n }\n done <- doneStatus{num_processed}\n}", "func start_jobs(done chan<- doneStatus, jobs <-chan job) {\n num_processed := 0\n for job := range jobs {\n success := job.run()\n if success {\n num_processed++\n }\n }\n done <- doneStatus{num_processed}\n}", "func msmtFinished(currSrvBytes *uint) bool {\n\tif *currSrvBytes >= *msmtTotalBytes {\n\t\t// debug fmt.Println(\"\\nServer received everything from client: Msmt finished\")\n\t\treturn true\n\t}\n\n\treturn false\n}", "func worker(finished chan bool) {\n\tfmt.Println(\"routine: Started\")\n\ttime.Sleep(time.Second)\n\tfmt.Println(\"routine: Finished\")\n\tfinished <- true\n}", "func (m *Master) Done() bool {\n\tret := 
false\n\n\t// Your code here.\n\tif len(m.DoneReduceTask) == m.NReduce {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (p *promise) Pass() {\n\trt := int64((time.Since(initTime) - p.sinceTime) / time.Millisecond)\n\tp.bbr.rtStat.Add(rt)\n\tatomic.AddInt64(&p.bbr.inFlight, -1)\n\n\tp.bbr.passStat.Add(1)\n\tfmt.Printf(\"------%#v, \\n\", p.bbr.Stat())\n}", "func (b *blocksProviderImpl) isDone() bool {\n\treturn atomic.LoadInt32(&b.done) == 1\n}", "func main() {\n\tres := canFinish(2, [][]int{[]int{0, 1}})\n\t// 998001\n\tfmt.Println(res)\n}", "func progress(a int64, b int64, finish bool) float64 {\n\tif b == 0 {\n\t\tif finish {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\t}\n\treturn float64(a) / float64(b)\n}", "func (s *Shim) Done() {\n\ts.add(-1)\n}", "func (b *bar) finish() {\n\tb.mutex.Lock()\n\tdefer b.mutex.Unlock()\n\n\tif b.finished {\n\t\treturn\n\t}\n\n\tclose(b.finishChan)\n\tb.finished = true\n\tb.clear()\n}", "func EndProgress() {\n\tendTime = time.Now().Round(time.Second)\n\tendChan <- true\n\tclose(endChan)\n}", "func (w *Worker) WaitForFinish() {\n\t<-w.done\n}", "func (sh *errorGenerator) progress() int32 {\n\treturn atomic.LoadInt32(&sh.totalCnt) * 100 / sh.totalLim\n}", "func (px *Paxos) Done(seq int) {\n\t// Your code here.\n\tpx.peerDone(seq, px.me)\n}", "func (*notifs) markFinished(nl nl.Listener, tsi *meta.Snode, srcErr error, aborted bool) (done bool) {\n\tnl.MarkFinished(tsi)\n\tif aborted {\n\t\tnl.SetAborted()\n\t\tif srcErr == nil {\n\t\t\tdetail := fmt.Sprintf(\"%s from %s\", nl, tsi.StringEx())\n\t\t\tsrcErr = cmn.NewErrAborted(nl.String(), detail, nil)\n\t\t}\n\t}\n\tif srcErr != nil {\n\t\tnl.AddErr(srcErr)\n\t}\n\treturn nl.ActiveCount() == 0 || aborted\n}", "func (s StackStatus) InProgress() bool {\n\treturn strings.HasSuffix(string(s), \"IN_PROGRESS\")\n}", "func (b *Consecutive) Succeed() {\n\tb.mutex.Lock()\n\tdefer b.mutex.Unlock()\n\n\tif b.state == open {\n\t\tb.reset()\n\t}\n}", "func done(proc PDAProcessor, is_accepted bool, 
transition_count int) {\r\n\tfmt.Println(\"pda = \", proc.Name, \"::total_clock = \", transition_count, \"::method = is_accepted = \", is_accepted, \"::Current State = \", proc.Current_State)\r\n\tfmt.Println(\"Current_state: \", proc.Current_State)\r\n\tfmt.Println(proc.Stack)\r\n}", "func (m *Master) Done() bool {\n\t// Your code here.\n\treturn m.jobs.empty(ReduceJob)\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\t// clear timeout task\n\tfor i, taskPatchTime := range m.todoMapTask {\n\t\tif taskPatchTime != 0 && taskPatchTime+10 < time.Now().Unix() {\n\t\t\tfmt.Printf(\"MapTask Timeout: TaskID:%d\\n\", i)\n\t\t\tm.todoMapTask[i] = 0\n\t\t}\n\t}\n\tfor i, taskPatchTime := range m.todoReduceTask {\n\t\tif taskPatchTime != 0 && taskPatchTime+10 < time.Now().Unix() {\n\t\t\tfmt.Printf(\"ReduceTask Timeout: TaskID:%d\\n\", i)\n\t\t\tm.todoReduceTask[i] = 0\n\t\t}\n\t}\n\n\tret = len(m.todoMapTask) == 0 && len(m.todoReduceTask) == 0\n\n\treturn ret\n}", "func (async *async) wait() {\n\t<-async.done\n}", "func (b *box) completed() bool {\n\treturn b.setValues == 9\n}", "func (s *JobStatus) Done() bool {\n\treturn s.State == Done\n}", "func (s *JobStatus) Done() bool {\n\treturn s.State == Done\n}", "func (p *ManagedClustersStartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func isPrimeAsync(number int64, channel chan PrimeResult) {\n\n\tresult:= new (PrimeResult)\n\tresult.number= number\n\tresult.prime= isPrime(number)\n\tchannel <- *result\n}", "func (m *Master) mapfinished() bool {\n\tt := time.Now().Unix()\n\tret := true\n\tj := 0\n\tfor j < len(m.mapTasks) {\n\t\tif m.mapTasks[j].state == 1 {\n\t\t\tif t-m.mapTasks[j].emittime >= TIMEOUT {\n\t\t\t\tm.mapTasks[j].state = 0\n\t\t\t}\n\t\t}\n\t\tj++\n\t}\n\ti := 0\n\tfor i < len(m.mapTasks) {\n\t\tif m.mapTasks[i].state == 0 {\n\t\t\tm.nextmaptask = i\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\tfor _, mapTask := range m.mapTasks {\n\t\tif 
mapTask.state != 2 {\n\t\t\tret = false\n\t\t\tbreak\n\t\t}\n\t}\n\treturn ret\n}" ]
[ "0.62912303", "0.62500477", "0.6028249", "0.58972055", "0.5865885", "0.58186096", "0.5792455", "0.577795", "0.5751172", "0.5713418", "0.5673935", "0.56666213", "0.5660057", "0.5649432", "0.5637165", "0.5597765", "0.5597761", "0.5596717", "0.55698997", "0.55644786", "0.5552096", "0.55360466", "0.54548335", "0.5444106", "0.54356337", "0.54271054", "0.5425252", "0.5417822", "0.5414786", "0.54118437", "0.5409094", "0.540276", "0.5388826", "0.53754544", "0.5355556", "0.53438497", "0.5334829", "0.533096", "0.5329273", "0.53267246", "0.5325046", "0.5317675", "0.5302918", "0.5301447", "0.5296422", "0.529169", "0.5282429", "0.52803993", "0.52772075", "0.5273701", "0.52654684", "0.52602863", "0.52577496", "0.5241843", "0.5233235", "0.52322865", "0.5224569", "0.5223604", "0.52107495", "0.5203575", "0.52033216", "0.5202659", "0.5202033", "0.5201555", "0.5200762", "0.519937", "0.5191499", "0.5182302", "0.5180845", "0.5177373", "0.5171835", "0.51681733", "0.5167838", "0.5165922", "0.5165922", "0.51621866", "0.5159884", "0.51573706", "0.51567745", "0.5153336", "0.5153209", "0.5150407", "0.51500964", "0.51482874", "0.51464385", "0.5131984", "0.51306677", "0.5124027", "0.5119438", "0.5116498", "0.51106286", "0.5109688", "0.5108042", "0.5108022", "0.5107685", "0.5102728", "0.5101387", "0.5101387", "0.50995135", "0.5099227", "0.5097466" ]
0.0
-1
it's pointless to "wait" for a container creation it's quick, and it doesn't make sense to have different behaviours.
func (d *driver) LaunchInstance( vimInstance *catalogue.VIMInstance, hostname, image, flavour, keyPair string, networks []*catalogue.VNFDConnectionPoint, securityGroups []string, userData string) (*catalogue.Server, error) { tag := util.FuncName() d.WithFields(log.Fields{ "tag": tag, }).Debug("received request") return d.LaunchInstanceAndWaitWithIPs(vimInstance, hostname, image, flavour, keyPair, networks, securityGroups, userData, nil, nil) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func EnsureContainer(ctx context.Context, log *zap.SugaredLogger, cli *client.Client,\n\topts *EnsureContainerOpts) (container *types.ContainerJSON, created bool, err error) {\n\tlog = log.With(\"containerName\", opts.ContainerName)\n\n\tlog.Debug(\"checking state of container\")\n\n\t// Check if a ${containerName} container exists.\n\tcontainers, err := cli.ContainerList(ctx, types.ContainerListOptions{\n\t\tAll: true,\n\t\tFilters: filters.NewArgs(filters.Arg(\"name\", opts.ContainerName)),\n\t})\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tif len(containers) > 0 {\n\t\tcontainer := containers[0]\n\n\t\tlog.Debugw(\"container found\", \"containerId\", container.ID, \"state\", container.State)\n\n\t\tswitch container.State {\n\t\tcase \"running\":\n\t\tdefault:\n\t\t\tlog.Infof(\"container isn't running; starting\")\n\t\t\terr := cli.ContainerStart(ctx, container.ID, types.ContainerStartOptions{})\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorw(\"starting container failed\", \"containerId\", container.ID)\n\t\t\t\treturn nil, false, err\n\t\t\t}\n\t\t}\n\n\t\tc, err := cli.ContainerInspect(ctx, container.ID)\n\t\tif err != nil {\n\t\t\tlog.Errorw(\"inspecting container failed\", \"containerId\", container.ID)\n\t\t\treturn nil, false, err\n\t\t}\n\n\t\treturn &c, false, nil\n\t}\n\n\tlog.Infow(\"container not found; creating\")\n\n\tif opts.PullImageIfMissing {\n\t\tout, err := cli.ImagePull(ctx, opts.ContainerConfig.Image, types.ImagePullOptions{})\n\t\tif err != nil {\n\t\t\treturn nil, false, err\n\t\t}\n\n\t\tif err := PipeOutput(out, os.Stdout); err != nil {\n\t\t\treturn nil, false, err\n\t\t}\n\t} else {\n\t\timageListOpts := types.ImageListOptions{\n\t\t\tAll: true,\n\t\t}\n\t\timages, err := cli.ImageList(ctx, imageListOpts)\n\t\tif err != nil {\n\t\t\tlog.Errorw(\"retrieving list of images failed\")\n\t\t\treturn nil, false, err\n\t\t}\n\t\tfound := false\n\t\tfor _, summary := range images {\n\t\t\tif len(summary.RepoTags) > 0 && 
summary.RepoTags[0] == opts.ContainerConfig.Image {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tlog.Errorw(\"image not found\", \"image\", opts.ContainerConfig.Image)\n\t\t\terr := errors.New(\"image not found\")\n\t\t\treturn nil, false, err\n\t\t}\n\t}\n\n\tres, err := cli.ContainerCreate(ctx,\n\t\topts.ContainerConfig,\n\t\topts.HostConfig,\n\t\topts.NetworkingConfig,\n\t\topts.ContainerName,\n\t)\n\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tlog.Infow(\"starting new container\", \"id\", res.ID)\n\n\terr = cli.ContainerStart(ctx, res.ID, types.ContainerStartOptions{})\n\tif err == nil {\n\t\tlog.Infow(\"started container\", \"id\", res.ID)\n\t}\n\n\tc, err := cli.ContainerInspect(ctx, res.ID)\n\tif err == nil {\n\t\tlog.Infow(\"started container\", \"id\", res.ID)\n\t}\n\n\treturn &c, true, err\n}", "func (c *Client) waitContainer(ctx context.Context, id string) (types.ContainerWaitOKBody, error) {\n\twrapperCli, err := c.Get(ctx)\n\tif err != nil {\n\t\treturn types.ContainerWaitOKBody{}, fmt.Errorf(\"failed to get a containerd grpc client: %v\", err)\n\t}\n\n\tctx = leases.WithLease(ctx, wrapperCli.lease.ID)\n\n\twaitExit := func() *Message {\n\t\treturn c.ProbeContainer(ctx, id, -1*time.Second)\n\t}\n\n\t// wait for the task to exit.\n\tmsg := waitExit()\n\n\terrMsg := \"\"\n\terr = msg.RawError()\n\tif err != nil {\n\t\tif errtypes.IsTimeout(err) {\n\t\t\treturn types.ContainerWaitOKBody{}, err\n\t\t}\n\t\terrMsg = err.Error()\n\t}\n\n\treturn types.ContainerWaitOKBody{\n\t\tError: errMsg,\n\t\tStatusCode: int64(msg.ExitCode()),\n\t}, nil\n}", "func ensureBlobContainer(client *azureclients.AzureClientWrapper, resourceGroupName, storageAccountName, containerName string) error {\n\t// Check if blob container already exists\n\tneedToCreateBlobContainer := false\n\tvar rawResponse *http.Response\n\tctxWithResp := runtime.WithCaptureResponse(context.Background(), &rawResponse)\n\tgetBlobContainerResp, err := 
client.BlobContainerClient.Get(\n\t\tctxWithResp,\n\t\tresourceGroupName,\n\t\tstorageAccountName,\n\t\tcontainerName,\n\t\t&armstorage.BlobContainersClientGetOptions{})\n\tif err != nil {\n\t\tvar respErr *azcore.ResponseError\n\t\tif errors.As(err, &respErr) {\n\t\t\tswitch respErr.ErrorCode {\n\t\t\tcase \"ContainerNotFound\":\n\t\t\t\tneedToCreateBlobContainer = true\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"Unable to get blob container: %v\", respErr.ErrorCode)\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Found and validated existing blob container, return\n\tif !needToCreateBlobContainer {\n\t\tlog.Printf(\"Found existing blob container %s\", *getBlobContainerResp.BlobContainer.ID)\n\t\treturn nil\n\t}\n\n\tcreateBlobContainerResp, err := client.BlobContainerClient.Create(\n\t\tcontext.Background(),\n\t\tresourceGroupName,\n\t\tstorageAccountName,\n\t\tcontainerName,\n\t\tarmstorage.BlobContainer{\n\t\t\t// Note there is no Tags parameter within BlobContainer or ContainerProperties.\n\t\t\tContainerProperties: &armstorage.ContainerProperties{\n\t\t\t\tPublicAccess: to.Ptr(armstorage.PublicAccessContainer),\n\t\t\t},\n\t\t},\n\t\t&armstorage.BlobContainersClientCreateOptions{},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Created blob container %s\", *createBlobContainerResp.BlobContainer.ID)\n\treturn nil\n}", "func (n *mockAgent) createContainer(sandbox *Sandbox, c *Container) (*Process, error) {\n\treturn &Process{}, nil\n}", "func (h *DefaultHelper) ContainerWait(ctx context.Context, container string, timeout time.Duration) (int, error) {\n\tctx, cancel := context.WithTimeout(ctx, timeout)\n\tdefer cancel()\n\n\tret := make(chan waitReturn)\n\tgo func() {\n\t\ti, err := h.client.ContainerWait(ctx, container)\n\t\tret <- waitReturn{i, err}\n\t}()\n\n\tselect {\n\tcase r := <-ret:\n\t\treturn r.statusCode, r.err\n\tcase <-ctx.Done():\n\t\treturn -1, fmt.Errorf(\"Container %s didn't stop in the specified time : 
%v\", container, timeout)\n\t}\n}", "func (c Client) ContainerWait(ID string) error {\n\tctx := context.Background()\n\t_, err := c.cli.ContainerWait(ctx, ID)\n\treturn err\n}", "func CreateBusyboxContainerOk(c *check.C, cname string, cmd ...string) {\n\t// If not specified, CMD executed in container is \"top\".\n\tif len(cmd) == 0 {\n\t\tcmd = []string{\"top\"}\n\t}\n\n\tresp, err := CreateBusyboxContainer(c, cname, cmd...)\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, resp, 201)\n}", "func (c *gcsCore) WaitContainer(id string) (func() prot.NotificationType, error) {\n\tc.containerCacheMutex.Lock()\n\tentry := c.getContainer(id)\n\tif entry == nil {\n\t\tc.containerCacheMutex.Unlock()\n\t\treturn nil, gcserr.NewHresultError(gcserr.HrVmcomputeSystemNotFound)\n\t}\n\tc.containerCacheMutex.Unlock()\n\n\tf := func() prot.NotificationType {\n\t\tlogrus.Debugf(\"gcscore::WaitContainer waiting on init process waitgroup\")\n\t\tentry.initProcess.writersWg.Wait()\n\t\tlogrus.Debugf(\"gcscore::WaitContainer init process waitgroup count has dropped to zero\")\n\t\t// v1 only supported unexpected exit\n\t\treturn prot.NtUnexpectedExit\n\t}\n\n\treturn f, nil\n}", "func createContainers(ctx context.Context, cli *client.Client, containerNames []string) ([]string, error) {\n\tcntrIDs := make([]string, len(containerNames))\n\n\tfor idx, cname := range containerNames {\n\t\t// create the docker container\n\t\tcbody, err := cli.ContainerCreate(ctx, &cn.Config{Image: \"ubuntu\", Tty: true}, &cn.HostConfig{},\n\t\t\t&nw.NetworkingConfig{}, &specs.Platform{}, cname)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// start the docker container\n\t\terr = cli.ContainerStart(ctx, cbody.ID, types.ContainerStartOptions{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcntrIDs[idx] = cbody.ID\n\t}\n\n\treturn cntrIDs, nil\n}", "func (d *dockerWaiter) wait(ctx context.Context, containerID string, stopFn func()) error {\n\tstatusCh, errCh := 
d.client.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)\n\n\tif stopFn != nil {\n\t\tstopFn()\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(time.Second):\n\t\t\tif stopFn != nil {\n\t\t\t\tstopFn()\n\t\t\t}\n\n\t\tcase err := <-errCh:\n\t\t\treturn err\n\n\t\tcase status := <-statusCh:\n\t\t\tif status.StatusCode != 0 {\n\t\t\t\treturn &common.BuildError{\n\t\t\t\t\tInner: fmt.Errorf(\"exit code %d\", status.StatusCode),\n\t\t\t\t\tExitCode: int(status.StatusCode),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (n *mockAgent) startContainer(sandbox *Sandbox, c *Container) error {\n\treturn nil\n}", "func (s *server) createContainer(dir, lang string) {\n\t// need to write a new Dockerfile in dir with the an image and port\n\tdockerFile, err := os.Create(dir + \"/Dockerfile\")\n\tif err != nil {\n\t\ts.callBack(dir, lang, err)\n\t\treturn\n\t}\n\tcontents := []byte(fmt.Sprintf(\"FROM %s\\n EXPOSE %d\", imageFromLang(lang), 8080))\n\tif _, err := dockerFile.Write(contents); err != nil {\n\t\ts.callBack(dir, lang, err)\n\t\treturn\n\t}\n\n\tinputbuf, outputbuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil)\n\n\ttb := tarBuilder{tr: tar.NewWriter(inputbuf), rootDir: dir}\n\tif err := filepath.Walk(dir, tb.pathWalker); err != nil {\n\t\ts.callBack(dir, lang, err)\n\t\treturn\n\t}\n\n\timageName := generateRandomImageName()\n\ttb.tr.Flush()\n\ttb.tr.Close()\n\topts := docker.BuildImageOptions{\n\t\tName: imageName,\n\t\tInputStream: inputbuf,\n\t\tOutputStream: outputbuf,\n\t}\n\tif err := s.dockerClient.BuildImage(opts); err != nil {\n\t\ts.callBack(dir, lang, err)\n\t\treturn\n\t}\n\n\tconfig := &docker.Config{\n\t\tCPUShares: s.config.CPU,\n\t\tMemory: s.config.Memory,\n\t\tTty: true,\n\t\tOpenStdin: false,\n\t\tImage: imageName,\n\t\tNetworkDisabled: false,\n\t}\n\n\thostConfig := &docker.HostConfig{} // set our container privileges...\n\n\tcontainerOpts := docker.CreateContainerOptions{Name: \"\", Config: config, HostConfig: 
hostConfig}\n\tcontainer, err := s.dockerClient.CreateContainer(containerOpts)\n\n\tif s.config.BuildInterface != nil {\n\t\ts.config.BuildInterface.PostBuild(container, lang, err)\n\t}\n}", "func newContainer(name, secretName, image string, resourceRequirements corev1.ResourceRequirements, nodeID int, operation string) corev1.Container {\n\tenv := newPgEnvironment()\n\treturn corev1.Container{\n\t\tImage: image,\n\t\tName: name,\n\t\tCommand: []string{defaultCntCommand},\n\t\tPorts: []corev1.ContainerPort{{\n\t\t\tContainerPort: postgresqlPort,\n\t\t\tName: \"postgresql\",\n\t\t}},\n\t\tReadinessProbe: &corev1.Probe{\n\t\t\tTimeoutSeconds: 10,\n\t\t\tInitialDelaySeconds: 10,\n\t\t\tPeriodSeconds: 5,\n\t\t\tHandler: corev1.Handler{\n\t\t\t\tExec: &corev1.ExecAction{\n\t\t\t\t\tCommand: []string{defaultHealthCheckCommand},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tLivenessProbe: &corev1.Probe{\n\t\t\tTimeoutSeconds: 10,\n\t\t\tInitialDelaySeconds: 60,\n\t\t\tPeriodSeconds: 10,\n\t\t\tHandler: corev1.Handler{\n\t\t\t\tExec: &corev1.ExecAction{\n\t\t\t\t\tCommand: []string{defaultHealthCheckCommand},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tEnv: []corev1.EnvVar{\n\t\t\tcorev1.EnvVar{\n\t\t\t\tName: \"POSTGRESQL_USER\",\n\t\t\t\tValue: env.user,\n\t\t\t},\n\t\t\tcorev1.EnvVar{\n\t\t\t\tName: \"POSTGRESQL_PASSWORD\",\n\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{Name: secretName},\n\t\t\t\t\t\tKey: \"database-password\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tcorev1.EnvVar{\n\t\t\t\tName: \"POSTGRESQL_DATABASE\",\n\t\t\t\tValue: env.database,\n\t\t\t},\n\t\t\tcorev1.EnvVar{\n\t\t\t\tName: \"PGPASSFILE\",\n\t\t\t\tValue: pgpassFilePath,\n\t\t\t},\n\t\t\tcorev1.EnvVar{\n\t\t\t\tName: \"STARTUP_OPERATION\",\n\t\t\t\tValue: operation,\n\t\t\t},\n\t\t\tcorev1.EnvVar{\n\t\t\t\tName: \"ENABLE_REPMGR\",\n\t\t\t\tValue: \"true\",\n\t\t\t},\n\t\t\tcorev1.EnvVar{\n\t\t\t\tName: 
\"REPMGR_PASSWORD\",\n\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{Name: secretName},\n\t\t\t\t\t\tKey: \"repmgr-password\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tcorev1.EnvVar{\n\t\t\t\tName: \"NODE_NAME\",\n\t\t\t\tValue: name,\n\t\t\t},\n\t\t\tcorev1.EnvVar{\n\t\t\t\tName: \"NODE_ID\",\n\t\t\t\tValue: fmt.Sprintf(\"%v\", nodeID),\n\t\t\t},\n\t\t},\n\t\tResources: resourceRequirements,\n\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\tcorev1.VolumeMount{\n\t\t\t\tName: name,\n\t\t\t\tMountPath: pgDataPath,\n\t\t\t},\n\t\t},\n\t}\n}", "func CreateContainerHandler(buffer *circularbuffer.CircularBuffer[OutgoingMessage], msg IncomingMessage) {\n\t// Create the container\n\t// -------------------------\n\tid := time.Now().UnixNano()\n\tif buffer != nil {\n\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: \"Creating container\", Success: true})\n\t}\n\n\t// options will be whatever the user wants set like container limits, priority, etc. 
Its\n\t// called config in LXD land, but since we use config for our config I'm calling it options in here\n\tvar options map[string]string\n\terr := json.Unmarshal([]byte(msg.Data[\"options\"]), &options)\n\tif err != nil {\n\t\tif buffer != nil {\n\t\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: \"failed: \" + err.Error(), Success: false})\n\t\t}\n\t\treturn\n\t}\n\n\terr = lxd.CreateContainer(msg.Data[\"host\"], msg.Data[\"name\"], msg.Data[\"image\"], msg.Data[\"storagepool\"], options)\n\tif err != nil {\n\t\tif buffer != nil {\n\t\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: \"failed: \" + err.Error(), Success: false})\n\t\t}\n\t\treturn\n\t}\n\tif buffer != nil {\n\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: \"done\", Success: true})\n\t}\n\t// -------------------------\n\n\t// DNS Previously we would fail here and continue, but that has been shown to lead to multiple containers being assigned\n\t// the same IP, which turns out is a bad idea. So now we will fail, and let the user cleanup.\n\t// -------------------------\n\tif strings.ToLower(Conf.DNS.Provider) != \"dhcp\" {\n\t\tid := time.Now().UnixNano()\n\t\tif buffer != nil {\n\t\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: \"Creating DNS entry\", Success: true})\n\t\t}\n\n\t\td := dns.New(Conf)\n\t\tif d == nil {\n\t\t\tif buffer != nil {\n\t\t\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: \"failed to create DNS object for provider: \" + Conf.DNS.Provider, Success: false})\n\t\t\t}\n\t\t\treturn\n\t\t} else {\n\t\t\tip, err := d.GetARecord(msg.Data[\"name\"], Conf.DNS.NetworkBlocks)\n\t\t\tif err != nil {\n\t\t\t\tif buffer != nil {\n\t\t\t\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: \"failed: \" + err.Error(), Success: false})\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tif buffer != nil {\n\t\t\t\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: ip, Success: true})\n\t\t\t\t}\n\n\t\t\t\t// upload our network config\n\t\t\t\tsetupContainerNetwork(buffer, 
msg.Data[\"host\"], msg.Data[\"name\"], ip)\n\t\t\t}\n\t\t}\n\t}\n\t// -------------------------\n\n\t// Start the container\n\terr = StartContainerHandler(buffer, msg)\n\tif err != nil {\n\t\t// The other handler would have taken care of the message\n\t\treturn\n\t}\n\n\tid = time.Now().UnixNano()\n\tif buffer != nil {\n\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: \"Waiting for networking\", Success: true})\n\t}\n\n\t// We will try 10 times to see if the networking comes up by asking LXD for the container state\n\t// and checking to see if we found an ipv4 address\n\tnetworkUp := false\n\ti := 0\n\tfor !networkUp && i < 10 {\n\t\t// this isn't exactly as efficient as it could be but don't feel like making a new call just for this at the moment\n\t\tcontainerInfo, err := lxd.GetContainers(msg.Data[\"host\"], msg.Data[\"name\"], true)\n\t\tif err != nil {\n\t\t\tif buffer != nil {\n\t\t\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: err.Error(), Success: false})\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\t// look through the container state for an address in the inet family, right not we aren't worried about comparing\n\t\t// this address to what we got from DNS if we are using that, maybe in the future if it becomes an issue\n\t\tfor iface, info := range containerInfo[0].State.Network {\n\t\t\tif iface != \"lo\" {\n\t\t\t\tfor _, addr := range info.Addresses {\n\t\t\t\t\tif addr.Family == \"inet\" && addr.Address != \"\" {\n\t\t\t\t\t\tnetworkUp = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ti++\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\tif !networkUp {\n\t\t// we will bail if we didn't get an address since if we plan on bootstrapping we won't get far\n\t\tif buffer != nil {\n\t\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: \"no ip detected\", Success: false})\n\t\t}\n\t\treturn\n\t}\n\n\tif buffer != nil {\n\t\tbuffer.Enqueue(OutgoingMessage{ID: id, Message: \"network is up\", Success: true})\n\t}\n\n\tBootstrapContainer(buffer, msg.Data[\"host\"], 
msg.Data[\"name\"])\n}", "func TestCContainer(t *testing.T) {\n\tctx := context.Background()\n\tc := NewCContainer(nil, nil)\n\t_ = c.WaitValueEmpty(ctx) // should be instant\n\tnvalCh := make(chan interface{}, 1)\n\tgo func() {\n\t\tnv, err := c.WaitValue(ctx)\n\t\tif err != nil {\n\t\t\tt.Fatal(err.Error())\n\t\t}\n\t\tnvalCh <- nv\n\t}()\n\tc.SetValue(5)\n\tgv := <-nvalCh\n\tif gv != 5 {\n\t\tt.Fail()\n\t}\n\tdl, _ := context.WithDeadline(ctx, time.Now().Add(time.Millisecond*1))\n\t<-dl.Done()\n\terr := c.WaitValueEmpty(dl)\n\tif err != context.DeadlineExceeded {\n\t\tt.Fail()\n\t}\n\tc.SetValue(nil)\n\t_ = c.WaitValueEmpty(ctx) // should be instant\n}", "func TestContainerStatus(t *testing.T) {\n\tds, fDocker, fClock := newTestDockerService()\n\tsConfig := makeSandboxConfig(\"foo\", \"bar\", \"1\", 0)\n\tlabels := map[string]string{\"abc.xyz\": \"foo\"}\n\tannotations := map[string]string{\"foo.bar.baz\": \"abc\"}\n\timageName := \"iamimage\"\n\tconfig := makeContainerConfig(sConfig, \"pause\", imageName, 0, labels, annotations)\n\n\tvar defaultTime time.Time\n\tdt := defaultTime.UnixNano()\n\tct, st, ft := dt, dt, dt\n\tstate := runtimeapi.ContainerState_CONTAINER_CREATED\n\timageRef := DockerImageIDPrefix + imageName\n\t// The following variables are not set in FakeDockerClient.\n\texitCode := int32(0)\n\tvar reason, message string\n\n\texpected := &runtimeapi.ContainerStatus{\n\t\tState: state,\n\t\tCreatedAt: ct,\n\t\tStartedAt: st,\n\t\tFinishedAt: ft,\n\t\tMetadata: config.Metadata,\n\t\tImage: config.Image,\n\t\tImageRef: imageRef,\n\t\tExitCode: exitCode,\n\t\tReason: reason,\n\t\tMessage: message,\n\t\tMounts: []*runtimeapi.Mount{},\n\t\tLabels: config.Labels,\n\t\tAnnotations: config.Annotations,\n\t}\n\n\tfDocker.InjectImages([]dockertypes.Image{{ID: imageName}})\n\n\t// Create the container.\n\tfClock.SetTime(time.Now().Add(-1 * time.Hour))\n\texpected.CreatedAt = fClock.Now().UnixNano()\n\tconst sandboxId = \"sandboxid\"\n\tid, err := 
ds.CreateContainer(sandboxId, config, sConfig)\n\tassert.NoError(t, err)\n\n\t// Check internal labels\n\tc, err := fDocker.InspectContainer(id)\n\tassert.NoError(t, err)\n\tassert.Equal(t, c.Config.Labels[containerTypeLabelKey], containerTypeLabelContainer)\n\tassert.Equal(t, c.Config.Labels[sandboxIDLabelKey], sandboxId)\n\n\t// Set the id manually since we don't know the id until it's created.\n\texpected.Id = id\n\tassert.NoError(t, err)\n\tstatus, err := ds.ContainerStatus(id)\n\tassert.NoError(t, err)\n\tassert.Equal(t, expected, status)\n\n\t// Advance the clock and start the container.\n\tfClock.SetTime(time.Now())\n\texpected.StartedAt = fClock.Now().UnixNano()\n\texpected.State = runtimeapi.ContainerState_CONTAINER_RUNNING\n\n\terr = ds.StartContainer(id)\n\tassert.NoError(t, err)\n\tstatus, err = ds.ContainerStatus(id)\n\tassert.Equal(t, expected, status)\n\n\t// Advance the clock and stop the container.\n\tfClock.SetTime(time.Now().Add(1 * time.Hour))\n\texpected.FinishedAt = fClock.Now().UnixNano()\n\texpected.State = runtimeapi.ContainerState_CONTAINER_EXITED\n\texpected.Reason = \"Completed\"\n\n\terr = ds.StopContainer(id, 0)\n\tassert.NoError(t, err)\n\tstatus, err = ds.ContainerStatus(id)\n\tassert.Equal(t, expected, status)\n\n\t// Remove the container.\n\terr = ds.RemoveContainer(id)\n\tassert.NoError(t, err)\n\tstatus, err = ds.ContainerStatus(id)\n\tassert.Error(t, err, fmt.Sprintf(\"status of container: %+v\", status))\n}", "func (e *DockerEngine) wait(ctx context.Context, id string) (*compiler.State, error) {\n\t_, errc := e.client.ContainerWait(ctx, id)\n\tif errc != nil {\n\t\treturn nil, errc\n\t}\n\n\tinfo, err := e.client.ContainerInspect(ctx, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif info.State.Running {\n\t\t// TODO(bradrydewski) if the state is still running\n\t\t// we should call wait again.\n\t}\n\n\treturn &compiler.State{\n\t\tExited: true,\n\t\tExitCode: info.State.ExitCode,\n\t\tOOMKilled: 
info.State.OOMKilled,\n\t}, nil\n}", "func CreateContainerOK(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.ContainerController, command []string, entrypoint []string, env []string, image string, name string, sslRedirect bool, volumes []string, workingDir *string) (http.ResponseWriter, *app.GoaContainerCreateResults) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tquery := url.Values{}\n\t{\n\t\tsliceVal := command\n\t\tquery[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := entrypoint\n\t\tquery[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tquery[\"env\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tquery[\"image\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tquery[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tquery[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tquery[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tquery[\"workingDir\"] = sliceVal\n\t}\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/v2/container/create\"),\n\t\tRawQuery: query.Encode(),\n\t}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\t{\n\t\tsliceVal := command\n\t\tprms[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := 
entrypoint\n\t\tprms[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tprms[\"env\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tprms[\"image\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tprms[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tprms[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tprms[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tprms[\"workingDir\"] = sliceVal\n\t}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"ContainerTest\"), rw, req, prms)\n\tcreateCtx, _err := app.NewCreateContainerContext(goaCtx, req, service)\n\tif _err != nil {\n\t\te, ok := _err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(\"invalid test data \" + _err.Error()) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected parameter validation error: %+v\", e)\n\t\treturn nil, nil\n\t}\n\n\t// Perform action\n\t_err = ctrl.Create(createCtx)\n\n\t// Validate response\n\tif _err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", _err, logBuf.String())\n\t}\n\tif rw.Code != 200 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 200\", rw.Code)\n\t}\n\tvar mt *app.GoaContainerCreateResults\n\tif resp != nil {\n\t\tvar _ok bool\n\t\tmt, _ok = resp.(*app.GoaContainerCreateResults)\n\t\tif !_ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of app.GoaContainerCreateResults\", resp, resp)\n\t\t}\n\t\t_err = mt.Validate()\n\t\tif _err != nil {\n\t\t\tt.Errorf(\"invalid response media type: %s\", _err)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func (c MockDockerClient) ContainerCreate(ctx context.Context, command []string, envvars []string, image string, binds []string, links []string, kernelCapabilities []string, pidMode string, containerName string) 
(dockercontainer.ContainerCreateCreatedBody, error) {\n\tif c.ContainerCreateFn != nil {\n\t\tfmt.Println(\"[MockDockerClient] In \", utils.CurrentFunctionName())\n\t\tfmt.Println(\"[MockDockerClient] - ctx: \", ctx)\n\t\tfmt.Println(\"[MockDockerClient] - command: \", command)\n\t\tfmt.Println(\"[MockDockerClient] - envvars: \", envvars)\n\t\tfmt.Println(\"[MockDockerClient] - image: \", envvars)\n\t\tfmt.Println(\"[MockDockerClient] - binds: \", binds)\n\t\tfmt.Println(\"[MockDockerClient] - links: \", links)\n\t\tfmt.Println(\"[MockDockerClient] - kernelCapabilities: \", kernelCapabilities)\n\t\tfmt.Println(\"[MockDockerClient] - pidMode: \", pidMode)\n\t\tfmt.Println(\"[MockDockerClient] - containerName: \", containerName)\n\t\treturn c.ContainerCreateFn(ctx, command, envvars, image, binds, links, kernelCapabilities, pidMode, containerName)\n\t}\n\tpanic(fmt.Sprintf(\"No function defined for: %s\", utils.CurrentFunctionName()))\n}", "func (c *client) WaitContainer(ctx context.Context, ctn *pipeline.Container) error {\n\tlogrus.Tracef(\"waiting for container %s\", ctn.ID)\n\n\t// create label selector for watching the pod\n\tselector := fmt.Sprintf(\"pipeline=%s\", c.pod.ObjectMeta.Name)\n\n\t// create options for watching the container\n\topts := metav1.ListOptions{\n\t\tLabelSelector: selector,\n\t\tWatch: true,\n\t}\n\n\t// send API call to capture channel for watching the container\n\t//\n\t// https://pkg.go.dev/k8s.io/client-go/kubernetes/typed/core/v1?tab=doc#PodInterface\n\t// ->\n\t// https://pkg.go.dev/k8s.io/apimachinery/pkg/watch?tab=doc#Interface\n\twatch, err := c.kubernetes.CoreV1().Pods(c.namespace).Watch(opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\t// capture new result from the channel\n\t\t//\n\t\t// https://pkg.go.dev/k8s.io/apimachinery/pkg/watch?tab=doc#Interface\n\t\tresult := <-watch.ResultChan()\n\n\t\t// convert the object from the result to a pod\n\t\tpod, ok := result.Object.(*v1.Pod)\n\t\tif !ok {\n\t\t\treturn 
fmt.Errorf(\"unable to watch pod %s\", c.pod.ObjectMeta.Name)\n\t\t}\n\n\t\t// check if the pod is in a pending state\n\t\t//\n\t\t// https://pkg.go.dev/k8s.io/api/core/v1?tab=doc#PodStatus\n\t\tif pod.Status.Phase == v1.PodPending {\n\t\t\t// skip pod if it's in a pending state\n\t\t\tcontinue\n\t\t}\n\n\t\t// iterate through each container in the pod\n\t\tfor _, cst := range pod.Status.ContainerStatuses {\n\t\t\t// check if the container has a matching ID\n\t\t\t//\n\t\t\t// https://pkg.go.dev/k8s.io/api/core/v1?tab=doc#ContainerStatus\n\t\t\tif !strings.EqualFold(cst.Name, ctn.ID) {\n\t\t\t\t// skip container if it's not a matching ID\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// check if the container is in a terminated state\n\t\t\t//\n\t\t\t// https://pkg.go.dev/k8s.io/api/core/v1?tab=doc#ContainerState\n\t\t\tif cst.State.Terminated == nil {\n\t\t\t\t// skip container if it's not in a terminated state\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// check if the container has a terminated state reason\n\t\t\t//\n\t\t\t// https://pkg.go.dev/k8s.io/api/core/v1?tab=doc#ContainerStateTerminated\n\t\t\tif len(cst.State.Terminated.Reason) > 0 {\n\t\t\t\t// break watching the container as it's complete\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}", "func startContainer(bundle, container, address string, config *specs.Spec) int {\n\tpid := os.Getpid()\n\tr := &types.CreateContainerRequest{\n\t\tId: container,\n\t\tBundlePath: bundle,\n\t\tStdin: fmt.Sprintf(\"/proc/%d/fd/0\", pid),\n\t\tStdout: fmt.Sprintf(\"/proc/%d/fd/1\", pid),\n\t\tStderr: fmt.Sprintf(\"/proc/%d/fd/2\", pid),\n\t}\n\n\tc := getClient(address)\n\ttimestamp := uint64(time.Now().Unix())\n\tif _, err := c.CreateContainer(netcontext.Background(), r); err != nil {\n\t\tfmt.Printf(\"error %v\\n\", err)\n\t\treturn -1\n\t}\n\tif config.Process.Terminal {\n\t\ts, err := term.SetRawTerminal(os.Stdin.Fd())\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error %v\\n\", err)\n\t\t\treturn -1\n\t\t}\n\t\tdefer 
term.RestoreTerminal(os.Stdin.Fd(), s)\n\t\tmonitorTtySize(c, container, \"init\")\n\t}\n\treturn waitForExit(c, timestamp, container, \"init\")\n\n}", "func (s *DockerContainerizerTestSuite) TestDockerContainerCreate() {\n\t// Expected request JSON:\n\t//{\n\t// \"Memory\": 512,\n\t// \"CpuShares\": 1024,\n\t// \"Env\": [\n\t// \"foo=bar\"\n\t// ],\n\t// \"Cmd\": null,\n\t// \"Entrypoint\": null,\n\t// \"HostConfig\": {\n\t// \"Binds\": [\n\t// \"/data:/usr/share/nginx/html:rw\"\n\t// ],\n\t// \"PortBindings\": {\n\t// \"80/tcp\": [\n\t// {\n\t// \"HostPort\": \"30000\"\n\t// }\n\t// ]\n\t// },\n\t// \"NetworkMode\": \"host\",\n\t// \"RestartPolicy\": {},\n\t// \"LogConfig\": {}\n\t// }\n\t//}\n\tvar result struct {\n\t\tCPUShares uint64 `json:\"CpuShares\"`\n\t\tEnv []string\n\t\tMemory uint64\n\t\tHostConfig struct {\n\t\t\tBinds []string\n\t\t\tNetworkMode string\n\t\t\tPortBindings map[string][]struct {\n\t\t\t\tHostPort string\n\t\t\t}\n\t\t}\n\t}\n\n\t// Nominal case + request JSON tests\n\t_, err := s.dc.ContainerCreate(s.info)\n\tassert.Nil(s.T(), err) // Should be nil (everything is OK)\n\tif err := json.Unmarshal(s.req.body, &result); err != nil {\n\t\ts.T().Fatal(err)\n\t}\n\n\tassert.Equal(s.T(), s.info.MemoryLimit, result.Memory) // Should be equal to the task memory limit\n\tassert.Equal(s.T(), s.info.CPUSharesLimit, result.CPUShares) // Should be equal to the task CPU shares limit\n\tassert.Equal(s.T(), \"host\", result.HostConfig.NetworkMode) // Should be the string representation of the task network mode\n\tassert.Equal(s.T(), []string{\"foo=bar\"}, result.Env) // Should be formated as a list of \"key=value\" strings\n\tassert.Equal(s.T(), []string{\"/data:/usr/share/nginx/html:rw\"}, result.HostConfig.Binds) // Should be formated as a list of \"hostPath:containerPath:mode\" strings\n\n\tportBindings, ok := result.HostConfig.PortBindings[\"80/tcp\"]\n\tassert.True(s.T(), ok) // Should be present and formated as 
\"port/protocol\"\n\tassert.Equal(s.T(), \"30000\", portBindings[0].HostPort) // Should be equal to task host port\n\n\t// Invalid network mode (should throw an error)\n\tvar invalidNetwork mesos.ContainerInfo_DockerInfo_Network = 666\n\ts.info.TaskInfo.Container.Docker.Network = &invalidNetwork\n\t_, err = s.dc.ContainerCreate(s.info)\n\tassert.NotNil(s.T(), err)\n}", "func waitForGlusterContainer() error {\n\n\t//Check if docker gluster container is up and running\n\tfor {\n\t\tglusterServerContainerVal, err := helpers.GetSystemDockerNode(\"gluster-server\")\n\t\tif err != nil {\n\t\t\trwolog.Error(\"Error in checking docker gluster container for status \", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tif len(glusterServerContainerVal) > 0 {\n\t\t\tbreak\n\t\t} else {\n\t\t\trwolog.Debug(\"Sleeping for 10 seconds to get gluster docker container up\")\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t}\n\t}\n\treturn nil\n}", "func GenerateContainers(vmi *v1.VirtualMachineInstance, podVolumeName string, podVolumeMountDir string) []kubev1.Container {\n\tvar containers []kubev1.Container\n\n\tinitialDelaySeconds := 2\n\ttimeoutSeconds := 5\n\tperiodSeconds := 5\n\tsuccessThreshold := 2\n\tfailureThreshold := 5\n\n\t// Make VirtualMachineInstance Image Wrapper Containers\n\tfor _, volume := range vmi.Spec.Volumes {\n\t\tif volume.ContainerDisk != nil {\n\n\t\t\tvolumeMountDir := generateVolumeMountDir(vmi, volume.Name)\n\t\t\tdiskContainerName := fmt.Sprintf(\"volume%s\", volume.Name)\n\t\t\tdiskContainerImage := volume.ContainerDisk.Image\n\t\t\tresources := kubev1.ResourceRequirements{}\n\t\t\tif vmi.IsCPUDedicated() {\n\t\t\t\tresources.Limits = make(kubev1.ResourceList)\n\t\t\t\t// TODO(vladikr): adjust the correct cpu/mem values - this is mainly needed to allow QemuImg to run correctly\n\t\t\t\tresources.Limits[kubev1.ResourceCPU] = resource.MustParse(\"200m\")\n\t\t\t\t// k8s minimum memory reservation is linuxMinMemory = 
4194304\n\t\t\t\tresources.Limits[kubev1.ResourceMemory] = resource.MustParse(\"64M\")\n\t\t\t}\n\t\t\tcontainers = append(containers, kubev1.Container{\n\t\t\t\tName: diskContainerName,\n\t\t\t\tImage: diskContainerImage,\n\t\t\t\tImagePullPolicy: kubev1.PullIfNotPresent,\n\t\t\t\tCommand: []string{\"/entry-point.sh\"},\n\t\t\t\tEnv: []kubev1.EnvVar{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"COPY_PATH\",\n\t\t\t\t\t\tValue: volumeMountDir + \"/\" + filePrefix,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"IMAGE_PATH\",\n\t\t\t\t\t\tValue: volume.ContainerDisk.Path,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVolumeMounts: []kubev1.VolumeMount{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: podVolumeName,\n\t\t\t\t\t\tMountPath: podVolumeMountDir,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tResources: resources,\n\n\t\t\t\t// The readiness probes ensure the volume coversion and copy finished\n\t\t\t\t// before the container is marked as \"Ready: True\"\n\t\t\t\tReadinessProbe: &kubev1.Probe{\n\t\t\t\t\tHandler: kubev1.Handler{\n\t\t\t\t\t\tExec: &kubev1.ExecAction{\n\t\t\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\t\t\"cat\",\n\t\t\t\t\t\t\t\t\"/tmp/healthy\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tInitialDelaySeconds: int32(initialDelaySeconds),\n\t\t\t\t\tPeriodSeconds: int32(periodSeconds),\n\t\t\t\t\tTimeoutSeconds: int32(timeoutSeconds),\n\t\t\t\t\tSuccessThreshold: int32(successThreshold),\n\t\t\t\t\tFailureThreshold: int32(failureThreshold),\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\treturn containers\n}", "func (c *gcsCore) CreateContainer(id string, settings prot.VMHostedContainerSettings) error {\n\tc.containerCacheMutex.Lock()\n\tdefer c.containerCacheMutex.Unlock()\n\n\tif c.getContainer(id) != nil {\n\t\treturn gcserr.NewHresultError(gcserr.HrVmcomputeSystemAlreadyExists)\n\t}\n\n\tcontainerEntry := newContainerCacheEntry(id)\n\t// We need to only allow exited notifications when at least one WaitProcess\n\t// call has been written. 
We increment the writers here which is safe even\n\t// on failure because this entry will not be in the map on failure.\n\tlogrus.Debugf(\"+1 initprocess.writersWg [gcsCore::CreateContainer]\")\n\tcontainerEntry.initProcess.writersWg.Add(1)\n\n\t// Set up mapped virtual disks.\n\tif err := c.setupMappedVirtualDisks(id, settings.MappedVirtualDisks); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to set up mapped virtual disks during create for container %s\", id)\n\t}\n\tfor _, disk := range settings.MappedVirtualDisks {\n\t\tcontainerEntry.AddMappedVirtualDisk(disk)\n\t}\n\t// Set up mapped directories.\n\tif err := c.setupMappedDirectories(id, settings.MappedDirectories); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to set up mapped directories during create for container %s\", id)\n\t}\n\tfor _, dir := range settings.MappedDirectories {\n\t\tcontainerEntry.AddMappedDirectory(dir)\n\t}\n\n\t// Set up layers.\n\tscratch, layers, err := c.getLayerMounts(settings.SandboxDataPath, settings.Layers)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to get layer devices for container %s\", id)\n\t}\n\tcontainerEntry.Index, err = c.getOrAddContainerIndex(id)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get a valid container index\")\n\t}\n\n\tif err := c.mountLayers(containerEntry.Index, scratch, layers); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to mount layers for container %s\", id)\n\t}\n\n\t// Stash network adapters away\n\tfor _, adapter := range settings.NetworkAdapters {\n\t\tcontainerEntry.AddNetworkAdapter(adapter)\n\t}\n\t// Create the directory that will contain the resolv.conf file.\n\t//\n\t// TODO(rn): This isn't quite right but works. Basically, when\n\t// we do the network config in ExecProcess() the overlay for\n\t// the rootfs has already been created. When we then write\n\t// /etc/resolv.conf to the base layer it won't show up unless\n\t// /etc exists when the overlay is created. 
This is a bit\n\t// problematic as we basically later write to a what is\n\t// supposed to be read-only layer in the overlay... Ideally,\n\t// dockerd would pass a runc config with a bind mount for\n\t// /etc/resolv.conf like it does on unix.\n\tif err := os.MkdirAll(filepath.Join(baseFilesPath, \"etc\"), 0755); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create resolv.conf directory\")\n\t}\n\n\tc.containerCache[id] = containerEntry\n\n\treturn nil\n}", "func (d *DockerClient) WaitContainer(idOrName string) error {\n\tresp, err := d.makeRequest(\"POST\", fmt.Sprintf(\"%s/containers/%s/wait\", d.pathPrefix, idOrName), nil)\n\tif err != nil {\n\t\treturn err\n\t} else if resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"WaitContainier: error status code %d\", resp.StatusCode)\n\t}\n\treturn nil\n}", "func (c *DockerContainerizer) ContainerCreate(info Info) (string, error) {\n\t// Define network mode\n\tvar networkMode string\n\tswitch info.TaskInfo.GetContainer().GetDocker().GetNetwork() {\n\tcase mesos.ContainerInfo_DockerInfo_HOST:\n\t\tnetworkMode = \"host\"\n\t\tbreak\n\tcase mesos.ContainerInfo_DockerInfo_BRIDGE:\n\t\tnetworkMode = \"bridge\"\n\t\tbreak\n\tcase mesos.ContainerInfo_DockerInfo_NONE:\n\t\tnetworkMode = \"none\"\n\t\tbreak\n\tcase mesos.ContainerInfo_DockerInfo_USER:\n\t\tnetworkMode = \"user\"\n\t\tbreak\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"Invalid network mode\")\n\t}\n\n\t// Define ports mappings\n\tportsMappings := make(map[docker.Port][]docker.PortBinding)\n\tfor _, mapping := range info.TaskInfo.GetContainer().GetDocker().GetPortMappings() {\n\t\tcontainerPort := docker.Port(fmt.Sprintf(\"%d/%s\", mapping.GetContainerPort(), mapping.GetProtocol())) // ContainerPort needs to have the form port/protocol (eg. 
80/tcp)\n\t\thostPort := strconv.Itoa(int(mapping.HostPort))\n\t\tportsMappings[containerPort] = []docker.PortBinding{\n\t\t\tdocker.PortBinding{\n\t\t\t\tHostPort: hostPort,\n\t\t\t},\n\t\t}\n\t}\n\n\t// Define environment variables\n\t// Docker needs to have a string slice with strings of the form key=val\n\tvar stringifiedEnv []string\n\tenvironment := info.TaskInfo.Command.GetEnvironment().GetVariables()\n\tfor _, variable := range environment {\n\t\tstringifiedEnv = append(stringifiedEnv, fmt.Sprintf(\"%s=%s\", variable.GetName(), variable.GetValue()))\n\t}\n\n\t// Define volumes\n\t// Volumes must be passed to \"binds\" host configuration parameter\n\t// and have the form: hostPath:containerPath:mode\n\tvar binds []string\n\tfor _, volume := range info.TaskInfo.GetContainer().GetVolumes() {\n\t\tvar mode string\n\t\tswitch volume.GetMode() {\n\t\tcase mesos.RW:\n\t\t\tmode = \"rw\"\n\t\tcase mesos.RO:\n\t\t\tmode = \"ro\"\n\t\tdefault:\n\t\t\treturn \"\", fmt.Errorf(\"Invalid volume mode: %v\", volume.GetMode().String())\n\t\t}\n\n\t\tbind := fmt.Sprintf(\"%s:%s:%s\", volume.GetHostPath(), volume.GetContainerPath(), mode)\n\t\tbinds = append(binds, bind)\n\t}\n\n\t// Prepare container\n\tlogger.GetInstance().Debug(\"Creating a new container\",\n\t\tzap.String(\"networkMode\", networkMode),\n\t\tzap.Reflect(\"portsMappings\", portsMappings),\n\t)\n\tcontainer, err := c.Client.CreateContainer(docker.CreateContainerOptions{\n\t\tConfig: &docker.Config{\n\t\t\tCPUShares: int64(info.CPUSharesLimit),\n\t\t\tEnv: stringifiedEnv,\n\t\t\tImage: info.TaskInfo.GetContainer().GetDocker().GetImage(),\n\t\t\tMemory: int64(info.MemoryLimit),\n\t\t},\n\t\tHostConfig: &docker.HostConfig{\n\t\t\tBinds: binds,\n\t\t\tNetworkMode: networkMode,\n\t\t\tPortBindings: portsMappings,\n\t\t\tPrivileged: info.TaskInfo.GetContainer().GetDocker().GetPrivileged(),\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn container.ID, nil\n}", "func newContainer(task *Task, 
agent *agentState, slots, ordinal int) *container {\n\treturn &container{\n\t\ttask: task,\n\t\tid: ContainerID(uuid.New().String()),\n\t\tslots: slots,\n\t\tagent: agent,\n\t\tstate: containerStarting,\n\t\tordinal: ordinal,\n\t}\n}", "func (c *Client) execContainer(ctx context.Context, process *Process, timeout int) error {\n\tpack, err := c.watch.get(process.ContainerID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcloseStdinCh := make(chan struct{})\n\n\tvar (\n\t\tcntrID, execID = pack.container.ID(), process.ExecID\n\t\twithStdin, withTerminal = process.IO.Stream().Stdin() != nil, process.P.Terminal\n\t)\n\n\t// create exec process in container\n\texecProcess, err := pack.task.Exec(ctx, process.ExecID, process.P, func(_ string) (cio.IO, error) {\n\t\tlogrus.WithFields(\n\t\t\tlogrus.Fields{\n\t\t\t\t\"container\": cntrID,\n\t\t\t\t\"process\": execID,\n\t\t\t},\n\t\t).Debugf(\"creating cio (withStdin=%v, withTerminal=%v)\", withStdin, withTerminal)\n\n\t\tfifoset, err := containerio.NewFIFOSet(execID, withStdin, withTerminal)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn c.createIO(fifoset, cntrID, execID, closeStdinCh, process.IO.InitContainerIO)\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to exec process\")\n\t}\n\n\t// wait exec process to exit\n\texitStatus, err := execProcess.Wait(context.TODO())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to exec process\")\n\t}\n\n\tvar msg *Message\n\tdefer func() {\n\t\tif msg == nil {\n\t\t\treturn\n\t\t}\n\n\t\t// XXX: if exec process get run, io should be closed in this function,\n\t\tfor _, hook := range c.hooks {\n\t\t\tif err := hook(process.ExecID, msg); err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to execute the exec exit hooks: %v\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// delete the finished exec process in containerd\n\t\tif _, err := execProcess.Delete(context.TODO()); err != nil {\n\t\t\tlogrus.Warnf(\"failed to delete exec process %s: %s\", 
process.ExecID, err)\n\t\t}\n\n\t}()\n\n\t// start the exec process\n\tif err := execProcess.Start(ctx); err != nil {\n\t\tmsg = &Message{\n\t\t\terr: err,\n\t\t\texitCode: 126,\n\t\t\texitTime: time.Now().UTC(),\n\t\t}\n\t\treturn errors.Wrapf(err, \"failed to start exec, exec id %s\", execID)\n\t}\n\t// make sure the closeStdinCh has been closed.\n\tclose(closeStdinCh)\n\n\tt := time.Duration(timeout) * time.Second\n\tvar timeCh <-chan time.Time\n\tif t == 0 {\n\t\ttimeCh = make(chan time.Time)\n\t} else {\n\t\ttimeCh = time.After(t)\n\t}\n\n\tselect {\n\tcase status := <-exitStatus:\n\t\tmsg = &Message{\n\t\t\terr: status.Error(),\n\t\t\texitCode: status.ExitCode(),\n\t\t\texitTime: status.ExitTime(),\n\t\t}\n\tcase <-timeCh:\n\t\t// ignore the not found error because the process may exit itself before kill\n\t\tif err := execProcess.Kill(ctx, syscall.SIGKILL); err != nil && !errdefs.IsNotFound(err) {\n\t\t\t// try to force kill the exec process\n\t\t\tif err := execProcess.Kill(ctx, syscall.SIGTERM); err != nil && !errdefs.IsNotFound(err) {\n\t\t\t\treturn errors.Wrapf(err, \"failed to kill the exec process\")\n\t\t\t}\n\t\t}\n\t\t// wait for process to be killed\n\t\tstatus := <-exitStatus\n\t\tmsg = &Message{\n\t\t\terr: errors.Wrapf(status.Error(), \"failed to exec process %s, timeout\", execID),\n\t\t\texitCode: status.ExitCode(),\n\t\t\texitTime: status.ExitTime(),\n\t\t}\n\t}\n\n\treturn nil\n}", "func ContainerCreated(r *provider.CreateContainerResponse, req *provider.CreateContainerRequest, spaceOwner, executant *user.UserId) events.ContainerCreated {\n\treturn events.ContainerCreated{\n\t\tSpaceOwner: spaceOwner,\n\t\tExecutant: executant,\n\t\tRef: req.Ref,\n\t\tTimestamp: utils.TSNow(),\n\t}\n}", "func mustGetContainer(g *gomega.WithT, objs *objectSet, deploymentName, containerName string) map[string]interface{} {\n\tobj := objs.kind(\"Deployment\").nameEquals(deploymentName)\n\tg.Expect(obj).Should(gomega.Not(gomega.BeNil()))\n\tcontainer := 
obj.Container(containerName)\n\tg.Expect(container).Should(gomega.Not(gomega.BeNil()))\n\treturn container\n}", "func CreateContainerConflict(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.ContainerController, command []string, entrypoint []string, env []string, image string, name string, sslRedirect bool, volumes []string, workingDir *string) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tquery := url.Values{}\n\t{\n\t\tsliceVal := command\n\t\tquery[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := entrypoint\n\t\tquery[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tquery[\"env\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tquery[\"image\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tquery[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tquery[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tquery[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tquery[\"workingDir\"] = sliceVal\n\t}\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/v2/container/create\"),\n\t\tRawQuery: query.Encode(),\n\t}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\t{\n\t\tsliceVal := 
command\n\t\tprms[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := entrypoint\n\t\tprms[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tprms[\"env\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tprms[\"image\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tprms[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tprms[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tprms[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tprms[\"workingDir\"] = sliceVal\n\t}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"ContainerTest\"), rw, req, prms)\n\tcreateCtx, _err := app.NewCreateContainerContext(goaCtx, req, service)\n\tif _err != nil {\n\t\te, ok := _err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(\"invalid test data \" + _err.Error()) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Perform action\n\t_err = ctrl.Create(createCtx)\n\n\t// Validate response\n\tif _err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", _err, logBuf.String())\n\t}\n\tif rw.Code != 409 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 409\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar _ok bool\n\t\tmt, _ok = resp.(error)\n\t\tif !_ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func containerHealthy(containerStatus v1.ContainerStatus, init bool) bool {\n\tif !containerStatus.Ready {\n\t\treturn false\n\t}\n\tif containerStatus.State.Waiting != nil {\n\t\treturn false\n\t}\n\tif init {\n\t\tif containerStatus.State.Terminated != nil {\n\t\t\tif containerStatus.State.Terminated.ExitCode != 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else if containerStatus.State.Running == nil {\n\t\t\treturn false\n\t\t}\n\t} else 
{\n\t\tif containerStatus.State.Terminated != nil {\n\t\t\treturn false\n\t\t}\n\t\tif containerStatus.State.Running == nil {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (c *Container) Wait() error {\n\tif c.id == \"\" {\n\t\treturn fmt.Errorf(\"container %s absent\", c.id)\n\t}\n\t_, err := c.cli.ContainerWait(c.ctx, c.id)\n\treturn err\n}", "func UntilVolumeCreated(t *testing.T) {\n\tnames := helpers.GetNames(\"UntilVolume\", 0, 1, 1, 2, 1, 0, 0, 0)\n\tnames.TearDown()\n\tdefer names.TearDown()\n\n\tout, err := helpers.GetOutput(\"safescale network list\")\n\trequire.Nil(t, err)\n\t_ = out\n\n\tfmt.Println(\"Creating network \" + names.Networks[0])\n\n\tout, err = helpers.GetOutput(\"safescale network create \" + names.Networks[0] + \" --cidr 192.168.47.0/24\")\n\trequire.Nil(t, err)\n\t_ = out\n\n\tout, err = helpers.GetOutput(\"safescale network create \" + names.Networks[0] + \" --cidr 192.168.47.0/24\")\n\trequire.NotNil(t, err)\n\trequire.True(t, strings.Contains(out, \"already exist\"))\n\n\tfmt.Println(\"Creating VM \" + names.Hosts[0])\n\n\tout, err = helpers.GetOutput(\"safescale host create \" + names.Hosts[0] + \" --public --net \" + names.Networks[0])\n\trequire.Nil(t, err)\n\t_ = out\n\n\tout, err = helpers.GetOutput(\"safescale host create \" + names.Hosts[0] + \" --public --net \" + names.Networks[0])\n\trequire.NotNil(t, err)\n\trequire.True(t, strings.Contains(out, \"already exist\") || strings.Contains(out, \"already used\"))\n\n\tout, err = helpers.GetOutput(\"safescale host inspect \" + names.Hosts[0])\n\trequire.Nil(t, err)\n\t_ = out\n\n\tfmt.Println(\"Creating VM \" + names.Hosts[1])\n\n\tout, err = helpers.GetOutput(\"safescale host create \" + names.Hosts[1] + \" --public --net \" + names.Networks[0])\n\trequire.Nil(t, err)\n\t_ = out\n\n\tout, err = helpers.GetOutput(\"safescale host create \" + names.Hosts[1] + \" --public --net \" + names.Networks[0])\n\trequire.NotNil(t, err)\n\trequire.True(t, strings.Contains(out, 
\"already exist\") || strings.Contains(out, \"already used\"))\n\n\tout, err = helpers.GetOutput(\"safescale volume list\")\n\trequire.Nil(t, err)\n\trequire.True(t, strings.Contains(out, \"null\"))\n\n\tfmt.Println(\"Creating Volume \" + names.Volumes[0])\n\n\tout, err = helpers.GetOutput(\"safescale volume create \" + names.Volumes[0])\n\trequire.Nil(t, err)\n\t_ = out\n\n\tout, err = helpers.GetOutput(\"safescale volume list\")\n\trequire.Nil(t, err)\n\trequire.True(t, strings.Contains(out, names.Volumes[0]))\n}", "func (s *VarlinkInterface) WaitContainer(ctx context.Context, c VarlinkCall, name_ string, interval_ int64) error {\n\treturn c.ReplyMethodNotImplemented(ctx, \"io.podman.WaitContainer\")\n}", "func (s *serverServiceLogger) Container(ctx context.Context, name string) (c *Container, err error) {\n\tdefer func(begin time.Time) {\n\t\tLog(s.logger, begin, err, \"container_name\", name)\n\t}(time.Now())\n\treturn s.service.Container(ctx, name)\n}", "func (c *Client) recoverContainer(ctx context.Context, id string, io *containerio.IO) (err0 error) {\n\twrapperCli, err := c.Get(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get a containerd grpc client: %v\", err)\n\t}\n\n\tif !c.lock.TrylockWithRetry(ctx, id) {\n\t\treturn errtypes.ErrLockfailed\n\t}\n\tdefer c.lock.Unlock(id)\n\n\tlc, err := wrapperCli.client.LoadContainer(ctx, id)\n\tif err != nil {\n\t\tif errdefs.IsNotFound(err) {\n\t\t\treturn errors.Wrapf(errtypes.ErrNotfound, \"container %s\", id)\n\t\t}\n\t\treturn errors.Wrapf(err, \"failed to load container(%s)\", id)\n\t}\n\n\tvar (\n\t\ttimeout = 5 * time.Second\n\t\tch = make(chan error, 1)\n\t\ttask containerd.Task\n\t)\n\n\t// for normal shim, this operation should be end less than 1 second,\n\t// we give 5 second timeout to believe the shim get locked internal,\n\t// return error since we do not want a hang shim affect daemon start\n\tpctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tgo func() {\n\t\ttask, err = 
lc.Task(pctx, func(fset *cio.FIFOSet) (cio.IO, error) {\n\t\t\treturn c.attachIO(fset, io.InitContainerIO)\n\t\t})\n\t\tch <- err\n\t}()\n\n\tselect {\n\tcase <-time.After(timeout):\n\t\treturn errors.Wrap(errtypes.ErrTimeout, \"failed to connect to shim\")\n\tcase err = <-ch:\n\t}\n\n\tif err != nil {\n\t\tif !errdefs.IsNotFound(err) {\n\t\t\treturn errors.Wrap(err, \"failed to get task\")\n\t\t}\n\t\t// not found task, delete container directly.\n\t\tlc.Delete(ctx)\n\t\treturn errors.Wrap(errtypes.ErrNotfound, \"task\")\n\t}\n\n\tstatusCh, err := task.Wait(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to wait task\")\n\t}\n\n\tc.watch.add(&containerPack{\n\t\tid: id,\n\t\tcontainer: lc,\n\t\ttask: task,\n\t\tch: make(chan *Message, 1),\n\t\tclient: wrapperCli,\n\t\tsch: statusCh,\n\t})\n\n\tlogrus.Infof(\"success to recover container: %s\", id)\n\treturn nil\n}", "func (c *Container) Wait(ctx context.Context) error {\n\tdefer c.stopProfiling()\n\tstatusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)\n\tselect {\n\tcase err := <-errChan:\n\t\treturn err\n\tcase res := <-statusChan:\n\t\tif res.StatusCode != 0 {\n\t\t\tvar msg string\n\t\t\tif res.Error != nil {\n\t\t\t\tmsg = res.Error.Message\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"container returned non-zero status: %d, msg: %q\", res.StatusCode, msg)\n\t\t}\n\t\treturn nil\n\t}\n}", "func CreateContainerBadRequest(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.ContainerController, command []string, entrypoint []string, env []string, image string, name string, sslRedirect bool, volumes []string, workingDir *string) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", 
log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tquery := url.Values{}\n\t{\n\t\tsliceVal := command\n\t\tquery[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := entrypoint\n\t\tquery[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tquery[\"env\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tquery[\"image\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tquery[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tquery[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tquery[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tquery[\"workingDir\"] = sliceVal\n\t}\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/v2/container/create\"),\n\t\tRawQuery: query.Encode(),\n\t}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\t{\n\t\tsliceVal := command\n\t\tprms[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := entrypoint\n\t\tprms[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tprms[\"env\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tprms[\"image\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tprms[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tprms[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tprms[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tprms[\"workingDir\"] = sliceVal\n\t}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := 
goa.NewContext(goa.WithAction(ctx, \"ContainerTest\"), rw, req, prms)\n\tcreateCtx, _err := app.NewCreateContainerContext(goaCtx, req, service)\n\tif _err != nil {\n\t\te, ok := _err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(\"invalid test data \" + _err.Error()) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Perform action\n\t_err = ctrl.Create(createCtx)\n\n\t// Validate response\n\tif _err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", _err, logBuf.String())\n\t}\n\tif rw.Code != 400 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 400\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar _ok bool\n\t\tmt, _ok = resp.(error)\n\t\tif !_ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func (c *Client) destroyContainer(ctx context.Context, id string, timeout int64) (*Message, error) {\n\t// TODO(ziren): if we just want to stop a container,\n\t// we may need lease to lock the snapshot of container,\n\t// in case, it be deleted by gc.\n\twrapperCli, err := c.Get(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get a containerd grpc client: %v\", err)\n\t}\n\n\tctx = leases.WithLease(ctx, wrapperCli.lease.ID)\n\n\tif !c.lock.TrylockWithRetry(ctx, id) {\n\t\treturn nil, errtypes.ErrLockfailed\n\t}\n\tdefer c.lock.Unlock(id)\n\n\tpack, err := c.watch.get(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// if you call DestroyContainer to stop a container, will skip the hooks.\n\t// the caller need to execute the all hooks.\n\tpack.l.Lock()\n\tpack.skipStopHooks = true\n\tpack.l.Unlock()\n\tdefer func() {\n\t\tpack.l.Lock()\n\t\tpack.skipStopHooks = false\n\t\tpack.l.Unlock()\n\t}()\n\n\twaitExit := func() *Message {\n\t\treturn c.ProbeContainer(ctx, id, time.Duration(timeout)*time.Second)\n\t}\n\n\tvar msg *Message\n\n\t// TODO: set task request timeout by context 
timeout\n\tif err := pack.task.Kill(ctx, syscall.SIGTERM, containerd.WithKillAll); err != nil {\n\t\tif !errdefs.IsNotFound(err) {\n\t\t\treturn nil, errors.Wrap(err, \"failed to kill task\")\n\t\t}\n\t\tgoto clean\n\t}\n\t// wait for the task to exit.\n\tmsg = waitExit()\n\n\tif err := msg.RawError(); err != nil && errtypes.IsTimeout(err) {\n\t\t// timeout, use SIGKILL to retry.\n\t\tif err := pack.task.Kill(ctx, syscall.SIGKILL, containerd.WithKillAll); err != nil {\n\t\t\tif !errdefs.IsNotFound(err) {\n\t\t\t\treturn nil, errors.Wrap(err, \"failed to kill task\")\n\t\t\t}\n\t\t\tgoto clean\n\t\t}\n\t\tmsg = waitExit()\n\t}\n\n\t// ignore the error is stop time out\n\t// TODO: how to design the stop error is time out?\n\tif err := msg.RawError(); err != nil && !errtypes.IsTimeout(err) {\n\t\treturn nil, err\n\t}\n\nclean:\n\t// for normal destroy process, task.Delete() and container.Delete()\n\t// is done in ctrd/watch.go, after task exit. clean is task effect only\n\t// when unexcepted error happened in task exit process.\n\tif _, err := pack.task.Delete(ctx); err != nil {\n\t\tif !errdefs.IsNotFound(err) {\n\t\t\tlogrus.Errorf(\"failed to delete task %s again: %v\", pack.id, err)\n\t\t}\n\t}\n\tif err := pack.container.Delete(ctx); err != nil {\n\t\tif !errdefs.IsNotFound(err) {\n\t\t\treturn msg, errors.Wrap(err, \"failed to delete container\")\n\t\t}\n\t}\n\n\tlogrus.Infof(\"success to destroy container: %s\", id)\n\n\treturn msg, c.watch.remove(ctx, id)\n}", "func createContainer(dockerClient *client.Client, dockerImageName string, httpServerContainerPort int, httpServerHostPort int) (container.ContainerCreateCreatedBody, error) {\n\n\thostBinding := nat.PortBinding{\n\t\tHostIP: \"0.0.0.0\",\n\t\tHostPort: fmt.Sprintf(\"%d\", httpServerHostPort),\n\t}\n\tcontainerPort, err := nat.NewPort(\"tcp\", fmt.Sprintf(\"%d\", httpServerContainerPort))\n\tif err != nil {\n\t\tlog.Panicf(\"Unable to create a tcp httpServerContainerPort %d\\n\", 
httpServerContainerPort)\n\t}\n\n\tportBinding := nat.PortMap{containerPort: []nat.PortBinding{hostBinding}}\n\tcontainerBody, err := dockerClient.ContainerCreate(context.Background(),\n\t\t&container.Config{Image: dockerImageName},\n\t\t&container.HostConfig{\n\t\t\tPortBindings: portBinding,\n\t\t\tAutoRemove: true,\n\t\t},\n\t\tnil,\n\t\tfmt.Sprintf(\"HttpServerAt_%d\", httpServerHostPort))\n\tif err != nil {\n\t\tlog.Panicf(\"ContainerCreate failed for the image: %s, host port: %d with error: %s\\n\", dockerImageName, httpServerContainerPort, err)\n\t}\n\n\treturn containerBody, err\n}", "func (c *container) asContainer() corev1.Container {\n\treturn corev1.Container{\n\t\tName: c.Name,\n\t\tImage: c.Image,\n\t\tCommand: c.Command,\n\t\tArgs: c.Args,\n\t\tWorkingDir: c.WorkingDir,\n\t\tPorts: c.Ports,\n\t\tEnvFrom: c.EnvFrom,\n\t\tEnv: c.Env,\n\t\tResources: c.Resources,\n\t\tVolumeMounts: c.VolumeMounts,\n\t\tVolumeDevices: c.VolumeDevices,\n\t\tLivenessProbe: c.LivenessProbe,\n\t\tReadinessProbe: c.ReadinessProbe,\n\t\tLifecycle: c.Lifecycle,\n\t\tTerminationMessagePath: c.TerminationMessagePath,\n\t\tTerminationMessagePolicy: c.TerminationMessagePolicy,\n\t\tImagePullPolicy: c.ImagePullPolicy,\n\t\tSecurityContext: c.SecurityContext,\n\t\tStdin: c.Stdin,\n\t\tStdinOnce: c.StdinOnce,\n\t\tTTY: c.TTY,\n\t}\n}", "func (e *ContainerService) Create(container Container) (data *ContainerCreateResult, err error) {\n\tbody, err := JSONReader(container)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = e.client.magicRequestDecoder(\"POST\", \"containers\", body, &data)\n\treturn\n}", "func prepareContainerToAttach(base *testutil.Base, containerName string) {\n\topts := []func(*testutil.Cmd){\n\t\ttestutil.WithStdin(testutil.NewDelayOnceReader(bytes.NewReader(\n\t\t\t[]byte{16, 17}, // ctrl+p,ctrl+q, see https://www.physics.udel.edu/~watson/scen103/ascii.html\n\t\t))),\n\t}\n\t// unbuffer(1) emulates tty, which is required by `nerdctl run -t`.\n\t// unbuffer(1) can be 
installed with `apt-get install expect`.\n\t//\n\t// \"-p\" is needed because we need unbuffer to read from stdin, and from [1]:\n\t// \"Normally, unbuffer does not read from stdin. This simplifies use of unbuffer in some situations.\n\t// To use unbuffer in a pipeline, use the -p flag.\"\n\t//\n\t// [1] https://linux.die.net/man/1/unbuffer\n\tbase.CmdWithHelper([]string{\"unbuffer\", \"-p\"}, \"run\", \"-it\", \"--name\", containerName, testutil.CommonImage).\n\t\tCmdOption(opts...).AssertOutContains(\"read detach keys\")\n\tcontainer := base.InspectContainer(containerName)\n\tassert.Equal(base.T, container.State.Running, true)\n}", "func create(conf *Config) *lxc.Container {\n\tvar c *lxc.Container\n\tvar err error\n\n\t// ensure we're not attempting to recreate the same container\n\tactiveContainers := lxc.DefinedContainers(conf.LXCPath)\n\tfor idx := range activeContainers {\n\t\tif activeContainers[idx].Name() == conf.Name {\n\t\t\tfmt.Printf(\"Found existing container \\\"%s\\\"\\n\", conf.Name)\n\t\t\tc = &activeContainers[idx]\n\t\t}\n\n\t}\n\n\t// If we did not find a container, create a struct for one\n\tif c == nil {\n\t\tc, err = lxc.NewContainer(conf.Name, conf.LXCPath)\n\t\tif err != nil {\n\t\t\terrorExit(2, err)\n\t\t}\n\t}\n\n\t// double check on whether the container is defined\n\tif !(c.Defined()) {\n\t\tfmt.Printf(\"Creating new container: %s\\n\", conf.Name)\n\t\toptions := lxc.TemplateOptions{\n\t\t\tTemplate: conf.Template,\n\t\t}\n\t\t// provision the container\n\t\tif err = c.Create(options); err != nil {\n\t\t\tfmt.Printf(\"Could not create container \\\"%s\\\"\\n\", conf.Name)\n\t\t\terrorExit(2, err)\n\t\t}\n\t}\n\n\t// trace level logs end up here\n\tc.SetLogFile(\"/tmp/\" + conf.Name + \".log\")\n\tc.SetLogLevel(lxc.TRACE)\n\n\treturn c\n}", "func containerGCTest(f *framework.Framework, test testRun) {\n\tvar runtime internalapi.RuntimeService\n\tginkgo.BeforeEach(func() {\n\t\tvar err error\n\t\truntime, _, err = 
getCRIClient()\n\t\tframework.ExpectNoError(err)\n\t})\n\tfor _, pod := range test.testPods {\n\t\t// Initialize the getContainerNames function to use CRI runtime client.\n\t\tpod.getContainerNames = func() ([]string, error) {\n\t\t\trelevantContainers := []string{}\n\t\t\tcontainers, err := runtime.ListContainers(context.Background(), &runtimeapi.ContainerFilter{\n\t\t\t\tLabelSelector: map[string]string{\n\t\t\t\t\ttypes.KubernetesPodNameLabel: pod.podName,\n\t\t\t\t\ttypes.KubernetesPodNamespaceLabel: f.Namespace.Name,\n\t\t\t\t},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn relevantContainers, err\n\t\t\t}\n\t\t\tfor _, container := range containers {\n\t\t\t\trelevantContainers = append(relevantContainers, container.Labels[types.KubernetesContainerNameLabel])\n\t\t\t}\n\t\t\treturn relevantContainers, nil\n\t\t}\n\t}\n\n\tginkgo.Context(fmt.Sprintf(\"Garbage Collection Test: %s\", test.testName), func() {\n\t\tginkgo.BeforeEach(func(ctx context.Context) {\n\t\t\trealPods := getPods(test.testPods)\n\t\t\te2epod.NewPodClient(f).CreateBatch(ctx, realPods)\n\t\t\tginkgo.By(\"Making sure all containers restart the specified number of times\")\n\t\t\tgomega.Eventually(ctx, func(ctx context.Context) error {\n\t\t\t\tfor _, podSpec := range test.testPods {\n\t\t\t\t\terr := verifyPodRestartCount(ctx, f, podSpec.podName, podSpec.numContainers, podSpec.restartCount)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, setupDuration, runtimePollInterval).Should(gomega.BeNil())\n\t\t})\n\n\t\tginkgo.It(\"Should eventually garbage collect containers when we exceed the number of dead containers per container\", func(ctx context.Context) {\n\t\t\ttotalContainers := 0\n\t\t\tfor _, pod := range test.testPods {\n\t\t\t\ttotalContainers += pod.numContainers*2 + 1\n\t\t\t}\n\t\t\tgomega.Eventually(ctx, func() error {\n\t\t\t\ttotal := 0\n\t\t\t\tfor _, pod := range test.testPods {\n\t\t\t\t\tcontainerNames, err := 
pod.getContainerNames()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\ttotal += len(containerNames)\n\t\t\t\t\t// Check maxPerPodContainer for each container in the pod\n\t\t\t\t\tfor i := 0; i < pod.numContainers; i++ {\n\t\t\t\t\t\tcontainerCount := 0\n\t\t\t\t\t\tfor _, containerName := range containerNames {\n\t\t\t\t\t\t\tif containerName == pod.getContainerName(i) {\n\t\t\t\t\t\t\t\tcontainerCount++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif containerCount > maxPerPodContainer+1 {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"expected number of copies of container: %s, to be <= maxPerPodContainer: %d; list of containers: %v\",\n\t\t\t\t\t\t\t\tpod.getContainerName(i), maxPerPodContainer, containerNames)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t//Check maxTotalContainers. Currently, the default is -1, so this will never happen until we can configure maxTotalContainers\n\t\t\t\tif maxTotalContainers > 0 && totalContainers <= maxTotalContainers && total > maxTotalContainers {\n\t\t\t\t\treturn fmt.Errorf(\"expected total number of containers: %v, to be <= maxTotalContainers: %v\", total, maxTotalContainers)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())\n\n\t\t\tif maxPerPodContainer >= 2 && maxTotalContainers < 0 { // make sure constraints wouldn't make us gc old containers\n\t\t\t\tginkgo.By(\"Making sure the kubelet consistently keeps around an extra copy of each container.\")\n\t\t\t\tgomega.Consistently(ctx, func() error {\n\t\t\t\t\tfor _, pod := range test.testPods {\n\t\t\t\t\t\tcontainerNames, err := pod.getContainerNames()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor i := 0; i < pod.numContainers; i++ {\n\t\t\t\t\t\t\tcontainerCount := 0\n\t\t\t\t\t\t\tfor _, containerName := range containerNames {\n\t\t\t\t\t\t\t\tif containerName == pod.getContainerName(i) 
{\n\t\t\t\t\t\t\t\t\tcontainerCount++\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif pod.restartCount > 0 && containerCount < maxPerPodContainer+1 {\n\t\t\t\t\t\t\t\treturn fmt.Errorf(\"expected pod %v to have extra copies of old containers\", pod.podName)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())\n\t\t\t}\n\t\t})\n\n\t\tginkgo.AfterEach(func(ctx context.Context) {\n\t\t\tfor _, pod := range test.testPods {\n\t\t\t\tginkgo.By(fmt.Sprintf(\"Deleting Pod %v\", pod.podName))\n\t\t\t\te2epod.NewPodClient(f).DeleteSync(ctx, pod.podName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)\n\t\t\t}\n\n\t\t\tginkgo.By(\"Making sure all containers get cleaned up\")\n\t\t\tgomega.Eventually(ctx, func() error {\n\t\t\t\tfor _, pod := range test.testPods {\n\t\t\t\t\tcontainerNames, err := pod.getContainerNames()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif len(containerNames) > 0 {\n\t\t\t\t\t\treturn fmt.Errorf(\"%v containers still remain\", containerNames)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())\n\n\t\t\tif ginkgo.CurrentSpecReport().Failed() && framework.TestContext.DumpLogsOnFailure {\n\t\t\t\tlogNodeEvents(ctx, f)\n\t\t\t\tlogPodEvents(ctx, f)\n\t\t\t}\n\t\t})\n\t})\n}", "func (h *handler) enqueuePreexistingContainers() {\n\t// Before everything else, do cleanup of old containers. 
These have exited,\n\t// we just want to go through our wait procedure for the container (which\n\t// will also move the logs to long term storage and delete the container).\n\tfor _, c := range h.listStatus(\"exited\") {\n\t\th.jobQueueSemaphores.Enqueue(\n\t\t\tQ(c.Labels[\"job-queue\"]),\n\t\t\t// Wait until done.\n\t\t\tdockerHelper{h.dockerClient, h.dockerState, c.ID, h.storage}.Wait,\n\t\t)\n\t}\n\n\t// First, anything which is already running needs to take up slots in the\n\t// queue.\n\tfor _, c := range h.listStatus(\"running\") {\n\t\th.jobQueueSemaphores.Enqueue(\n\t\t\tQ(c.Labels[\"job-queue\"]),\n\t\t\t// Wait until done.\n\t\t\tdockerHelper{h.dockerClient, h.dockerState, c.ID, h.storage}.Wait,\n\t\t)\n\t}\n\t// Second, anything hanging around in the created state has been submitted.\n\t// Those should be started.\n\tfor _, c := range h.listStatus(\"created\") {\n\t\th.jobQueueSemaphores.Enqueue(\n\t\t\tQ(c.Labels[\"job-queue\"]),\n\t\t\t// Start and then wait.\n\t\t\tdockerHelper{h.dockerClient, h.dockerState, c.ID, h.storage}.Run,\n\t\t)\n\t}\n}", "func TestDanglingContainerRemoval(t *testing.T) {\n\tci.Parallel(t)\n\ttestutil.DockerCompatible(t)\n\n\t// start two containers: one tracked nomad container, and one unrelated container\n\ttask, cfg, ports := dockerTask(t)\n\tdefer freeport.Return(ports)\n\trequire.NoError(t, task.EncodeConcreteDriverConfig(cfg))\n\n\tclient, d, handle, cleanup := dockerSetup(t, task, nil)\n\tdefer cleanup()\n\trequire.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))\n\n\tnonNomadContainer, err := client.CreateContainer(docker.CreateContainerOptions{\n\t\tName: \"mytest-image-\" + uuid.Generate(),\n\t\tConfig: &docker.Config{\n\t\t\tImage: cfg.Image,\n\t\t\tCmd: append([]string{cfg.Command}, cfg.Args...),\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\tdefer client.RemoveContainer(docker.RemoveContainerOptions{\n\t\tID: nonNomadContainer.ID,\n\t\tForce: true,\n\t})\n\n\terr = client.StartContainer(nonNomadContainer.ID, 
nil)\n\trequire.NoError(t, err)\n\n\tuntrackedNomadContainer, err := client.CreateContainer(docker.CreateContainerOptions{\n\t\tName: \"mytest-image-\" + uuid.Generate(),\n\t\tConfig: &docker.Config{\n\t\t\tImage: cfg.Image,\n\t\t\tCmd: append([]string{cfg.Command}, cfg.Args...),\n\t\t\tLabels: map[string]string{\n\t\t\t\tdockerLabelAllocID: uuid.Generate(),\n\t\t\t},\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\tdefer client.RemoveContainer(docker.RemoveContainerOptions{\n\t\tID: untrackedNomadContainer.ID,\n\t\tForce: true,\n\t})\n\n\terr = client.StartContainer(untrackedNomadContainer.ID, nil)\n\trequire.NoError(t, err)\n\n\tdd := d.Impl().(*Driver)\n\n\treconciler := newReconciler(dd)\n\ttrackedContainers := map[string]bool{handle.containerID: true}\n\n\ttf := reconciler.trackedContainers()\n\trequire.Contains(t, tf, handle.containerID)\n\trequire.NotContains(t, tf, untrackedNomadContainer)\n\trequire.NotContains(t, tf, nonNomadContainer.ID)\n\n\t// assert tracked containers should never be untracked\n\tuntracked, err := reconciler.untrackedContainers(trackedContainers, time.Now())\n\trequire.NoError(t, err)\n\trequire.NotContains(t, untracked, handle.containerID)\n\trequire.NotContains(t, untracked, nonNomadContainer.ID)\n\trequire.Contains(t, untracked, untrackedNomadContainer.ID)\n\n\t// assert we recognize nomad containers with appropriate cutoff\n\tuntracked, err = reconciler.untrackedContainers(map[string]bool{}, time.Now())\n\trequire.NoError(t, err)\n\trequire.Contains(t, untracked, handle.containerID)\n\trequire.Contains(t, untracked, untrackedNomadContainer.ID)\n\trequire.NotContains(t, untracked, nonNomadContainer.ID)\n\n\t// but ignore if creation happened before cutoff\n\tuntracked, err = reconciler.untrackedContainers(map[string]bool{}, time.Now().Add(-1*time.Minute))\n\trequire.NoError(t, err)\n\trequire.NotContains(t, untracked, handle.containerID)\n\trequire.NotContains(t, untracked, untrackedNomadContainer.ID)\n\trequire.NotContains(t, 
untracked, nonNomadContainer.ID)\n\n\t// a full integration tests to assert that containers are removed\n\tprestineDriver := dockerDriverHarness(t, nil).Impl().(*Driver)\n\tprestineDriver.config.GC.DanglingContainers = ContainerGCConfig{\n\t\tEnabled: true,\n\t\tperiod: 1 * time.Second,\n\t\tCreationGrace: 0 * time.Second,\n\t}\n\tnReconciler := newReconciler(prestineDriver)\n\n\trequire.NoError(t, nReconciler.removeDanglingContainersIteration())\n\n\t_, err = client.InspectContainer(nonNomadContainer.ID)\n\trequire.NoError(t, err)\n\n\t_, err = client.InspectContainer(handle.containerID)\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), NoSuchContainerError)\n\n\t_, err = client.InspectContainer(untrackedNomadContainer.ID)\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), NoSuchContainerError)\n}", "func kubeconContainer(name, image, tag string) v1.Container {\n\treturn v1.Container{\n\t\tName: name,\n\t\tImage: fmt.Sprintf(\"%s:%s\", image, tag),\n\t\tImagePullPolicy: \"Always\",\n\t\tResources: v1.ResourceRequirements{\n\t\t\tLimits: v1.ResourceList{\"cpu\": cpuQuantity(0.5)},\n\t\t\tRequests: v1.ResourceList{\"cpu\": cpuQuantity(0.2)},\n\t\t},\n\t}\n}", "func CreateContainer(w http.ResponseWriter, r *http.Request) {\n\tsession, err := store.Get(r, \"session-name\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tuser := session.Values[\"user\"]\n\n\tif user == user {\n\t\t//\thttp.Redirect(w, r, \"/login\", http.StatusUnauthorized)\n\t\t//\treturn\n\t}\n\n\tif r.Method == post {\n\t\t/*name := r.FormValue(\"name\")\n\t\tf := model.Container{CreatedBy: \"sk\", Name: name, DisplayName: \"TestUbunt1\", Status: \"Creating\"}\n\n\t\tif obj := db.Create(f); obj.Error != nil {\n\t\t\tconsole.PrintError(\"Error Writing to DB\")\n\t\t\treturn\n\t\t}*/\n\t\tfmt.Fprint(w, \"Started\")\n\t\tgo create(r, user.(string))\n\t}\n}", "func (r *remoteRuntimeService) CreateContainer(ctx context.Context, 
podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {\n\tklog.V(10).InfoS(\"[RemoteRuntimeService] CreateContainer\", \"podSandboxID\", podSandBoxID, \"timeout\", r.timeout)\n\tctx, cancel := context.WithTimeout(ctx, r.timeout)\n\tdefer cancel()\n\n\treturn r.createContainerV1(ctx, podSandBoxID, config, sandboxConfig)\n}", "func IsContainerCreated(c *check.C, cname string) (bool, error) {\n\treturn isContainerStateEqual(c, cname, \"created\")\n}", "func TestContainers(t *testing.T) {\n\tctx := context.Background()\n\tstore := &mockstore.MockStore{}\n\tconfig, v := initConfig(store)\n\n\t// startup the server\n\tgrpcServer := startServer(config, v)\n\tdefer stopServer(grpcServer, v)\n\n\t// create a client\n\tconn, err := grpc.Dial(\"localhost:5001\", grpc.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatalf(\"Init gRPC conn error: %v\", err)\n\t}\n\tdefer conn.Close()\n\tclnt := pb.NewCoreRPCClient(conn)\n\n\t// test GetContainer error\n\tstore.On(\"GetContainer\", \"012\").Return(&types.Container{}, nil)\n\t_, err = clnt.GetContainer(ctx, &pb.ContainerID{Id: \"012\"})\n\tassert.Contains(t, err.Error(), \"Container ID must be length of 64\")\n\n\tID := \"586f906185de3ed755d0db1c0f37149c1eae1ba557a26adccbe1f51c500d07d1\"\n\tstore.On(\"GetContainer\", ID).Return(&types.Container{}, nil)\n\t_, err = clnt.GetContainer(ctx, &pb.ContainerID{Id: ID})\n\tassert.Contains(t, err.Error(), \"Engine is nil\")\n\n\t// test GetContainers\n\tcontainer := types.Container{\n\t\tID: ID,\n\t}\n\tstore.On(\"GetContainers\", []string{ID}).Return([]*types.Container{&container}, nil)\n\tgcResp, err := clnt.GetContainers(ctx, &pb.ContainerIDs{Ids: []string{ID}})\n\tassert.NoError(t, err)\n\t// 因为Container的engine是nil,所以获取不到信息,调用返回为空\n\tlog.Info(gcResp)\n\tassert.Nil(t, gcResp.GetContainers())\n}", "func Acquire(c *Container) bool {\n\treturn C.lxc_container_get(c.container) == 1\n}", "func ensureSidecarContainer(ctx 
context.Context, cli *client.Client, workDir string, log *zap.SugaredLogger, controlNetworkID string) (id string, err error) {\n\tdockerSock := \"/var/run/docker.sock\"\n\tif host := cli.DaemonHost(); strings.HasPrefix(host, \"unix://\") {\n\t\tdockerSock = host[len(\"unix://\"):]\n\t} else {\n\t\tlog.Warnf(\"guessing docker socket as %s\", dockerSock)\n\t}\n\tcontainer, _, err := docker.EnsureContainer(ctx, log, cli, &docker.EnsureContainerOpts{\n\t\tContainerName: \"testground-sidecar\",\n\t\tContainerConfig: &container.Config{\n\t\t\tImage: \"ipfs/testground:latest\",\n\t\t\tEntrypoint: []string{\"testground\"},\n\t\t\tCmd: []string{\"sidecar\", \"--runner\", \"docker\"},\n\t\t\tEnv: []string{\"REDIS_HOST=testground-redis\"},\n\t\t},\n\t\tHostConfig: &container.HostConfig{\n\t\t\tNetworkMode: container.NetworkMode(controlNetworkID),\n\t\t\t// To lookup namespaces. Can't use SandboxKey for some reason.\n\t\t\tPidMode: \"host\",\n\t\t\t// We need _both_ to actually get a network namespace handle.\n\t\t\t// We may be able to drop sys_admin if we drop netlink\n\t\t\t// sockets that we're not using.\n\t\t\tCapAdd: []string{\"NET_ADMIN\", \"SYS_ADMIN\"},\n\t\t\t// needed to talk to docker.\n\t\t\tMounts: []mount.Mount{{\n\t\t\t\tType: mount.TypeBind,\n\t\t\t\tSource: dockerSock,\n\t\t\t\tTarget: \"/var/run/docker.sock\",\n\t\t\t}},\n\t\t},\n\t\tPullImageIfMissing: false, // Don't pull from Docker Hub\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn container.ID, err\n}", "func (d *Docker) detectWorkload(ctx context.Context, ID string) (*Container, error) {\n\t// 标准化为 inspect 的数据\n\tvar c enginetypes.ContainerJSON\n\tvar err error\n\tutils.WithTimeout(ctx, d.config.GlobalConnectionTimeout, func(ctx context.Context) {\n\t\tc, err = d.client.ContainerInspect(ctx, ID)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlabel := c.Config.Labels\n\n\tif _, ok := label[cluster.ERUMark]; !ok {\n\t\treturn nil, common.ErrInvaildContainer\n\t}\n\n\t// TODO 
should be removed in the future\n\tif d.config.CheckOnlyMine && !utils.UseLabelAsFilter() && !d.checkHostname(c.Config.Env) {\n\t\treturn nil, common.ErrInvaildContainer\n\t}\n\n\t// 生成基准 meta\n\tmeta := coreutils.DecodeMetaInLabel(ctx, label)\n\n\t// 是否符合 eru pattern,如果一个容器又有 ERUMark 又是三段式的 name,那它就是个 ERU 容器\n\tcontainer, err := generateContainerMeta(ctx, c, meta, label)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// 计算容器用了多少 CPU\n\tcontainer = calcuateCPUNum(container, c, d.cpuCore)\n\tif container.Memory == 0 || container.Memory == math.MaxInt64 {\n\t\tcontainer.Memory = d.memory\n\t}\n\t// 活着才有发布必要\n\tif c.NetworkSettings != nil && container.Running {\n\t\tnetworks := map[string]string{}\n\t\tfor name, endpoint := range c.NetworkSettings.Networks {\n\t\t\tnetworkmode := enginecontainer.NetworkMode(name)\n\t\t\tif networkmode.IsHost() {\n\t\t\t\tcontainer.LocalIP = common.LocalIP\n\t\t\t\tnetworks[name] = d.nodeIP\n\t\t\t} else {\n\t\t\t\tcontainer.LocalIP = endpoint.IPAddress\n\t\t\t\tnetworks[name] = endpoint.IPAddress\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tcontainer.Networks = networks\n\t}\n\n\treturn container, nil\n}", "func (e *dockerExec) wait(ctx context.Context) (state execState, err error) {\n\t// We start profiling here. Note that if the executor is restarted,\n\t// and thus reattaches to the container, it will lose samples.\n\tprofc := make(chan stats)\n\tprofctx, cancelprof := context.WithCancel(ctx)\n\tgo func() {\n\t\tprofc <- e.profile(profctx)\n\t}()\n\n\t// The documentation for ContainerWait seems to imply that both channels will\n\t// be sent. In practice it's one or the other, and it's also not buffered. 
Cool API.\n\trespc, errc := e.client.ContainerWait(ctx, e.containerName(), container.WaitConditionNotRunning)\n\tvar code int64\n\tselect {\n\tcase err := <-errc:\n\t\tcancelprof()\n\t\treturn execInit, errors.E(\"ContainerWait\", e.containerName(), kind(err), err)\n\tcase resp := <-respc:\n\t\tcode = resp.StatusCode\n\t}\n\t// Best-effort writing of log files.\n\trc, err := e.client.ContainerLogs(\n\t\tctx, e.containerName(),\n\t\ttypes.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})\n\tif err == nil {\n\t\tstderr, err := os.Create(e.path(\"stderr\"))\n\t\tif err != nil {\n\t\t\te.Log.Errorf(\"failed to stderr log file %q: %s\", e.path(\"stderr\"), err)\n\t\t\tstderr = nil\n\t\t}\n\t\tstdout, err := os.Create(e.path(\"stdout\"))\n\t\tif err != nil {\n\t\t\te.Log.Errorf(\"failed to stdout log file %q: %s\", e.path(\"stdout\"), err)\n\t\t\tstdout = nil\n\t\t}\n\t\t_, err = stdcopy.StdCopy(stdout, stderr, rc)\n\t\tif err != nil {\n\t\t\te.Log.Errorf(\"failed to copy stdout and stderr logs: %s\", err)\n\t\t}\n\t\trc.Close()\n\t\tif stderr != nil {\n\t\t\tstderr.Close()\n\t\t}\n\t\tif stdout != nil {\n\t\t\tstdout.Close()\n\t\t}\n\t}\n\te.Docker, err = e.client.ContainerInspect(ctx, e.containerName())\n\n\t// Retrieve the profile before we clean up the results.\n\tcancelprof()\n\te.Manifest.Stats = <-profc\n\n\tif err != nil {\n\t\treturn execInit, errors.E(\"ContainerInspect\", e.containerName(), kind(err), err)\n\t}\n\t// Docker can return inconsistent return codes between a ContainerWait and\n\t// a ContainerInspect call. 
If either of these calls return a non zero exit code,\n\t// we use that as the exit status.\n\tif code == 0 && e.Docker.State.ExitCode != 0 {\n\t\tcode = int64(e.Docker.State.ExitCode)\n\t}\n\n\tfinishedAt, err := time.Parse(reflow.DockerInspectTimeFormat, e.Docker.State.FinishedAt)\n\tif err != nil {\n\t\treturn execInit, errors.E(errors.Invalid, errors.Errorf(\"parsing docker time %s: %v\", e.Docker.State.FinishedAt, err))\n\t}\n\n\tvar startedAt time.Time\n\tif t, err := time.Parse(reflow.DockerInspectTimeFormat, e.Docker.State.StartedAt); err == nil {\n\t\tstartedAt = t\n\t}\n\n\t// If the container was incredibly short-lived, we get bad time values from docker, for example:\n\t// start 2021-09-29 05:45:05.330744043\n\t// end 2021-09-29 05:45:05.330505255\n\t// notice that start is after end, so in such a case, just use now as the finish time.\n\tif !startedAt.Before(finishedAt) {\n\t\tfinishedAt = time.Now()\n\t}\n\n\t// Note: /dev/kmsg only exists on linux. If the container is running on a non-linux machine isOOMSystem will\n\t// always return false.\n\toomSys, oomSysReason := e.isOOMSystem(startedAt, finishedAt)\n\toomNode, oomNodeReason := e.isOOMNode(startedAt, finishedAt)\n\n\t// The Docker daemon does not reliably report the container's exit\n\t// status correctly, and, what's worse, ContainerWait can return\n\t// successfully while the container is still running. This appears\n\t// to happen during system shutdown (e.g., the Docker daemon is\n\t// killed before Reflow) and also on system restart (Docker daemon\n\t// restores container state from disk).\n\t//\n\t// We are not currently able to distinguish between a system restart\n\t// and a successful exit.\n\t//\n\t// This appears to be fixed in Docker/Moby 1.13, but we are not yet\n\t// ready to adopt this. 
See:\n\t// \thttps://github.com/moby/moby/issues/31262\n\t//\n\t// TODO(marius): either upgrade to Docker/Moby 1.13, or else add\n\t// some sort of epoch detection (Docker isn't helpful here either,\n\t// but system start time might be a good proxy.)\n\tswitch {\n\t// ContainerWait returns while the container is in running state\n\t// (explicitly, or without a finish time). This happens during\n\t// system shutdown.\n\tcase e.Docker.State.Running || finishedAt.IsZero():\n\t\treturn execInit, errors.E(\n\t\t\t\"exec\", e.id, errors.Temporary,\n\t\t\terrors.New(\"container returned in running state; docker daemon likely shutting down\"))\n\t// The remaining appear to be true completions.\n\tcase code == 0:\n\t\tif err := e.install(ctx); err != nil {\n\t\t\treturn execInit, err\n\t\t}\n\tcase code == temporaryExecErrorExitCode:\n\t\te.Manifest.Result.Err = errors.Recover(errors.E(\"exec\", e.id, errors.Temporary,\n\t\t\terrors.Errorf(\"exec returned exit code %d (considered temporary)\", temporaryExecErrorExitCode)))\n\tcase e.Docker.State.OOMKilled:\n\t\te.Manifest.Result.Err = errors.Recover(errors.E(\"exec\", e.id, errors.OOM, errors.New(\"killed by OOM killer (docker)\")))\n\tcase oomSys:\n\t\te.Manifest.Result.Err = errors.Recover(errors.E(\"exec\", e.id, errors.OOM, errors.Errorf(\"killed by OOM killer: %s\", oomSysReason)))\n\tcase e.isOOMGolang(ctx):\n\t\te.Manifest.Result.Err = errors.Recover(errors.E(\"exec\", e.id, errors.OOM, errors.New(\"detected golang OOM error\")))\n\tcase code == possibleOOMExitCode:\n\t\te.Manifest.Result.Err = errors.Recover(errors.E(\"exec\", e.id, errors.OOM,\n\t\t\terrors.Errorf(\"docker returned possible OOM exit code %d\", possibleOOMExitCode)))\n\tcase oomNode:\n\t\te.Manifest.Result.Err = errors.Recover(errors.E(\"exec\", e.id, errors.OOM, oomNodeReason))\n\tdefault:\n\t\tvar hintStr string\n\t\tif n := e.stderrHints.Len(); n > 0 {\n\t\t\thintStr = hintStr + e.stderrHints.String()\n\t\t}\n\t\tif n := e.stdoutHints.Len(); n > 
0 {\n\t\t\thintStr = hintStr + e.stdoutHints.String()\n\t\t}\n\t\tif len(hintStr) > 0 {\n\t\t\thintStr = fmt.Sprintf(\" (hints from exec logs):\\n%s\", hintStr)\n\t\t}\n\t\te.Manifest.Result.Err = errors.Recover(errors.E(\"exec\", e.id, errors.DockerExec, errors.Errorf(\"exited with code %d%s\", code, hintStr)))\n\t}\n\n\t// Clean up args. TODO(marius): replace these with symlinks to sha256s also?\n\tif err := os.RemoveAll(e.path(\"arg\")); err != nil {\n\t\te.Log.Errorf(\"failed to remove arg path: %v\", err)\n\t}\n\tif err := os.RemoveAll(e.path(\"tmp\")); err != nil {\n\t\te.Log.Errorf(\"failed to remove tmpdir: %v\", err)\n\t}\n\treturn execComplete, nil\n}", "func TestServiceCreateWithMultipleContainers(t *testing.T) {\n\tif test.ServingFlags.DisableOptionalAPI {\n\t\tt.Skip(\"Multiple containers support is not required by Knative Serving API Specification\")\n\t}\n\tif !test.ServingFlags.EnableBetaFeatures {\n\t\tt.Skip()\n\t}\n\tt.Parallel()\n\tclients := test.Setup(t)\n\n\tnames := test.ResourceNames{\n\t\tService: test.ObjectNameForTest(t),\n\t\tImage: test.ServingContainer,\n\t\tSidecars: []string{\n\t\t\ttest.SidecarContainer,\n\t\t},\n\t}\n\n\t// Clean up on test failure or interrupt\n\ttest.EnsureTearDown(t, clients, &names)\n\tcontainers := []corev1.Container{{\n\t\tImage: pkgtest.ImagePath(names.Image),\n\t\tPorts: []corev1.ContainerPort{{\n\t\t\tContainerPort: 8881,\n\t\t}},\n\t}, {\n\t\tImage: pkgtest.ImagePath(names.Sidecars[0]),\n\t}}\n\n\t// Setup initial Service\n\tif _, err := v1test.CreateServiceReady(t, clients, &names, func(svc *v1.Service) {\n\t\tsvc.Spec.Template.Spec.Containers = containers\n\t}); err != nil {\n\t\tt.Fatalf(\"Failed to create initial Service %v: %v\", names.Service, err)\n\t}\n\n\t// Validate State after Creation\n\tif err := validateControlPlane(t, clients, names, \"1\" /*1 is the expected generation value*/); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif err := validateDataPlane(t, clients, names, 
test.MultiContainerResponse); err != nil {\n\t\tt.Error(err)\n\t}\n}", "func (i *Docker) Create(assembly *global.AssemblyWithComponents, id string, instance bool, act_id string) (string, error) {\n\tlog.Info(\"%q\", assembly)\n\tpair_endpoint, perrscm := global.ParseKeyValuePair(assembly.Inputs, \"endpoint\")\n\tif perrscm != nil {\n\t\tlog.Error(\"Failed to get the endpoint value : %s\", perrscm)\n\t\treturn \"\", perrscm\n\t}\n\n\tvar endpoint string\n\tif pair_endpoint.Value == BAREMETAL {\n\n\t\t/*\n\t\t * swarm host is obtained from conf file. Swarm host is considered\n\t\t * only when the 'endpoint' is baremetal in the Component JSON\n\t\t */\n\t\tapi_host, _ := config.GetString(\"docker:swarm_host\")\n\t\tendpoint = api_host\n\n\t\tcontainerID, containerName, cerr := create(assembly, endpoint)\n\t\tif cerr != nil {\n\t\t\tlog.Error(\"container creation was failed : %s\", cerr)\n\t\t\treturn \"\", cerr\n\t\t}\n\n\t\tpair_cpu, perrscm := global.ParseKeyValuePair(assembly.Components[0].Inputs, \"cpu\")\n\t\tif perrscm != nil {\n\t\t\tlog.Error(\"Failed to get the endpoint value : %s\", perrscm)\n\t\t}\n\n\t\tpair_memory, iderr := global.ParseKeyValuePair(assembly.Components[0].Outputs, \"memory\")\n\t\tif iderr != nil {\n\t\t\tlog.Error(\"Failed to get the endpoint value : %s\", iderr)\n\t\t}\n\n\t\tserr := StartContainer(containerID, endpoint, pair_cpu.Value, pair_memory.Value)\n\t\tif serr != nil {\n\t\t\tlog.Error(\"container starting error : %s\", serr)\n\t\t\treturn \"\", serr\n\t\t}\n\n\t\tipaddress, iperr := setContainerNAL(containerID, containerName, endpoint)\n\t\tif iperr != nil {\n\t\t\tlog.Error(\"set container network was failed : %s\", iperr)\n\t\t\treturn \"\", iperr\n\t\t}\n\n\t\therr := setHostName(containerName, ipaddress)\n\t\tif herr != nil {\n\t\t\tlog.Error(\"set host name error : %s\", herr)\n\t\t}\n\n\t\tupdateContainerJSON(assembly, ipaddress, containerID, endpoint)\n\t} else {\n\t\tendpoint = pair_endpoint.Value\n\t\tcreate(assembly, 
endpoint)\n\t}\n\n\treturn \"\", nil\n}", "func startContainer(config *startConfig) (int, error) {\n\tconn, err := runvRequest(config.Root, config.Name, RUNV_STARTCONTAINER, config)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn containerTtySplice(config.Root, config.Name, conn, true)\n}", "func Attach(ctx context.Context, client *containerd.Client, req string, options types.ContainerAttachOptions) error {\n\t// Find the container.\n\tvar container containerd.Container\n\twalker := &containerwalker.ContainerWalker{\n\t\tClient: client,\n\t\tOnFound: func(ctx context.Context, found containerwalker.Found) error {\n\t\t\tcontainer = found.Container\n\t\t\treturn nil\n\t\t},\n\t}\n\tn, err := walker.Walk(ctx, req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error when trying to find the container: %w\", err)\n\t}\n\tif n == 0 {\n\t\treturn fmt.Errorf(\"no container is found given the string: %s\", req)\n\t} else if n > 1 {\n\t\treturn fmt.Errorf(\"more than one containers are found given the string: %s\", req)\n\t}\n\n\t// Attach to the container.\n\tvar task containerd.Task\n\tdetachC := make(chan struct{})\n\tspec, err := container.Spec(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get the OCI runtime spec for the container: %w\", err)\n\t}\n\tvar (\n\t\topt cio.Opt\n\t\tcon console.Console\n\t)\n\tif spec.Process.Terminal {\n\t\tcon = console.Current()\n\t\tdefer con.Reset()\n\t\tif err := con.SetRaw(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to set the console to raw mode: %w\", err)\n\t\t}\n\t\tcloser := func() {\n\t\t\tdetachC <- struct{}{}\n\t\t\t// task will be set by container.Task later.\n\t\t\t//\n\t\t\t// We cannot use container.Task(ctx, cio.Load) to get the IO here\n\t\t\t// because the `cancel` field of the returned `*cio` is nil. 
[1]\n\t\t\t//\n\t\t\t// [1] https://github.com/containerd/containerd/blob/8f756bc8c26465bd93e78d9cd42082b66f276e10/cio/io.go#L358-L359\n\t\t\tio := task.IO()\n\t\t\tif io == nil {\n\t\t\t\tlogrus.Errorf(\"got a nil io\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tio.Cancel()\n\t\t}\n\t\tin, err := consoleutil.NewDetachableStdin(con, options.DetachKeys, closer)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\topt = cio.WithStreams(in, con, nil)\n\t} else {\n\t\topt = cio.WithStreams(options.Stdin, options.Stdout, options.Stderr)\n\t}\n\ttask, err = container.Task(ctx, cio.NewAttach(opt))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to attach to the container: %w\", err)\n\t}\n\tif spec.Process.Terminal {\n\t\tif err := consoleutil.HandleConsoleResize(ctx, task, con); err != nil {\n\t\t\tlogrus.WithError(err).Error(\"console resize\")\n\t\t}\n\t}\n\tsigC := signalutil.ForwardAllSignals(ctx, task)\n\tdefer signalutil.StopCatch(sigC)\n\n\t// Wait for the container to exit.\n\tstatusC, err := task.Wait(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to init an async wait for the container to exit: %w\", err)\n\t}\n\tselect {\n\t// io.Wait() would return when either 1) the user detaches from the container OR 2) the container is about to exit.\n\t//\n\t// If we replace the `select` block with io.Wait() and\n\t// directly use task.Status() to check the status of the container after io.Wait() returns,\n\t// it can still be running even though the container is about to exit (somehow especially for Windows).\n\t//\n\t// As a result, we need a separate detachC to distinguish from the 2 cases mentioned above.\n\tcase <-detachC:\n\t\tio := task.IO()\n\t\tif io == nil {\n\t\t\treturn errors.New(\"got a nil IO from the task\")\n\t\t}\n\t\tio.Wait()\n\tcase status := <-statusC:\n\t\tcode, _, err := status.Result()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif code != 0 {\n\t\t\treturn errutil.NewExitCoderErr(int(code))\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Client) 
WaitContainer(ctx context.Context, id string) (types.ContainerWaitOKBody, error) {\n\twaitBody, err := c.waitContainer(ctx, id)\n\tif err != nil {\n\t\treturn waitBody, convertCtrdErr(err)\n\t}\n\treturn waitBody, nil\n}", "func (runtime DockerRuntime) BuildAndRunContainer(ctx context.Context, eazy config.EazyYml, cfg config.RuntimeConfig, routableLinks *[]string, liveContainers *[]string) (string, error) {\n\n\ttar, err := archive.TarWithOptions(\"./\", &archive.TarOptions{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer tar.Close()\n\n\topt := types.ImageBuildOptions{\n\t\tDockerfile: cfg.Dockerfile,\n\t}\n\n\tresp, err := runtime.client.ImageBuild(ctx, tar, opt)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\timageID := \"\"\n\taux := func(msg jsonmessage.JSONMessage) {\n\t\tvar result types.BuildResult\n\t\tif err := json.Unmarshal(*msg.Aux, &result); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t} else {\n\t\t\timageID = result.ID\n\t\t}\n\t}\n\n\tjsonmessage.DisplayJSONMessagesStream(resp.Body, os.Stdout, os.Stdout.Fd(), true, aux)\n\n\tif err == nil {\n\t\tresp.Body.Close()\n\t} else {\n\t\treturn \"\", err\n\t}\n\n\tcontainerID, err := createContainer(ctx, eazy, runtime, imageID, cfg, *routableLinks)\n\tif err != nil {\n\t\treturn containerID, err\n\t}\n\n\t*liveContainers = append(*liveContainers, containerID)\n\n\tvar oldStateIn *term.State\n\tif cfg.Attach {\n\t\toldStateIn, _ = term.SetRawTerminal(os.Stdin.Fd())\n\t}\n\n\terr = startContainer(ctx, containerID, runtime, cfg)\n\tif err == nil {\n\t\tif cfg.IsRootImage {\n\t\t\t*routableLinks = append(*routableLinks, (containerID + \":\" + eazy.Name))\n\t\t}\n\t}\n\n\tif cfg.Attach {\n\t\tterm.RestoreTerminal(os.Stdin.Fd(), oldStateIn)\n\t}\n\n\treturn imageID, err\n\n}", "func TestContainerCreationConflict(t *testing.T) {\n\tsConfig := makeSandboxConfig(\"foo\", \"bar\", \"1\", 0)\n\tconfig := makeContainerConfig(sConfig, \"pause\", \"iamimage\", 0, map[string]string{}, 
map[string]string{})\n\tcontainerName := makeContainerName(sConfig, config)\n\tconst sandboxId = \"sandboxid\"\n\tconst containerId = \"containerid\"\n\tconflictError := fmt.Errorf(\"Error response from daemon: Conflict. The name \\\"/%s\\\" is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.\",\n\t\tcontainerName, containerId)\n\tnoContainerError := fmt.Errorf(\"Error response from daemon: No such container: %s\", containerId)\n\trandomError := fmt.Errorf(\"random error\")\n\n\tfor desc, test := range map[string]struct {\n\t\tcreateError error\n\t\tremoveError error\n\t\texpectError error\n\t\texpectCalls []string\n\t\texpectFields int\n\t}{\n\t\t\"no create error\": {\n\t\t\texpectCalls: []string{\"create\"},\n\t\t\texpectFields: 6,\n\t\t},\n\t\t\"random create error\": {\n\t\t\tcreateError: randomError,\n\t\t\texpectError: randomError,\n\t\t\texpectCalls: []string{\"create\"},\n\t\t},\n\t\t\"conflict create error with successful remove\": {\n\t\t\tcreateError: conflictError,\n\t\t\texpectError: conflictError,\n\t\t\texpectCalls: []string{\"create\", \"remove\"},\n\t\t},\n\t\t\"conflict create error with random remove error\": {\n\t\t\tcreateError: conflictError,\n\t\t\tremoveError: randomError,\n\t\t\texpectError: conflictError,\n\t\t\texpectCalls: []string{\"create\", \"remove\"},\n\t\t},\n\t\t\"conflict create error with no such container remove error\": {\n\t\t\tcreateError: conflictError,\n\t\t\tremoveError: noContainerError,\n\t\t\texpectCalls: []string{\"create\", \"remove\", \"create\"},\n\t\t\texpectFields: 7,\n\t\t},\n\t} {\n\t\tt.Logf(\"TestCase: %s\", desc)\n\t\tds, fDocker, _ := newTestDockerService()\n\n\t\tif test.createError != nil {\n\t\t\tfDocker.InjectError(\"create\", test.createError)\n\t\t}\n\t\tif test.removeError != nil {\n\t\t\tfDocker.InjectError(\"remove\", test.removeError)\n\t\t}\n\t\tid, err := ds.CreateContainer(sandboxId, config, sConfig)\n\t\trequire.Equal(t, 
test.expectError, err)\n\t\tassert.NoError(t, fDocker.AssertCalls(test.expectCalls))\n\t\tif err == nil {\n\t\t\tc, err := fDocker.InspectContainer(id)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Len(t, strings.Split(c.Name, nameDelimiter), test.expectFields)\n\t\t}\n\t}\n}", "func (sm SchedulerModel) createContainerSpec(name string, image string, env map[string]string) (containers []v1.Container) {\n\tenvVars := []v1.EnvVar{}\n\n\tfor envName, envVal := range env {\n\t\tenvVars = append(envVars, v1.EnvVar{\n\t\t\tName: envName,\n\t\t\tValue: envVal,\n\t\t})\n\t}\n\n\tcontainer := v1.Container{\n\t\tName: name,\n\t\tImage: image,\n\t\tEnv: envVars,\n\t}\n\n\treturn []v1.Container{container}\n}", "func isContainerRunning(\n docker *client.Client, \n containerId string) (isrunning bool, err error) {\n\n inspect, err := docker.ContainerInspect(context.Background(), containerId)\n if err != nil {\n return\n }\n\n isrunning = inspect.State.Running\n return \n}", "func AssertRequestedContainersAreGone() {\n\n\tcli := GetDockerClient()\n\n\tcontainers, err := getContainers(cli)\n\tif err != nil {\n\t\tlog.Panicf(\"Cannot access live containers...\\n\")\n\t}\n\n\tcontainersCount := len(containers)\n\n\tif containersCount > 0 {\n\t\tlog.Printf(\"Containers are still alive... Wanted 0 containers but got %d.\\n\", containersCount)\n\t}\n\n\t// Assert owned containers are not running\n\tfor _, container := range containers {\n\t\tlog.Panicf(\"Found container %s with state %s, status %s.\\n\", container.ID, container.State, container.Status)\n\t}\n\n\tfmt.Printf(\"\\n**** Passed RequestedContainers == 0 assertion. 
****\\n\\n\")\n}", "func (s *Server) createContainerPlatform(ctx context.Context, container *oci.Container, cgroupParent string, idMappings *idtools.IDMappings) error {\n\tctx, span := log.StartSpan(ctx)\n\tdefer span.End()\n\tif idMappings != nil && !container.Spoofed() {\n\t\trootPair := idMappings.RootPair()\n\t\tfor _, path := range []string{container.BundlePath(), container.MountPoint()} {\n\t\t\tif err := makeAccessible(path, rootPair.UID, rootPair.GID, false); err != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot make %s accessible to %d:%d: %w\", path, rootPair.UID, rootPair.GID, err)\n\t\t\t}\n\t\t}\n\t\tif err := makeMountsAccessible(rootPair.UID, rootPair.GID, container.Spec().Mounts); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn s.Runtime().CreateContainer(ctx, container, cgroupParent, false)\n}", "func GenerateContainer(name, image string, isPrivileged bool, command, args []string, envVars []corev1.EnvVar, resourceReqs corev1.ResourceRequirements, ports []corev1.ContainerPort) *corev1.Container {\n\tcontainer := &corev1.Container{\n\t\tName: name,\n\t\tImage: image,\n\t\tImagePullPolicy: corev1.PullAlways,\n\t\tResources: resourceReqs,\n\t\tEnv: envVars,\n\t\tPorts: ports,\n\t\tCommand: command,\n\t\tArgs: args,\n\t}\n\n\tif isPrivileged {\n\t\tcontainer.SecurityContext = &corev1.SecurityContext{\n\t\t\tPrivileged: &isPrivileged,\n\t\t}\n\t}\n\n\treturn container\n}", "func (dr *dockerRunnerImpl) builderEnsureContainer(name, platform, dockerContext string, args ...string) error {\n\t// if no error, then we have a builder already\n\t// inspect it to make sure it is of the right type\n\tvar b bytes.Buffer\n\tif err := dr.command(nil, &b, ioutil.Discard, \"buildx\", \"inspect\", name); err != nil {\n\t\t// we did not have the named builder, so create the builder\n\t\targs = append(args, \"--name\", name)\n\t\tmsg := fmt.Sprintf(\"creating builder '%s'\", name)\n\t\tif platform != \"\" {\n\t\t\targs = append(args, \"--platform\", platform)\n\t\t\tmsg = 
fmt.Sprintf(\"%s for platform '%s'\", msg, platform)\n\t\t} else {\n\t\t\tmsg = fmt.Sprintf(\"%s for all supported platforms\", msg)\n\t\t}\n\t\tif dockerContext != \"\" {\n\t\t\targs = append(args, dockerContext)\n\t\t\tmsg = fmt.Sprintf(\"%s based on docker context '%s'\", msg, dockerContext)\n\t\t}\n\t\tfmt.Println(msg)\n\t\treturn dr.command(nil, ioutil.Discard, ioutil.Discard, args...)\n\t}\n\t// if we got here, we found a builder already, so let us check its type\n\tvar (\n\t\tscanner = bufio.NewScanner(&b)\n\t\tdriver string\n\t)\n\tfor scanner.Scan() {\n\t\tfields := strings.Fields(scanner.Text())\n\t\tif len(fields) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tif fields[0] != \"Driver:\" {\n\t\t\tcontinue\n\t\t}\n\t\tdriver = fields[1]\n\t\tbreak\n\t}\n\n\tswitch driver {\n\tcase \"\":\n\t\treturn fmt.Errorf(\"builder '%s' exists but has no driver type\", name)\n\tcase \"docker-container\":\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"builder '%s' exists but has wrong driver type '%s'\", name, driver)\n\t}\n}", "func (f *dockerBuildFixture) startContainer(ctx context.Context, config *container.Config) wmcontainer.ID {\n\tresp, err := f.dCli.ContainerCreate(ctx, config, nil, nil, nil, \"\")\n\tif err != nil {\n\t\tf.t.Fatalf(\"startContainer: %v\", err)\n\t}\n\tcID := resp.ID\n\n\terr = f.dCli.ContainerStart(ctx, cID, types.ContainerStartOptions{})\n\tif err != nil {\n\t\tf.t.Fatalf(\"startContainer: %v\", err)\n\t}\n\n\tresult := wmcontainer.ID(cID)\n\tf.containerIDs = append(f.containerIDs, result)\n\treturn result\n}", "func (redis *Redis) Container(f func() error) error {\n\tctx := context.Background()\n\treader, err := redis.Client.ImagePull(ctx, \"docker.io/library/redis:latest\", types.ImagePullOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer reader.Close()\n\n\t_, err = io.Copy(os.Stdout, reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := redis.Client.ContainerCreate(\n\t\tctx,\n\t\t&container.Config{\n\t\t\tImage: 
\"redis:latest\",\n\t\t\tTty: true,\n\t\t},\n\t\t&container.HostConfig{\n\t\t\tPortBindings: nat.PortMap{\n\t\t\t\t\"6379/tcp\": []nat.PortBinding{\n\t\t\t\t\t{\n\t\t\t\t\t\tHostIP: \"0.0.0.0\",\n\t\t\t\t\t\tHostPort: strconv.Itoa(redis.Port),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tnil,\n\t\tredis.ContainerName,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tredis.Client.ContainerStop(ctx, resp.ID, durationPointer(30*time.Second))\n\t\tredis.Client.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{\n\t\t\tForce: true,\n\t\t})\n\t}()\n\n\terr = redis.Client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(\"successfully created Redis container\")\n\n\treturn f()\n}", "func (c *client) RunContainer(ctx context.Context, ctn *pipeline.Container, b *pipeline.Build) error {\n\tlogrus.Tracef(\"running container %s\", ctn.ID)\n\n\t// TODO: investigate way to move this logic\n\t//\n\t// check if the pod is already created\n\tif len(c.pod.ObjectMeta.Name) == 0 {\n\t\t// TODO: investigate way to make this cleaner\n\t\t//\n\t\t// iterate through each container in the pod\n\t\tfor _, container := range c.pod.Spec.Containers {\n\t\t\t// update the container with the volume to mount\n\t\t\tcontainer.VolumeMounts = []v1.VolumeMount{\n\t\t\t\t{\n\t\t\t\t\tName: b.ID,\n\t\t\t\t\tMountPath: \"/vela\",\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// check if other volumes were provided\n\t\t\tif len(c.volumes) > 0 {\n\t\t\t\t// iterate through all volumes provided\n\t\t\t\tfor k, v := range c.volumes {\n\t\t\t\t\t// parse the volume provided\n\t\t\t\t\t_volume := vol.Parse(v)\n\n\t\t\t\t\t// add the volume to the container\n\t\t\t\t\tcontainer.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{\n\t\t\t\t\t\tName: fmt.Sprintf(\"%s_%d\", b.ID, k),\n\t\t\t\t\t\tMountPath: _volume.Destination,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// create the object metadata for the 
pod\n\t\t//\n\t\t// https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1?tab=doc#ObjectMeta\n\t\tc.pod.ObjectMeta = metav1.ObjectMeta{\n\t\t\tName: b.ID,\n\t\t\tLabels: map[string]string{\"pipeline\": b.ID},\n\t\t}\n\n\t\t// create the restart policy for the pod\n\t\t//\n\t\t// https://pkg.go.dev/k8s.io/api/core/v1?tab=doc#RestartPolicy\n\t\tc.pod.Spec.RestartPolicy = v1.RestartPolicyNever\n\n\t\tlogrus.Infof(\"creating pod %s\", c.pod.ObjectMeta.Name)\n\t\t// send API call to create the pod\n\t\t//\n\t\t// https://pkg.go.dev/k8s.io/client-go/kubernetes/typed/core/v1?tab=doc#PodInterface\n\t\t_, err := c.kubernetes.CoreV1().Pods(c.namespace).Create(c.pod)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// parse image from step\n\t_image, err := image.ParseWithError(ctn.Image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set the pod container image to the parsed step image\n\tc.pod.Spec.Containers[ctn.Number-2].Image = _image\n\n\t// send API call to patch the pod with the new container image\n\t//\n\t// https://pkg.go.dev/k8s.io/client-go/kubernetes/typed/core/v1?tab=doc#PodInterface\n\t_, err = c.kubernetes.CoreV1().Pods(c.namespace).Patch(\n\t\tc.pod.ObjectMeta.Name,\n\t\ttypes.StrategicMergePatchType,\n\t\t[]byte(fmt.Sprintf(imagePatch, ctn.ID, _image)),\n\t\t\"\",\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (n *mockAgent) pauseContainer(sandbox *Sandbox, c Container) error {\n\treturn nil\n}", "func MakeElaQueueContainer(rev *v1alpha1.Revision, controllerConfig *ControllerConfig) *corev1.Container {\n\tconst elaQueueConfigVolumeName = \"queue-config\"\n\treturn &corev1.Container{\n\t\tName: queueContainerName,\n\t\tImage: controllerConfig.QueueSidecarImage,\n\t\tResources: corev1.ResourceRequirements{\n\t\t\tRequests: corev1.ResourceList{\n\t\t\t\tcorev1.ResourceName(\"cpu\"): resource.MustParse(queueContainerCPU),\n\t\t\t},\n\t\t},\n\t\tPorts: []corev1.ContainerPort{\n\t\t\t{\n\t\t\t\tName: 
queue.RequestQueuePortName,\n\t\t\t\tContainerPort: int32(queue.RequestQueuePort),\n\t\t\t},\n\t\t\t// Provides health checks and lifecycle hooks.\n\t\t\t{\n\t\t\t\tName: queue.RequestQueueAdminPortName,\n\t\t\t\tContainerPort: int32(queue.RequestQueueAdminPort),\n\t\t\t},\n\t\t},\n\t\t// This handler (1) marks the service as not ready and (2)\n\t\t// adds a small delay before the container is killed.\n\t\tLifecycle: &corev1.Lifecycle{\n\t\t\tPreStop: &corev1.Handler{\n\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\tPort: intstr.FromInt(queue.RequestQueueAdminPort),\n\t\t\t\t\tPath: queue.RequestQueueQuitPath,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tReadinessProbe: &corev1.Probe{\n\t\t\tHandler: corev1.Handler{\n\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\tPort: intstr.FromInt(queue.RequestQueueAdminPort),\n\t\t\t\t\tPath: queue.RequestQueueHealthPath,\n\t\t\t\t},\n\t\t\t},\n\t\t\t// We want to mark the service as not ready as soon as the\n\t\t\t// PreStop handler is called, so we need to check a little\n\t\t\t// bit more often than the default. 
It is a small\n\t\t\t// sacrifice for a low rate of 503s.\n\t\t\tPeriodSeconds: 1,\n\t\t},\n\t\tArgs: []string{\n\t\t\tfmt.Sprintf(\"-concurrencyQuantumOfTime=%v\", controllerConfig.AutoscaleConcurrencyQuantumOfTime.Get()),\n\t\t},\n\t\tEnv: []corev1.EnvVar{\n\t\t\t{\n\t\t\t\tName: \"ELA_NAMESPACE\",\n\t\t\t\tValue: rev.Namespace,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"ELA_CONFIGURATION\",\n\t\t\t\tValue: controller.LookupOwningConfigurationName(rev.OwnerReferences),\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"ELA_REVISION\",\n\t\t\t\tValue: rev.Name,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"ELA_AUTOSCALER\",\n\t\t\t\tValue: controller.GetRevisionAutoscalerName(rev),\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"ELA_AUTOSCALER_PORT\",\n\t\t\t\tValue: strconv.Itoa(autoscalerPort),\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"ELA_POD\",\n\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\tFieldRef: &corev1.ObjectFieldSelector{\n\t\t\t\t\t\tFieldPath: \"metadata.name\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"ELA_LOGGING_CONFIG\",\n\t\t\t\tValue: controllerConfig.QueueProxyLoggingConfig,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"ELA_LOGGING_LEVEL\",\n\t\t\t\tValue: controllerConfig.QueueProxyLoggingLevel,\n\t\t\t},\n\t\t},\n\t}\n}", "func (container *container) Wait() error {\r\n\terr := container.system.Wait()\r\n\tif err == nil {\r\n\t\terr = container.system.ExitError()\r\n\t}\r\n\treturn convertSystemError(err, container)\r\n}", "func spinUpTestContainer(t *testing.T, rchan chan<- *dockertest.Resource, pool *dockertest.Pool, c *Chain, dir string, wg *sync.WaitGroup, tc testChain) {\n\tdefer wg.Done()\n\tvar err error\n\t// initialize the chain\n\n\t// add extra logging if TEST_DEBUG=true\n\tvar debug bool\n\tif val, ok := os.LookupEnv(\"TEST_DEBUG\"); ok {\n\t\tdebug, err = strconv.ParseBool(val)\n\t\tif err != nil {\n\t\t\tdebug = false\n\t\t}\n\t}\n\n\tif err = c.Init(dir, tc.t.cdc, tc.t.amino, tc.t.timeout, debug); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// create the test 
key\n\tif err = c.createTestKey(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tdockerOpts := &dockertest.RunOptions{\n\t\tName: fmt.Sprintf(\"%s-%s\", c.ChainID, t.Name()),\n\t\tRepository: tc.t.dockerImage,\n\t\tTag: tc.t.dockerTag,\n\t\tCmd: []string{c.ChainID, c.MustGetAddress().String()},\n\t\tExposedPorts: []string{tc.t.rpcPort},\n\t\tPortBindings: map[dc.Port][]dc.PortBinding{\n\t\t\tdc.Port(tc.t.rpcPort): []dc.PortBinding{{HostPort: c.getRPCPort()}},\n\t\t},\n\t}\n\n\t// create the proper docker image with port forwarding setup\n\tvar resource *dockertest.Resource\n\tif resource, err = pool.RunWithOptions(dockerOpts); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tc.Log(fmt.Sprintf(\"- [%s] SPUN UP IN CONTAINER %s from %s\", c.ChainID, resource.Container.Name, resource.Container.Config.Image))\n\n\t// retry polling the container until status doesn't error\n\tif err = pool.Retry(c.statusErr); err != nil {\n\t\tt.Errorf(\"Could not connect to docker: %s\", err)\n\t}\n\n\tc.Log(fmt.Sprintf(\"- [%s] CONTAINER AVAILABLE AT PORT %s\", c.ChainID, c.RPCAddr))\n\n\t// initalize the lite client\n\tif err = c.forceInitLite(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\trchan <- resource\n}", "func setNewContainerLive(dockerClient *client.Client, imageName string, httpServerContainerPort int, httpServerHostPort int) (string, error) {\n\n\tcont, err := createContainer(dockerClient, imageName, httpServerContainerPort, httpServerHostPort)\n\tcontainerID, err := setContainerLive(dockerClient, cont.ID)\n\tif err != nil {\n\t\tlog.Printf(\"ContainerStart failed for the image: %s, host port: %d with error: %s\\n\", imageName, httpServerContainerPort, err)\n\t\treturn \"\", err\n\t}\n\tlog.Printf(\"Container %s with host port %d is live.\\n\", containerID, httpServerHostPort)\n\treturn containerID, err\n}", "func waitForImage(ctx context.Context, cl client.Client, timeout time.Duration, ns, key, val, container, expected string) error {\n\tpods := &corev1.PodList{}\n\terr := 
wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {\n\t\tif err := cl.List(ctx, pods, client.MatchingLabels{key: val}, client.InNamespace(ns)); err != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tfor _, p := range pods.Items {\n\t\t\tfor _, c := range p.Spec.Containers {\n\t\t\t\tif c.Name == container && c.Image != expected {\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func StartContainer(url string, imageName string, done chan bool, ef util.ExchangeFolder, a action.ActionID) (int, error) {\n\tenvVar := []string{}\n\tenvVar = append(envVar, util.StarterEnvVariableKey+\"=\"+url)\n\tenvVar = append(envVar, util.StarterEnvNameVariableKey+\"=\"+common.Flags.Descriptor.File)\n\tenvVar = append(envVar, util.StarterEnvLoginVariableKey+\"=\"+common.Flags.Descriptor.Login)\n\tenvVar = append(envVar, util.StarterEnvPasswordVariableKey+\"=\"+common.Flags.Descriptor.Password)\n\tenvVar = append(envVar, util.StarterVerbosityVariableKey+\"=\"+strconv.Itoa(common.Flags.Logging.VerbosityLevel()))\n\tenvVar = append(envVar, util.ActionEnvVariableSkip+\"=\"+strconv.Itoa(common.Flags.Skipping.SkippingLevel()))\n\tenvVar = append(envVar, util.ActionEnvVariableKey+\"=\"+a.String())\n\tenvVar = append(envVar, \"http_proxy=\"+common.Flags.Proxy.HTTP)\n\tenvVar = append(envVar, \"https_proxy=\"+common.Flags.Proxy.HTTPS)\n\tenvVar = append(envVar, \"no_proxy=\"+common.Flags.Proxy.Exclusions)\n\n\tcommon.Logger.Printf(common.LOG_PASSING_CONTAINER_ENVARS, envVar)\n\n\t// Check if we need to load parameters from the comand line\n\tif common.Flags.Descriptor.ParamFile != \"\" {\n\t\tcopyExtraParameters(common.Flags.Descriptor.ParamFile, ef)\n\t}\n\n\tstartedAt := time.Now().UTC()\n\tstartedAt = startedAt.Add(time.Second * -2)\n\tresp, err := client.ContainerCreate(context.Background(), &container.Config{\n\t\tImage: imageName,\n\t\tWorkingDir: util.InstallerVolume,\n\t\tEnv: 
envVar,\n\t}, &container.HostConfig{\n\t\tMounts: []mount.Mount{\n\t\t\t{\n\t\t\t\tType: mount.TypeBind,\n\t\t\t\tSource: ef.Location.AdaptedPath(),\n\t\t\t\tTarget: util.InstallerVolume,\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: mount.TypeBind,\n\t\t\t\tSource: \"/var/run/docker.sock\",\n\t\t\t\tTarget: \"/var/run/docker.sock\",\n\t\t\t},\n\t\t},\n\t}, nil, \"\")\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Chan used to turn off the rolling log\n\tstopLogReading := make(chan bool)\n\n\t// Rolling output of the container logs\n\tgo func(start time.Time, exit chan bool) {\n\t\tlogMap := make(map[string]string)\n\t\t// Trick to avoid tracing twice the same log line\n\t\tnotExist := func(s string) (bool, string) {\n\t\t\ttab := strings.Split(s, util.InstallerLogPrefix)\n\t\t\tif len(tab) > 1 {\n\t\t\t\tsTrim := strings.Trim(tab[1], \" \")\n\t\t\t\tif _, ok := logMap[sTrim]; ok {\n\t\t\t\t\treturn false, \"\"\n\t\t\t\t}\n\t\t\t\tlogMap[sTrim] = \"\"\n\t\t\t\treturn true, util.InstallerLogPrefix + sTrim\n\t\t\t} else {\n\t\t\t\treturn true, s\n\t\t\t}\n\t\t}\n\n\t\t// Request to get the logs content from the container\n\t\treq := func(sr string) {\n\t\t\tout, err := client.ContainerLogs(context.Background(), resp.ID, types.ContainerLogsOptions{Since: sr, ShowStdout: true, ShowStderr: true})\n\t\t\tif err != nil {\n\t\t\t\tstopLogReading <- true\n\t\t\t}\n\t\t\ts := bufio.NewScanner(out)\n\t\t\tfor s.Scan() {\n\t\t\t\tstr := s.Text()\n\t\t\t\tif b, sTrim := notExist(str); b {\n\t\t\t\t\tidx := strings.Index(sTrim, util.FeedbackPrefix)\n\t\t\t\t\tif idx != -1 {\n\t\t\t\t\t\tfU := util.FeedbackUpdate{}\n\t\t\t\t\t\terr = json.Unmarshal([]byte(sTrim[idx+len(util.FeedbackPrefix):]), &fU)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tcommon.Logger.Println(\"Unable to parse progress update: \" + err.Error())\n\t\t\t\t\t\t} else if !common.Flags.Logging.ShouldOutputLogs() {\n\t\t\t\t\t\t\tswitch fU.Type {\n\t\t\t\t\t\t\tcase 
\"I\":\n\t\t\t\t\t\t\t\tcommon.CliFeedbackNotifier.Info(fU.Message)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tcase \"E\":\n\t\t\t\t\t\t\t\tcommon.CliFeedbackNotifier.Error(fU.Message)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tcase \"P\":\n\t\t\t\t\t\t\t\tcommon.CliFeedbackNotifier.ProgressG(fU.Key, fU.Goal, fU.Message)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tcase \"D\":\n\t\t\t\t\t\t\t\tcommon.CliFeedbackNotifier.Detail(fU.Message)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if common.Flags.Logging.ShouldOutputLogs() {\n\t\t\t\t\t\tfmt.Println(sTrim)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = out.Close()\n\t\t\tif err != nil {\n\t\t\t\tcommon.Logger.Println(\"Unable to close container log reader: \" + err.Error())\n\t\t\t}\n\t\t}\n\tLoop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-exit:\n\t\t\t\t// Last call to be sure to get the end of the logs content\n\t\t\t\tnow := time.Now()\n\t\t\t\tnow = now.Add(time.Second * -1)\n\t\t\t\tsinceReq := strconv.FormatInt(now.Unix(), 10)\n\t\t\t\treq(sinceReq)\n\t\t\t\tbreak Loop\n\t\t\tdefault:\n\t\t\t\t// Running call to trace the container logs every 500ms\n\t\t\t\tsinceReq := strconv.FormatInt(start.Unix(), 10)\n\t\t\t\tstart = start.Add(time.Millisecond * 500)\n\t\t\t\treq(sinceReq)\n\t\t\t\ttime.Sleep(time.Millisecond * 500)\n\t\t\t}\n\t\t}\n\t}(startedAt, stopLogReading)\n\n\tdefer func() {\n\t\tif err := LogAllFromContainer(resp.ID, ef, done); err != nil {\n\t\t\tcommon.Logger.Println(\"Unable to fetch logs from container\")\n\t\t}\n\t}()\n\n\tif err := client.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {\n\t\tcommon.CliFeedbackNotifier.Error(\"Unable to start container: %s\", err.Error())\n\t\treturn 0, err\n\t}\n\n\tstatusCh, errCh := client.ContainerWait(context.Background(), resp.ID, container.WaitConditionNotRunning)\n\tselect {\n\tcase err := <-errCh:\n\t\tstopLogReading <- true\n\t\treturn 0, err\n\tcase status := <-statusCh:\n\t\tstopLogReading <- 
true\n\t\treturn int(status.StatusCode), nil\n\t}\n}", "func (m *masterService) allocateContainers() {\n\tdefMap := m.db.ListDefinitions()\n\tcontMap := m.db.ListContainers()\n\tnodeMap := m.db.ListNodes()\n\n\t//\n\t// definition -> container map\n\t//\n\tdefContMapList := make(map[string][]*model.Container)\n\tfor _, cont := range contMap {\n\t\tconts := defContMapList[cont.DefinitionName]\n\t\tconts = append(conts, cont)\n\t\tdefContMapList[cont.DefinitionName] = conts\n\t}\n\n\t//\n\t// node -> container map\n\t//\n\tnodeContMap := make(map[string][]*model.Container)\n\tfor nodeName := range nodeMap {\n\t\tconts := make([]*model.Container, 0)\n\t\tfor _, cont := range contMap {\n\t\t\tif cont.NodeName == nodeName {\n\t\t\t\tconts = append(conts, cont)\n\t\t\t}\n\t\t}\n\t\tnodeContMap[nodeName] = conts\n\t}\n\n\t//\n\t// todo\n\t//\n\tfor k, def := range defMap {\n\t\tconts := defContMapList[k]\n\t\tn := len(conts)\n\t\tif def.Count < n {\n\t\t\t// deallocate some containers for definition\n\t\t\tdiff := n - def.Count\n\t\t\tlog.Info(\"Adjusting container count (%d delta)\", diff)\n\t\t\tfor i := 0; i < diff; i++ {\n\t\t\t\tidx := rand.Intn(len(conts))\n\t\t\t\tcont := conts[idx]\n\t\t\t\tconts = append(conts[:idx], conts[idx+1:]...)\n\t\t\t\tlog.Info(\"Deleting container id %s/%s\", cont.ContainerID, cont.Name)\n\t\t\t\tm.db.DeleteContainer(cont.ContainerID)\n\t\t\t}\n\t\t} else if def.Count > n {\n\t\t\t// allocate more containers for definition\n\t\t\tdiff := def.Count - n\n\t\t\tlog.Info(\"Adjusting container count (%d delta)\", diff)\n\t\t\tfor i := 0; i < diff; i++ {\n\t\t\t\tc := &model.Container{}\n\t\t\t\tc.Name = fmt.Sprintf(\"%s-%d\", def.Name, m.db.NextAutoIncrement(\"inc.container\", def.Name))\n\t\t\t\tc.DefinitionName = def.Name\n\n\t\t\t\t//\n\t\t\t\t// find node with least numbers of containers\n\t\t\t\t//\n\t\t\t\tcurrentN := 999999999\n\t\t\t\tvar currentNodeName string\n\t\t\t\tlog.Debug(\"nodeContMap: %s\", nodeContMap)\n\t\t\t\tfor 
nodeName, contSlice := range nodeContMap {\n\t\t\t\t\tn := len(contSlice)\n\t\t\t\t\tlog.Debug(\"checking node for number of containers (%d) less than %d\", n, currentN)\n\t\t\t\t\tif currentN > n {\n\t\t\t\t\t\tcurrentNodeName = nodeName\n\t\t\t\t\t\tcurrentN = n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t\tif currentNodeName == \"\" {\n\t\t\t\t\tlog.Warn(\"Not able to create node %s...no nodes available!\", c.Name)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tc.NodeName = currentNodeName\n\n\t\t\t\t//\n\t\t\t\tc.Image = def.Image\n\t\t\t\tc.Running = false\n\t\t\t\tc.HTTPPort = def.HTTPPort\n\t\t\t\t// generate a mapping nodeHttpPort -> httpPort\n\t\t\t\tif c.HTTPPort > 0 {\n\t\t\t\t\tc.NodeHTTPPort = minHTTPPort + m.db.NextAutoIncrement(\"http.port\", \"http.port\")\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Creating container id %s/%s\", c.ContainerID, c.Name)\n\t\t\t\tif err := m.db.SaveContainer(c); err != nil {\n\t\t\t\t\tlog.Error(\"Error saving container %s\", c.Name)\n\t\t\t\t} else {\n\t\t\t\t\tnodeContMap[c.NodeName] = append(nodeContMap[c.NodeName], c)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (f FakeContainerImpl) ContainerExists(containerID string) bool {\n\treturn true\n}", "func TestUnitToContainer(t *testing.T) {\n\tuser := int64(rand.Intn(65536))\n\tgroup := int64(rand.Intn(65536))\n\ttestCases := []struct {\n\t\tunit api.Unit\n\t}{\n\t\t{\n\t\t\tunit: api.Unit{\n\t\t\t\tName: rand.String(16),\n\t\t\t\tImage: fmt.Sprintf(\"elotl/%s:latest\", rand.String(8)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tunit: api.Unit{\n\t\t\t\tName: rand.String(16),\n\t\t\t\tImage: fmt.Sprintf(\"elotl/%s:latest\", rand.String(8)),\n\t\t\t\tCommand: []string{\n\t\t\t\t\t\"/bin/bash\",\n\t\t\t\t},\n\t\t\t\tArgs: []string{\n\t\t\t\t\t\"-c\",\n\t\t\t\t\t\"sleep 1\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tunit: api.Unit{\n\t\t\t\tName: rand.String(16),\n\t\t\t\tImage: fmt.Sprintf(\"elotl/%s:latest\", rand.String(8)),\n\t\t\t\tEnv: []api.EnvVar{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: 
\"env1\",\n\t\t\t\t\t\tValue: \"value1\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"foo\",\n\t\t\t\t\t\tValue: \"bar\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tunit: api.Unit{\n\t\t\t\tName: rand.String(16),\n\t\t\t\tImage: fmt.Sprintf(\"elotl/%s:latest\", rand.String(8)),\n\t\t\t\tVolumeMounts: []api.VolumeMount{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"myvolume\",\n\t\t\t\t\t\tMountPath: \"/my/path\",\n\t\t\t\t\t\tSubPath: \"\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"foo-volume\",\n\t\t\t\t\t\tMountPath: \"/var/run/bar\",\n\t\t\t\t\t\tSubPath: \"\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tunit: api.Unit{\n\t\t\t\tName: rand.String(16),\n\t\t\t\tImage: fmt.Sprintf(\"elotl/%s:latest\", rand.String(8)),\n\t\t\t\tPorts: []api.ContainerPort{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"my-tcp-port\",\n\t\t\t\t\t\tProtocol: api.ProtocolTCP,\n\t\t\t\t\t\tContainerPort: 80,\n\t\t\t\t\t\tHostPort: 8880,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"my-udp-port\",\n\t\t\t\t\t\tProtocol: api.ProtocolUDP,\n\t\t\t\t\t\tContainerPort: 53,\n\t\t\t\t\t\tHostPort: 5353,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tunit: api.Unit{\n\t\t\t\tName: rand.String(16),\n\t\t\t\tImage: fmt.Sprintf(\"elotl/%s:latest\", rand.String(8)),\n\t\t\t\tWorkingDir: \"/home/nobody\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tunit: api.Unit{\n\t\t\t\tName: rand.String(16),\n\t\t\t\tImage: fmt.Sprintf(\"elotl/%s:latest\", rand.String(8)),\n\t\t\t\tSecurityContext: &api.SecurityContext{\n\t\t\t\t\tRunAsUser: &user,\n\t\t\t\t\tRunAsGroup: &group,\n\t\t\t\t\tCapabilities: &api.Capabilities{\n\t\t\t\t\t\tAdd: []string{\n\t\t\t\t\t\t\t\"add-cap-1\",\n\t\t\t\t\t\t\t\"add-cap-2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDrop: []string{\n\t\t\t\t\t\t\t\"drop-cap-1\",\n\t\t\t\t\t\t\t\"drop-cap-2\",\n\t\t\t\t\t\t\t\"drop-cap-3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tcontainer := unitToContainer(tc.unit, 
nil)\n\t\tassert.Equal(t, tc.unit.Name, container.Name)\n\t\tassert.Equal(t, tc.unit.Image, container.Image)\n\t\tassert.Equal(t, tc.unit.WorkingDir, container.WorkingDir)\n\t\tassert.ElementsMatch(t, tc.unit.Command, container.Command)\n\t\tassert.ElementsMatch(t, tc.unit.Args, container.Args)\n\t\tassert.Equal(t, len(tc.unit.Env), len(container.Env))\n\t\tfor _, e := range tc.unit.Env {\n\t\t\tenv := v1.EnvVar{\n\t\t\t\tName: e.Name,\n\t\t\t\tValue: e.Value,\n\t\t\t}\n\t\t\tassert.Contains(t, container.Env, env)\n\t\t}\n\t\tassert.Equal(t, len(tc.unit.VolumeMounts), len(container.VolumeMounts))\n\t\tfor _, v := range tc.unit.VolumeMounts {\n\t\t\tvol := v1.VolumeMount{\n\t\t\t\tName: v.Name,\n\t\t\t\tMountPath: v.MountPath,\n\t\t\t}\n\t\t\tassert.Contains(t, container.VolumeMounts, vol)\n\t\t}\n\t\tfor _, p := range tc.unit.Ports {\n\t\t\tport := v1.ContainerPort{\n\t\t\t\tName: p.Name,\n\t\t\t\tProtocol: v1.Protocol(string(p.Protocol)),\n\t\t\t\tHostPort: int32(p.HostPort),\n\t\t\t\tContainerPort: int32(p.ContainerPort),\n\t\t\t}\n\t\t\tassert.Contains(t, container.Ports, port)\n\t\t}\n\t\tif tc.unit.SecurityContext != nil {\n\t\t\tassert.Equal(\n\t\t\t\tt,\n\t\t\t\ttc.unit.SecurityContext.RunAsUser,\n\t\t\t\tcontainer.SecurityContext.RunAsUser)\n\t\t\tassert.Equal(\n\t\t\t\tt,\n\t\t\t\ttc.unit.SecurityContext.RunAsGroup,\n\t\t\t\tcontainer.SecurityContext.RunAsGroup)\n\t\t\tif tc.unit.SecurityContext.Capabilities != nil {\n\t\t\t\tassert.NotNil(t, container.SecurityContext.Capabilities)\n\t\t\t\tassert.Equal(\n\t\t\t\t\tt,\n\t\t\t\t\tlen(tc.unit.SecurityContext.Capabilities.Add),\n\t\t\t\t\tlen(container.SecurityContext.Capabilities.Add))\n\t\t\t\tfor _, a := range tc.unit.SecurityContext.Capabilities.Add 
{\n\t\t\t\t\tassert.Contains(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tcontainer.SecurityContext.Capabilities.Add,\n\t\t\t\t\t\tv1.Capability(a))\n\t\t\t\t}\n\t\t\t\tassert.Equal(\n\t\t\t\t\tt,\n\t\t\t\t\tlen(tc.unit.SecurityContext.Capabilities.Drop),\n\t\t\t\t\tlen(container.SecurityContext.Capabilities.Drop))\n\t\t\t\tfor _, d := range tc.unit.SecurityContext.Capabilities.Drop {\n\t\t\t\t\tassert.Contains(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tcontainer.SecurityContext.Capabilities.Drop,\n\t\t\t\t\t\tv1.Capability(d))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tunit := containerToUnit(container)\n\t\tremoveVolumeMount(&unit, resolvconfVolumeName)\n\t\tremoveVolumeMount(&unit, etchostsVolumeName)\n\t\tassert.Equal(t, tc.unit, unit)\n\t}\n}", "func (s *DockerSandbox) MakeReady() (err error) {\n\t// make sure image is pulled\n\timgExists, err := s.mgr.DockerImageExists(s.name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !imgExists {\n\t\tif err := s.mgr.dockerPull(s.name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// make sure container is created\n\tcontExists, err := s.mgr.dockerContainerExists(s.name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !contExists {\n\t\tif _, err := s.mgr.dockerCreate(s.name, []string{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *Service) Create(ctx context.Context, img string, opts ...CreateOption) (*Container, error) {\n\tc := CreateConfig{\n\t\tSpec: Spec{\n\t\t\tConfig: containerapi.Config{Image: img},\n\t\t},\n\t}\n\tfor _, o := range opts {\n\t\to(&c)\n\t}\n\n\twithName := func(req *http.Request) error { return nil }\n\tif c.Name != \"\" {\n\t\twithName = func(req *http.Request) error {\n\t\t\tq := req.URL.Query()\n\t\t\tif q == nil {\n\t\t\t\tq = url.Values{}\n\t\t\t}\n\n\t\t\tq.Set(\"name\", c.Name)\n\t\t\treq.URL.RawQuery = q.Encode()\n\t\t\treturn nil\n\t\t}\n\t}\n\n\twithPlatform := func(req *http.Request) error {\n\t\tq := req.URL.Query()\n\t\tq.Set(\"platform\", c.Platform)\n\t\treq.URL.RawQuery = 
q.Encode()\n\t\treturn nil\n\t}\n\n\tresp, err := httputil.DoRequest(ctx, func(ctx context.Context) (*http.Response, error) {\n\t\treturn s.tr.Do(ctx, http.MethodPost, version.Join(ctx, \"/containers/create\"), httputil.WithJSONBody(c.Spec), withName, withPlatform)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, errdefs.Wrap(err, \"error reading response body\")\n\t}\n\n\tvar cc containerCreateResponse\n\tif err := json.Unmarshal(data, &cc); err != nil {\n\t\treturn nil, errdefs.Wrap(err, \"error decoding container create response\")\n\t}\n\n\tif cc.ID == \"\" {\n\t\treturn nil, fmt.Errorf(\"empty ID in response: %v\", string(data))\n\t}\n\treturn &Container{id: cc.ID, tr: s.tr}, nil\n}", "func (c *Client) ProbeContainer(ctx context.Context, id string, timeout time.Duration) *Message {\n\tch := c.watch.notify(id)\n\n\tif timeout <= 0 {\n\t\tmsg := <-ch\n\t\tch <- msg // put it back, make sure the method can be called repeatedly.\n\n\t\treturn msg\n\t}\n\tselect {\n\tcase msg := <-ch:\n\t\tch <- msg // put it back, make sure the method can be called repeatedly.\n\t\treturn msg\n\tcase <-time.After(timeout):\n\t\treturn &Message{err: errtypes.ErrTimeout}\n\tcase <-ctx.Done():\n\t\treturn &Message{err: ctx.Err()}\n\t}\n}", "func (d *dockerWaiter) Wait(ctx context.Context, containerID string) error {\n\treturn d.retryWait(ctx, containerID, nil)\n}", "func NewContainer() Container {\n\treturn Container{}\n}", "func (c *client) SetupContainer(ctx context.Context, ctn *pipeline.Container) error {\n\tlogrus.Tracef(\"setting up for container %s\", ctn.ID)\n\n\t// create the container object for the pod\n\t//\n\t// https://pkg.go.dev/k8s.io/api/core/v1?tab=doc#Container\n\tcontainer := v1.Container{\n\t\tName: ctn.ID,\n\t\t// create the container with the kubernetes/pause image\n\t\t//\n\t\t// This is done due to the nature of how containers are\n\t\t// executed 
inside the pod. Kubernetes will attempt to\n\t\t// start and run all containers in the pod at once. We\n\t\t// want to control the execution of the containers\n\t\t// inside the pod so we use the pause image as the\n\t\t// default for containers, and then sequentially patch\n\t\t// the containers with the proper image.\n\t\t//\n\t\t// https://hub.docker.com/r/kubernetes/pause\n\t\tImage: image.Parse(\"kubernetes/pause:latest\"),\n\t\tEnv: []v1.EnvVar{},\n\t\tStdin: false,\n\t\tStdinOnce: false,\n\t\tTTY: false,\n\t\tWorkingDir: ctn.Directory,\n\t\tImagePullPolicy: v1.PullAlways,\n\t}\n\n\t// check if the environment is provided\n\tif len(ctn.Environment) > 0 {\n\t\t// iterate through each element in the container environment\n\t\tfor k, v := range ctn.Environment {\n\t\t\t// add key/value environment to container config\n\t\t\tcontainer.Env = append(container.Env, v1.EnvVar{Name: k, Value: v})\n\t\t}\n\t}\n\n\t// check if the entrypoint is provided\n\tif len(ctn.Entrypoint) > 0 {\n\t\t// add entrypoint to container config\n\t\tcontainer.Args = ctn.Entrypoint\n\t}\n\n\t// check if the commands are provided\n\tif len(ctn.Commands) > 0 {\n\t\t// add commands to container config\n\t\tcontainer.Args = append(container.Args, ctn.Commands...)\n\t}\n\n\t// add the container definition to the pod spec\n\t//\n\t// https://pkg.go.dev/k8s.io/api/core/v1?tab=doc#PodSpec\n\tc.pod.Spec.Containers = append(c.pod.Spec.Containers, container)\n\n\treturn nil\n}", "func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGenerator, clone bool, c *libpod.Container) (*specs.Spec, *specgen.SpecGenerator, []libpod.CtrCreateOption, error) {\n\trtc, err := rt.GetConfigNoCopy()\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\trlimits, err := specgenutil.GenRlimits(rtc.Ulimits())\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\ts.Rlimits = append(rlimits, s.Rlimits...)\n\n\tif s.OOMScoreAdj == nil {\n\t\ts.OOMScoreAdj = 
rtc.Containers.OOMScoreAdj\n\t}\n\n\tif len(rtc.Containers.CgroupConf) > 0 {\n\t\tif s.ResourceLimits == nil {\n\t\t\ts.ResourceLimits = &specs.LinuxResources{}\n\t\t}\n\t\tif s.ResourceLimits.Unified == nil {\n\t\t\ts.ResourceLimits.Unified = make(map[string]string)\n\t\t}\n\t\tfor _, cgroupConf := range rtc.Containers.CgroupConf {\n\t\t\tcgr := strings.SplitN(cgroupConf, \"=\", 2)\n\t\t\tif len(cgr) != 2 {\n\t\t\t\treturn nil, nil, nil, fmt.Errorf(\"CgroupConf %q from containers.conf invalid, must be name=value\", cgr)\n\t\t\t}\n\t\t\tif _, ok := s.ResourceLimits.Unified[cgr[0]]; !ok {\n\t\t\t\ts.ResourceLimits.Unified[cgr[0]] = cgr[1]\n\t\t\t}\n\t\t}\n\t}\n\n\t// If joining a pod, retrieve the pod for use, and its infra container\n\tvar pod *libpod.Pod\n\tvar infra *libpod.Container\n\tif s.Pod != \"\" {\n\t\tpod, err = rt.LookupPod(s.Pod)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"retrieving pod %s: %w\", s.Pod, err)\n\t\t}\n\t\tif pod.HasInfraContainer() {\n\t\t\tinfra, err = pod.InfraContainer()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\toptions := []libpod.CtrCreateOption{}\n\tcompatibleOptions := &libpod.InfraInherit{}\n\tvar infraSpec *specs.Spec\n\tif infra != nil {\n\t\toptions, infraSpec, compatibleOptions, err = Inherit(infra, s, rt)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t}\n\n\tif err := specgen.FinishThrottleDevices(s); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\t// Set defaults for unset namespaces\n\tif s.PidNS.IsDefault() {\n\t\tdefaultNS, err := GetDefaultNamespaceMode(\"pid\", rtc, pod)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\ts.PidNS = defaultNS\n\t}\n\tif s.IpcNS.IsDefault() {\n\t\tdefaultNS, err := GetDefaultNamespaceMode(\"ipc\", rtc, pod)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\ts.IpcNS = defaultNS\n\t}\n\tif s.UtsNS.IsDefault() {\n\t\tdefaultNS, err := GetDefaultNamespaceMode(\"uts\", rtc, 
pod)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\ts.UtsNS = defaultNS\n\t}\n\tif s.UserNS.IsDefault() {\n\t\tdefaultNS, err := GetDefaultNamespaceMode(\"user\", rtc, pod)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\ts.UserNS = defaultNS\n\t\tvalue := string(s.UserNS.NSMode)\n\t\tif s.UserNS.Value != \"\" {\n\t\t\tvalue = value + \":\" + s.UserNS.Value\n\t\t}\n\t\tmappings, err := util.ParseIDMapping(namespaces.UsernsMode(value), nil, nil, \"\", \"\")\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\ts.IDMappings = mappings\n\t}\n\tif s.NetNS.IsDefault() {\n\t\tdefaultNS, err := GetDefaultNamespaceMode(\"net\", rtc, pod)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\ts.NetNS = defaultNS\n\t}\n\tif s.CgroupNS.IsDefault() {\n\t\tdefaultNS, err := GetDefaultNamespaceMode(\"cgroup\", rtc, pod)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\ts.CgroupNS = defaultNS\n\t}\n\n\tif s.ContainerCreateCommand != nil {\n\t\toptions = append(options, libpod.WithCreateCommand(s.ContainerCreateCommand))\n\t}\n\n\tif s.Rootfs != \"\" {\n\t\toptions = append(options, libpod.WithRootFS(s.Rootfs, s.RootfsOverlay, s.RootfsMapping))\n\t}\n\n\tnewImage, resolvedImageName, imageData, err := getImageFromSpec(ctx, rt, s)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tif imageData != nil {\n\t\tociRuntimeVariant := rtc.Engine.ImagePlatformToRuntime(imageData.Os, imageData.Architecture)\n\t\t// Don't unnecessarily set and invoke additional libpod\n\t\t// option if OCI runtime is still default.\n\t\tif ociRuntimeVariant != rtc.Engine.OCIRuntime {\n\t\t\toptions = append(options, libpod.WithCtrOCIRuntime(ociRuntimeVariant))\n\t\t}\n\t}\n\n\tif newImage != nil {\n\t\t// If the input name changed, we could properly resolve the\n\t\t// image. 
Otherwise, it must have been an ID where we're\n\t\t// defaulting to the first name or an empty one if no names are\n\t\t// present.\n\t\tif strings.HasPrefix(newImage.ID(), resolvedImageName) {\n\t\t\tnames := newImage.Names()\n\t\t\tif len(names) > 0 {\n\t\t\t\tresolvedImageName = names[0]\n\t\t\t}\n\t\t}\n\n\t\toptions = append(options, libpod.WithRootFSFromImage(newImage.ID(), resolvedImageName, s.RawImageName))\n\t}\n\n\t_, err = rt.LookupPod(s.Hostname)\n\tif len(s.Hostname) > 0 && !s.UtsNS.IsPrivate() && err == nil {\n\t\t// ok, we are incorrectly setting the pod as the hostname, let's undo that before validation\n\t\ts.Hostname = \"\"\n\t}\n\n\t// Set defaults if network info is not provided\n\tif s.NetNS.IsPrivate() || s.NetNS.IsDefault() {\n\t\tif rootless.IsRootless() {\n\t\t\t// when we are rootless we default to default_rootless_network_cmd from containers.conf\n\t\t\tconf, err := rt.GetConfigNoCopy()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\t\t\tswitch conf.Network.DefaultRootlessNetworkCmd {\n\t\t\tcase slirp4netns.BinaryName, \"\":\n\t\t\t\ts.NetNS.NSMode = specgen.Slirp\n\t\t\tcase pasta.BinaryName:\n\t\t\t\ts.NetNS.NSMode = specgen.Pasta\n\t\t\tdefault:\n\t\t\t\treturn nil, nil, nil, fmt.Errorf(\"invalid default_rootless_network_cmd option %q\",\n\t\t\t\t\tconf.Network.DefaultRootlessNetworkCmd)\n\t\t\t}\n\t\t} else {\n\t\t\t// as root default to bridge\n\t\t\ts.NetNS.NSMode = specgen.Bridge\n\t\t}\n\t}\n\n\tif err := s.Validate(); err != nil {\n\t\treturn nil, nil, nil, fmt.Errorf(\"invalid config provided: %w\", err)\n\t}\n\n\tfinalMounts, finalVolumes, finalOverlays, err := finalizeMounts(ctx, s, rt, rtc, newImage)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tif len(s.HostUsers) > 0 {\n\t\toptions = append(options, libpod.WithHostUsers(s.HostUsers))\n\t}\n\n\tcommand, err := makeCommand(s, imageData, rtc)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tinfraVol := 
(len(compatibleOptions.Mounts) > 0 || len(compatibleOptions.Volumes) > 0 || len(compatibleOptions.ImageVolumes) > 0 || len(compatibleOptions.OverlayVolumes) > 0)\n\topts, err := createContainerOptions(rt, s, pod, finalVolumes, finalOverlays, imageData, command, infraVol, *compatibleOptions)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\toptions = append(options, opts...)\n\n\tif containerType := s.InitContainerType; len(containerType) > 0 {\n\t\toptions = append(options, libpod.WithInitCtrType(containerType))\n\t}\n\tif len(s.Name) > 0 {\n\t\tlogrus.Debugf(\"setting container name %s\", s.Name)\n\t\toptions = append(options, libpod.WithName(s.Name))\n\t}\n\tif len(s.Devices) > 0 {\n\t\topts = ExtractCDIDevices(s)\n\t\toptions = append(options, opts...)\n\t}\n\truntimeSpec, err := SpecGenToOCI(ctx, s, rt, rtc, newImage, finalMounts, pod, command, compatibleOptions)\n\tif clone { // the container fails to start if cloned due to missing Linux spec entries\n\t\tif c == nil {\n\t\t\treturn nil, nil, nil, errors.New(\"the given container could not be retrieved\")\n\t\t}\n\t\tconf := c.Config()\n\t\tif conf != nil && conf.Spec != nil && conf.Spec.Linux != nil {\n\t\t\tout, err := json.Marshal(conf.Spec.Linux)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\t\t\tresources := runtimeSpec.Linux.Resources\n\n\t\t\t// resources get overwritten similarly to pod inheritance, manually assign here if there is a new value\n\t\t\tmarshalRes, err := json.Marshal(resources)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\n\t\t\terr = json.Unmarshal(out, runtimeSpec.Linux)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\n\t\t\terr = json.Unmarshal(marshalRes, runtimeSpec.Linux.Resources)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\t\t}\n\t\tif s.ResourceLimits != nil {\n\t\t\tswitch {\n\t\t\tcase s.ResourceLimits.CPU != nil:\n\t\t\t\truntimeSpec.Linux.Resources.CPU = 
s.ResourceLimits.CPU\n\t\t\tcase s.ResourceLimits.Memory != nil:\n\t\t\t\truntimeSpec.Linux.Resources.Memory = s.ResourceLimits.Memory\n\t\t\tcase s.ResourceLimits.BlockIO != nil:\n\t\t\t\truntimeSpec.Linux.Resources.BlockIO = s.ResourceLimits.BlockIO\n\t\t\tcase s.ResourceLimits.Devices != nil:\n\t\t\t\truntimeSpec.Linux.Resources.Devices = s.ResourceLimits.Devices\n\t\t\t}\n\t\t}\n\t}\n\tif len(s.HostDeviceList) > 0 {\n\t\toptions = append(options, libpod.WithHostDevice(s.HostDeviceList))\n\t}\n\tif infraSpec != nil && infraSpec.Linux != nil { // if we are inheriting Linux info from a pod...\n\t\t// Pass Security annotations\n\t\tif len(infraSpec.Annotations[define.InspectAnnotationLabel]) > 0 && len(runtimeSpec.Annotations[define.InspectAnnotationLabel]) == 0 {\n\t\t\truntimeSpec.Annotations[define.InspectAnnotationLabel] = infraSpec.Annotations[define.InspectAnnotationLabel]\n\t\t}\n\t\tif len(infraSpec.Annotations[define.InspectAnnotationSeccomp]) > 0 && len(runtimeSpec.Annotations[define.InspectAnnotationSeccomp]) == 0 {\n\t\t\truntimeSpec.Annotations[define.InspectAnnotationSeccomp] = infraSpec.Annotations[define.InspectAnnotationSeccomp]\n\t\t}\n\t\tif len(infraSpec.Annotations[define.InspectAnnotationApparmor]) > 0 && len(runtimeSpec.Annotations[define.InspectAnnotationApparmor]) == 0 {\n\t\t\truntimeSpec.Annotations[define.InspectAnnotationApparmor] = infraSpec.Annotations[define.InspectAnnotationApparmor]\n\t\t}\n\t}\n\treturn runtimeSpec, s, options, err\n}", "func CreateContainer(ctx context.Context, client *docker.Client, pullOutput io.Writer, containerDef *ContainerDefinition) (containerID string, err error) {\n\tif containerDef.Image == \"\" {\n\t\treturn \"\", fmt.Errorf(\"create container: image not specified\")\n\t}\n\tif len(containerDef.Argv) > 0 && containerDef.Command != \"\" {\n\t\treturn \"\", fmt.Errorf(\"create container: both Argv and Command specified\")\n\t}\n\n\tcontainerName := containerDef.containerName()\n\tlog.Debugf(ctx, 
\"Creating container '%s'\", containerName)\n\n\tif err := PullImageIfNotHere(ctx, client, pullOutput, containerDef, docker.AuthConfiguration{}); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar ports []string\n\tbindings := make(map[docker.Port][]docker.PortBinding)\n\texposedPorts := make(map[docker.Port]struct{})\n\n\tif len(containerDef.Ports) > 0 {\n\t\tlog.Debugf(ctx, \"Will map the following ports:\")\n\n\t\tfor _, portSpec := range containerDef.Ports {\n\t\t\tparts := strings.Split(portSpec, \":\")\n\t\t\tif len(parts) != 2 {\n\t\t\t\treturn \"\", fmt.Errorf(\"create container %s: format of port must be HOSTPORT:CONTAINERPORT, but was %s\", containerName, portSpec)\n\t\t\t}\n\t\t\texternalPort := parts[0]\n\t\t\tinternalPort := parts[1]\n\n\t\t\tprotocol := \"tcp\"\n\t\t\tif protoParts := strings.Split(internalPort, \"/\"); len(protoParts) == 2 {\n\t\t\t\tprotocol = protoParts[1]\n\t\t\t\tinternalPort = protoParts[0]\n\t\t\t}\n\n\t\t\tlog.Debugf(ctx, \" * %s -> %s/%s in container\", externalPort, internalPort, protocol)\n\t\t\tportKey := docker.Port(internalPort + \"/\" + protocol)\n\t\t\tports = append(ports, string(portKey))\n\t\t\tbindings[portKey] = append(bindings[portKey], docker.PortBinding{\n\t\t\t\tHostIP: \"0.0.0.0\",\n\t\t\t\tHostPort: externalPort,\n\t\t\t})\n\t\t\texposedPorts[portKey] = struct{}{}\n\t\t}\n\t}\n\n\tif containerDef.HealthCheckPort != 0 {\n\t\texposedPorts[dockerTCPPort(containerDef.HealthCheckPort)] = struct{}{}\n\t\tdockerNetworkExists, err := hostHasDockerNetwork()\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"create container %s: checking for docker0: %w\", containerName, err)\n\t\t}\n\t\tif !dockerNetworkExists {\n\t\t\t// Need to map to localhost port if Docker isn't doing it for us.\n\t\t\tlog.Infof(ctx, \"Port wait check on port %d; finding free local port...\", containerDef.HealthCheckPort)\n\t\t\tlocalPort, err := findFreePort()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"create container %s: find 
free TCP port to forward from: %w\", containerName, err)\n\t\t\t}\n\t\t\tlog.Infof(ctx, \"Mapping %d locally to %d in the container.\", localPort, containerDef.HealthCheckPort)\n\n\t\t\tportKey := dockerTCPPort(containerDef.HealthCheckPort)\n\t\t\tports = append(ports, string(portKey))\n\t\t\tbindings[portKey] = append(bindings[portKey], docker.PortBinding{\n\t\t\t\tHostIP: \"127.0.0.1\",\n\t\t\t\tHostPort: strconv.Itoa(localPort),\n\t\t\t})\n\t\t}\n\t}\n\n\thostConfig := docker.HostConfig{\n\t\tMounts: containerDef.Mounts,\n\t\tPortBindings: bindings,\n\t\tPrivileged: containerDef.Privileged,\n\t}\n\n\tconfig := docker.Config{\n\t\tEnv: containerDef.Environment,\n\t\tAttachStdout: false,\n\t\tAttachStdin: false,\n\t\tImage: containerDef.Image,\n\t\tPortSpecs: ports,\n\t\tExposedPorts: exposedPorts,\n\t}\n\n\tif len(containerDef.Argv) > 0 {\n\t\tconfig.Cmd = containerDef.Argv\n\t} else if len(containerDef.Command) > 0 {\n\t\tcmd := containerDef.Command\n\t\tlog.Debugf(ctx, \"Will run %s in the container\", cmd)\n\t\tcmdParts := strings.Split(cmd, \" \")\n\t\tconfig.Cmd = cmdParts\n\t}\n\n\tcontainer, err := client.CreateContainer(docker.CreateContainerOptions{\n\t\tContext: ctx,\n\t\tName: containerName,\n\t\tConfig: &config,\n\t\tHostConfig: &hostConfig,\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"create container %s: %v\", containerName, err)\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\trmErr := client.RemoveContainer(docker.RemoveContainerOptions{\n\t\t\t\tContext: xcontext.IgnoreDeadline(ctx),\n\t\t\t\tID: container.ID,\n\t\t\t\tRemoveVolumes: true,\n\t\t\t})\n\t\t\tif rmErr != nil {\n\t\t\t\tlog.Infof(ctx, \"unable to remove container: %v\", rmErr)\n\t\t\t}\n\t\t}\n\t}()\n\n\tlog.Debugf(ctx, \"Created container ID: %s\", container.ID)\n\treturn container.ID, nil\n}", "func CreateBusyboxContainer(c *check.C, cname string, cmd ...string) (*http.Response, error) {\n\tq := url.Values{}\n\tq.Add(\"name\", cname)\n\n\tobj := 
map[string]interface{}{\n\t\t\"Image\": busyboxImage,\n\t\t\"Cmd\": cmd,\n\t\t\"HostConfig\": map[string]interface{}{},\n\t}\n\n\tpath := \"/containers/create\"\n\tquery := request.WithQuery(q)\n\tbody := request.WithJSONBody(obj)\n\treturn request.Post(path, query, body)\n}", "func CreateContainer(server lxd.ContainerServer, container api.ContainersPost) error {\n\top, err := server.CreateContainer(container)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn op.Wait()\n}", "func CreateContainer(server lxd.ContainerServer, container api.ContainersPost) error {\n\top, err := server.CreateContainer(container)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn op.Wait()\n}", "func (c *TestClient) processPodCreationEvents() {\n\twait.Until(c.processNextPodCreationEvent, 0, c.informerStopChan)\n\tklog.Infof(\"processPodCreationEvents() goroutine quitting\")\n}", "func (c *MockDockerClient) ContainerExecCreate(ctx context.Context, containerID string, cmd []string, attachStdout bool, attachStderr bool) (dockertypes.IDResponse, error) {\n\tif c.ContainerExecCreateFn != nil {\n\t\tfmt.Println(\"[MockDockerClient] In \", utils.CurrentFunctionName())\n\t\tfmt.Println(\"[MockDockerClient] - ctx: \", ctx)\n\t\tfmt.Println(\"[MockDockerClient] - containerID: \", containerID)\n\t\tfmt.Println(\"[MockDockerClient] - cmd: \", cmd)\n\t\tfmt.Println(\"[MockDockerClient] - attachStdout: \", attachStdout)\n\t\tfmt.Println(\"[MockDockerClient] - attachStderr: \", attachStderr)\n\t\treturn c.ContainerExecCreateFn(ctx, containerID, cmd, attachStdout, attachStderr)\n\t}\n\tpanic(fmt.Sprintf(\"No function defined for: %s\", utils.CurrentFunctionName()))\n}", "func (b *Botanist) WaitUntilContainerRuntimeResourcesReady(ctx context.Context) error {\n\tfns := []flow.TaskFn{}\n\n\tfor _, worker := range b.Shoot.Info.Spec.Provider.Workers {\n\t\tif worker.CRI != nil {\n\t\t\tfor _, containerRuntime := range worker.CRI.ContainerRuntimes {\n\t\t\t\tvar (\n\t\t\t\t\tname = 
getContainerRuntimeKey(containerRuntime.Type, worker.Name)\n\t\t\t\t\tnamespace = b.Shoot.SeedNamespace\n\t\t\t\t)\n\t\t\t\tfns = append(fns, func(ctx context.Context) error {\n\t\t\t\t\tif err := retry.UntilTimeout(ctx, DefaultInterval, shoot.ExtensionDefaultTimeout, func(ctx context.Context) (bool, error) {\n\t\t\t\t\t\treq := &extensionsv1alpha1.ContainerRuntime{}\n\t\t\t\t\t\tif err := b.K8sSeedClient.Client().Get(ctx, kutil.Key(namespace, name), req); err != nil {\n\t\t\t\t\t\t\treturn retry.SevereError(err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif err := health.CheckExtensionObject(req); err != nil {\n\t\t\t\t\t\t\tb.Logger.WithError(err).Errorf(\"Container runtime %s/%s did not get ready yet\", namespace, name)\n\t\t\t\t\t\t\treturn retry.MinorError(err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\treturn retry.Ok()\n\t\t\t\t\t}); err != nil {\n\t\t\t\t\t\treturn gardencorev1beta1helper.DetermineError(err, fmt.Sprintf(\"failed waiting for container runtime %s to be ready: %v\", name, err))\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn flow.ParallelExitOnError(fns...)(ctx)\n}", "func runSleepingContainer(c *testing.T, extraArgs ...string) string {\n\tc.Helper()\n\treturn runSleepingContainerInImage(c, \"busybox\", extraArgs...)\n}", "func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) {\n\tout, _ := dockerCmd(c, \"run\", \"-d\", \"busybox\", \"sh\", \"-c\", \"true\")\n\tcontainerID := strings.TrimSpace(out)\n\n\terr := waitInspect(containerID, \"{{.State.Running}}\", \"false\", 30*time.Second)\n\tc.Assert(err, checker.IsNil) //Container should have stopped by now\n\n\tout, _ = dockerCmd(c, \"wait\", containerID)\n\tc.Assert(strings.TrimSpace(out), checker.Equals, \"0\", check.Commentf(\"failed to set up container, %v\", out))\n\n}", "func TestCreateArgs(t *testing.T) {\n\trunCmd := exec.Command(dockerBinary, \"create\", \"busybox\", \"command\", \"arg1\", \"arg2\", \"arg with space\")\n\tout, _, _, err := 
runCommandWithStdoutStderr(runCmd)\n\tif err != nil {\n\t\tt.Fatal(out, err)\n\t}\n\n\tcleanedContainerID := stripTrailingCharacters(out)\n\n\tinspectCmd := exec.Command(dockerBinary, \"inspect\", cleanedContainerID)\n\tout, _, err = runCommandWithOutput(inspectCmd)\n\tif err != nil {\n\t\tt.Fatalf(\"out should've been a container id: %s, %v\", out, err)\n\t}\n\n\tcontainers := []struct {\n\t\tID string\n\t\tCreated time.Time\n\t\tPath string\n\t\tArgs []string\n\t\tImage string\n\t}{}\n\tif err := json.Unmarshal([]byte(out), &containers); err != nil {\n\t\tt.Fatalf(\"Error inspecting the container: %s\", err)\n\t}\n\tif len(containers) != 1 {\n\t\tt.Fatalf(\"Unexpected container count. Expected 0, received: %d\", len(containers))\n\t}\n\n\tc := containers[0]\n\tif c.Path != \"command\" {\n\t\tt.Fatalf(\"Unexpected container path. Expected command, received: %s\", c.Path)\n\t}\n\n\tb := false\n\texpected := []string{\"arg1\", \"arg2\", \"arg with space\"}\n\tfor i, arg := range expected {\n\t\tif arg != c.Args[i] {\n\t\t\tb = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(c.Args) != len(expected) || b {\n\t\tt.Fatalf(\"Unexpected args. Expected %v, received: %v\", expected, c.Args)\n\t}\n\n\tdeleteAllContainers()\n\n\tlogDone(\"create - args\")\n}" ]
[ "0.6748301", "0.6719026", "0.6384122", "0.63736033", "0.6281763", "0.61672676", "0.61550915", "0.61136335", "0.60960704", "0.608407", "0.60707927", "0.6065003", "0.6033309", "0.6006447", "0.60003614", "0.5999315", "0.59982395", "0.5979116", "0.5961838", "0.59560144", "0.59537566", "0.59454674", "0.5933249", "0.58555377", "0.58490074", "0.58404577", "0.58199954", "0.5787216", "0.5776296", "0.5775795", "0.57382923", "0.57286644", "0.5724386", "0.57209295", "0.5716956", "0.57063633", "0.5703848", "0.57011116", "0.5696709", "0.5696396", "0.56606233", "0.56384116", "0.56264", "0.56253517", "0.56201357", "0.5616692", "0.5609254", "0.560171", "0.559324", "0.559247", "0.55865526", "0.5582189", "0.5572966", "0.556832", "0.55659515", "0.555946", "0.55570376", "0.5556843", "0.55550766", "0.5551881", "0.55483574", "0.5543664", "0.554325", "0.5543151", "0.55394846", "0.5531374", "0.5528876", "0.5525537", "0.55129457", "0.5502761", "0.5501353", "0.5497181", "0.5480822", "0.5478531", "0.54753345", "0.54727453", "0.54706174", "0.5465614", "0.5461398", "0.54607767", "0.5443503", "0.54386777", "0.54377854", "0.5437088", "0.5433849", "0.54191035", "0.5418413", "0.5413275", "0.5411967", "0.54093254", "0.54089737", "0.5407786", "0.5406241", "0.5405953", "0.5405953", "0.5385174", "0.53754497", "0.537321", "0.5359778", "0.53572047", "0.53479975" ]
0.0
-1
newQuota returns a totally fake quota.
func newQuota() *catalogue.Quota { return &catalogue.Quota{ Cores: 99999, FloatingIPs: 99999, Instances: 99999, KeyPairs: 99999, RAM: 99999, Tenant: "test-tenant", } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Drive) SetQuota(value Quotaable)() {\n m.quota = value\n}", "func newQuotaPool(q int) *quotaPool {\n\tqp := &quotaPool{\n\t\tacquireChannel: make(chan int, 1),\n\t}\n\tif q > 0 {\n\t\tqp.acquireChannel <- q\n\t} else {\n\t\tqp.quota = q\n\t}\n\treturn qp\n}", "func NewQuotaRateLimit(ctx *pulumi.Context,\n\tname string, args *QuotaRateLimitArgs, opts ...pulumi.ResourceOption) (*QuotaRateLimit, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Rate == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Rate'\")\n\t}\n\tvar resource QuotaRateLimit\n\terr := ctx.RegisterResource(\"vault:index/quotaRateLimit:QuotaRateLimit\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewResourceQuota(client clientset.Interface, registry quota.Registry) (admission.Interface, error) {\n\tliveLookupCache, err := lru.New(100)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlw := &cache.ListWatch{\n\t\tListFunc: func(options api.ListOptions) (runtime.Object, error) {\n\t\t\treturn client.Core().ResourceQuotas(api.NamespaceAll).List(options)\n\t\t},\n\t\tWatchFunc: func(options api.ListOptions) (watch.Interface, error) {\n\t\t\treturn client.Core().ResourceQuotas(api.NamespaceAll).Watch(options)\n\t\t},\n\t}\n\tindexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)\n\treflector.Run()\n\treturn &quotaAdmission{\n\t\tHandler: admission.NewHandler(admission.Create, admission.Update),\n\t\tclient: client,\n\t\tindexer: indexer,\n\t\tregistry: registry,\n\t\tliveLookupCache: liveLookupCache,\n\t\tliveTTL: time.Duration(30 * time.Second),\n\t}, nil\n}", "func newQuotasManager() QuotaManager {\n\treturn &quotasManager{}\n}", "func TestRealisClient_Quota(t *testing.T) {\n\tvar resp *aurora.Response\n\tvar err error\n\n\tcpu := 3.5\n\tram := int64(20480)\n\tdisk := 
int64(10240)\n\n\tt.Run(\"Set\", func(t *testing.T) {\n\t\tresp, err = r.SetQuota(\"vagrant\", &cpu, &ram, &disk)\n\t\tassert.NoError(t, err)\n\t})\n\n\tt.Run(\"Get\", func(t *testing.T) {\n\t\t// Test GetQuota based on previously set values\n\t\tvar result *aurora.GetQuotaResult_\n\t\tresp, err = r.GetQuota(\"vagrant\")\n\t\tif resp.GetResult_() != nil {\n\t\t\tresult = resp.GetResult_().GetQuotaResult_\n\t\t}\n\t\tassert.NoError(t, err)\n\n\t\tfor _, res := range result.Quota.GetResources() {\n\t\t\tswitch true {\n\t\t\tcase res.DiskMb != nil:\n\t\t\t\tassert.Equal(t, disk, *res.DiskMb)\n\t\t\tcase res.NumCpus != nil:\n\t\t\t\tassert.Equal(t, cpu, *res.NumCpus)\n\t\t\tcase res.RamMb != nil:\n\t\t\t\tassert.Equal(t, ram, *res.RamMb)\n\t\t\t}\n\t\t}\n\t})\n}", "func NewGetQuotaQuotaDefault(code int) *GetQuotaQuotaDefault {\n\treturn &GetQuotaQuotaDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (h *handlerState) Quota(ctx context.Context, tracker attribute.Tracker, request *mixerpb.QuotaRequest, response *mixerpb.QuotaResponse) {\n\tresponse.RequestIndex = request.RequestIndex\n\tstatus := h.execute(ctx, tracker, request.AttributeUpdate, config.QuotaMethod)\n\n\tif status.Code == int32(code.Code_OK) {\n\t\tresponse.Amount = 1\n\t}\n}", "func getProjectQuota(backingFsBlockDev string, projectID quotaID) (*types.DiskQuotaSize, error) {\n\tvar d C.fs_disk_quota_t\n\n\tvar cs = C.CString(backingFsBlockDev)\n\tdefer C.free(unsafe.Pointer(cs))\n\n\t_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,\n\t\tuintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),\n\t\tuintptr(unsafe.Pointer(&d)), 0, 0)\n\tif errno != 0 {\n\t\treturn nil, fmt.Errorf(\"failed to get quota limit for projid %d on %s: %v\",\n\t\t\tprojectID, backingFsBlockDev, errno)\n\t}\n\n\treturn &types.DiskQuotaSize{\n\t\tQuota: uint64(d.d_blk_hardlimit) * 512,\n\t\tInodes: uint64(d.d_ino_hardlimit),\n\t\tQuotaUsed: uint64(d.d_bcount) * 512,\n\t\tInodesUsed: uint64(d.d_icount),\n\t}, nil\n}", 
"func (j *JuiceFSEngine) getQuota(v string) (int64, error) {\n\tq, err := resource.ParseQuantity(v)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"invalid quota %s: %v\", v, err)\n\t}\n\tqs := q.Value() / 1024 / 1024 / 1024\n\tif qs <= 0 {\n\t\treturn 0, fmt.Errorf(\"quota %s is too small, at least 1GiB for quota\", v)\n\t}\n\n\treturn qs, nil\n}", "func (p *nfsProvisioner) createQuota(directory string, capacity resource.Quantity) (string, uint16, error) {\n\tpath := path.Join(p.exportDir, directory)\n\n\tlimit := strconv.FormatInt(capacity.Value(), 10)\n\n\tblock, projectID, err := p.quotaer.AddProject(path, limit)\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"error adding project for path %s: %v\", path, err)\n\t}\n\n\terr = p.quotaer.SetQuota(projectID, path, limit)\n\tif err != nil {\n\t\tp.quotaer.RemoveProject(block, projectID)\n\t\treturn \"\", 0, fmt.Errorf(\"error setting quota for path %s: %v\", path, err)\n\t}\n\n\treturn block, projectID, nil\n}", "func CreateQuota(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {\n\tname, err := NameFromCommandArgs(cmd, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar generator kubectl.StructuredGenerator\n\tswitch generatorName := cmdutil.GetFlagString(cmd, \"generator\"); generatorName {\n\tcase cmdutil.ResourceQuotaV1GeneratorName:\n\t\tgenerator = &kubectl.ResourceQuotaGeneratorV1{\n\t\t\tName: name,\n\t\t\tHard: cmdutil.GetFlagString(cmd, \"hard\"),\n\t\t\tScopes: cmdutil.GetFlagString(cmd, \"scopes\"),\n\t\t}\n\tdefault:\n\t\treturn cmdutil.UsageError(cmd, fmt.Sprintf(\"Generator: %s not supported.\", generatorName))\n\t}\n\treturn RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{\n\t\tName: name,\n\t\tStructuredGenerator: generator,\n\t\tDryRun: cmdutil.GetFlagBool(cmd, \"dry-run\"),\n\t\tOutputFormat: cmdutil.GetFlagString(cmd, \"output\"),\n\t})\n}", "func (m *Drive) GetQuota()(Quotaable) {\n return m.quota\n}", "func NewRateLimit(client 
curator.CuratorFramework, username string,\n\ttotalAllowedQuota, baseQuota int64, lockTimeout time.Duration, refreshWindow time.Duration,\n\tenableOptimization bool, optimizationPctAsk float64, optimizationPctLeft float64) (*RateLimit, error) {\n\tvar err error\n\trl := &RateLimit{\n\t\tusername: username,\n\t\ttotalAllowedQuota: totalAllowedQuota,\n\t\tusableQuotaLeft: totalAllowedQuota,\n\t\tbaseQuota: baseQuota,\n\t\tlockTimeout: lockTimeout,\n\t\trefreshWindow: refreshWindow,\n\t\tclient: client,\n\t\tbaseQuotaPath: prefix + \"/\" + username + baseSuffix,\n\t\tusableQuotaPath: prefix + \"/\" + username + usableSuffix,\n\t\ttotalQuotaPath: prefix + \"/\" + username + totalSuffix,\n\t\trefreshQuotaPath: prefix + \"/\" + username + refreshSuffix,\n\t\toptimizationPctAsk: optimizationPctAsk,\n\t\toptimizationPctLeft: optimizationPctLeft,\n\t\tenableOptimization: enableOptimization,\n\t}\n\n\t// initialize the lock to be used and inject it wherever required.\n\trl.lock, err = recipe.NewInterProcessMutex(rl.client, lockPath+username)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = rl.create(prefix, []byte(\"\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = rl.create(prefix+\"/\"+rl.username, []byte(\"\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = rl.create(rl.baseQuotaPath, []byte(strconv.FormatInt(rl.baseQuota, 10)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = rl.create(rl.totalQuotaPath, []byte(strconv.FormatInt(rl.totalAllowedQuota, 10)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = rl.create(rl.usableQuotaPath, []byte(strconv.FormatInt(rl.usableQuotaLeft, 10)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = rl.create(rl.refreshQuotaPath, []byte(\"\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = rl.addWatch()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// concurrently look to refresh quota\n\tgo rl.refreshQuota()\n\t// mimic user requests being processed with random size\n\tgo 
rl.startRequests()\n\t// just in case there is skewness observed through loadbalancer and\n\t// quota gets concentrated on a single rate limit node\n\tgo rl.relinquish()\n\n\treturn rl, nil\n}", "func TestQuotaGetSet(t *testing.T) {\n\n\tvolumeName := \"test_quota_get_set\"\n\tquotaSize := int64(12345)\n\n\t// Setup the test\n\t_, err := client.CreateVolume(defaultCtx, volumeName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// make sure we clean up when we're done\n\tdefer client.DeleteVolume(defaultCtx, volumeName)\n\tdefer client.ClearQuota(defaultCtx, volumeName)\n\n\t// Make sure there is no quota yet\n\tquota, err := client.GetQuota(defaultCtx, volumeName)\n\tif quota != nil {\n\t\tpanic(fmt.Sprintf(\"Quota should be nil: %v\", quota))\n\t}\n\tif err == nil {\n\t\tpanic(fmt.Sprintf(\"GetQuota should return an error when there isn't a quota.\"))\n\t}\n\n\t// Set the quota\n\terr = client.SetQuotaSize(defaultCtx, volumeName, quotaSize)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Make sure the quota was set\n\tquota, err = client.GetQuota(defaultCtx, volumeName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif quota == nil {\n\t\tpanic(\"Quota should not be nil\")\n\t}\n\tif quota.Thresholds.Hard != quotaSize {\n\t\tpanic(fmt.Sprintf(\"Unexpected new quota. 
Expected: %d Actual: %d\", quotaSize, quota.Thresholds.Hard))\n\t}\n\n}", "func (q *Control) GetQuota(targetPath string, quota *Quota) error {\n\tq.RLock()\n\tprojectID, ok := q.quotas[targetPath]\n\tq.RUnlock()\n\tif !ok {\n\t\treturn errors.Errorf(\"quota not found for path: %s\", targetPath)\n\t}\n\n\t//\n\t// get the quota limit for the container's project id\n\t//\n\tvar d C.fs_disk_quota_t\n\n\tcs := C.CString(q.backingFsBlockDev)\n\tdefer C.free(unsafe.Pointer(cs))\n\n\t_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,\n\t\tuintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),\n\t\tuintptr(unsafe.Pointer(&d)), 0, 0)\n\tif errno != 0 {\n\t\treturn errors.Wrapf(errno, \"Failed to get quota limit for projid %d on %s\",\n\t\t\tprojectID, q.backingFsBlockDev)\n\t}\n\tquota.Size = uint64(d.d_blk_hardlimit) * 512\n\n\treturn nil\n}", "func newTenantLimits(manager *runtimeconfig.Manager) validation.TenantLimits {\n\treturn &runtimeConfigTenantLimits{\n\t\tmanager: manager,\n\t}\n}", "func NewProjectQuota() manager.QuotaManager {\n\tprjFile := NewProjectFile()\n\tidPaths, idNames, err := prjFile.DumpProjectIds()\n\tif err != nil {\n\t\tklog.Fatalf(\"init existing project ids failed: %v\", err)\n\t}\n\tpathIds := make(map[string]quotaID)\n\tfor id, paths := range idPaths {\n\t\tfor _, p := range paths {\n\t\t\tpathIds[p] = id\n\t\t}\n\t}\n\n\tnameIds := make(map[string]quotaID)\n\tfor id, name := range idNames {\n\t\tnameIds[name] = id\n\n\t}\n\n\treturn &projectQuota{\n\t\tpathMapBackingDev: make(map[string]*backingDev),\n\t\tidPaths: idPaths,\n\t\tidNames: idNames,\n\t\tpathIds: pathIds,\n\t\tnameIds: nameIds,\n\t\tprjFile: prjFile,\n\t}\n}", "func (b *ClusterBuilder) StorageQuota(value *ValueBuilder) *ClusterBuilder {\n\tb.storageQuota = value\n\tif value != nil {\n\t\tb.bitmap_ |= 1099511627776\n\t} else {\n\t\tb.bitmap_ &^= 1099511627776\n\t}\n\treturn b\n}", "func (p *projectQuota) GetQuota(targetPath string) (*types.DiskQuotaSize, error) 
{\n\tbackingFsBlockDev, err := p.findAvailableBackingDev(targetPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// no need to create new project id\n\tprjId, _, err := p.findOrCreateProjectId(targetPath, \"\", projIdNoCreate, !persistToFile)\n\tif err != nil {\n\t\tklog.Errorf(\"find project id err: %v\", err)\n\t\treturn nil, err\n\t}\n\n\treturn getProjectQuota(backingFsBlockDev.device, prjId)\n}", "func (c *SpaceQuotaClient) Create(ctx context.Context, r *resource.SpaceQuotaCreateOrUpdate) (*resource.SpaceQuota, error) {\n\tvar q resource.SpaceQuota\n\t_, err := c.client.post(ctx, \"/v3/space_quotas\", r, &q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &q, nil\n}", "func GetQuota(id int64) (*models.Quota, error) {\n\tq := models.Quota{ID: id}\n\terr := GetOrmer().Read(&q, \"ID\")\n\tif err == orm.ErrNoRows {\n\t\treturn nil, nil\n\t}\n\treturn &q, err\n}", "func CreateResourceQuota(parent *tenancyv1alpha1.TanzuNamespace) (metav1.Object, error) {\n\n\tfmap := template.FuncMap{\n\t\t\"defaultResourceQuotaCPURequests\": defaultResourceQuotaCPURequests,\n\t\t\"defaultResourceQuotaMemoryRequests\": defaultResourceQuotaMemoryRequests,\n\t\t\"defaultResourceQuotaCPULimits\": defaultResourceQuotaCPULimits,\n\t\t\"defaultResourceQuotaMemoryLimits\": defaultResourceQuotaMemoryLimits,\n\t}\n\n\tchildContent, err := runTemplate(\"tanzu-resource-quota\", resourceResourceQuota, parent, fmap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdecode := scheme.Codecs.UniversalDeserializer().Decode\n\tobj, _, err := decode([]byte(childContent), nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresourceObj := obj.(*k8s_api.ResourceQuota)\n\tresourceObj.Namespace = defaultNamespace(parent.Name, &parent.Spec)\n\n\treturn resourceObj, nil\n}", "func newMockTenantLimits(limits map[string]*Limits) *mockTenantLimits {\n\treturn &mockTenantLimits{\n\t\tlimits: limits,\n\t}\n}", "func FakeNewStorage() *fakeStorage {\n\treturn &fakeStorage{}\n}", "func (a 
*Client) SetQuota(params *SetQuotaParams) (*SetQuotaOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewSetQuotaParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"setQuota\",\n\t\tMethod: \"PATCH\",\n\t\tPathPattern: \"/api/v1/quotas\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &SetQuotaReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*SetQuotaOK), nil\n\n}", "func (a *Client) GetQuota(params *GetQuotaParams) (*GetQuotaOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetQuotaParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getQuota\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/quotas\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetQuotaReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetQuotaOK), nil\n\n}", "func (client QuotaRequestClient) Create(ctx context.Context, subscriptionID string, providerID string, location string, resourceName string, createQuotaRequest CurrentQuotaLimitBase, ifMatch string) (result QuotaRequestCreateFuture, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/QuotaRequestClient.Create\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response() != nil {\n\t\t\t\tsc = result.Response().StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\treq, err := client.CreatePreparer(ctx, 
subscriptionID, providerID, location, resourceName, createQuotaRequest, ifMatch)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"reservations.QuotaRequestClient\", \"Create\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CreateSender(req)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"reservations.QuotaRequestClient\", \"Create\", result.Response(), \"Failure sending request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func TestV3StorageQuotaApply(t *testing.T) {\n\tintegration.BeforeTest(t)\n\tquotasize := int64(16 * os.Getpagesize())\n\n\tclus := integration.NewCluster(t, &integration.ClusterConfig{Size: 2})\n\tdefer clus.Terminate(t)\n\tkvc1 := integration.ToGRPC(clus.Client(1)).KV\n\n\t// Set a quota on one node\n\tclus.Members[0].QuotaBackendBytes = quotasize\n\tclus.Members[0].Stop(t)\n\tclus.Members[0].Restart(t)\n\tclus.WaitMembersForLeader(t, clus.Members)\n\tkvc0 := integration.ToGRPC(clus.Client(0)).KV\n\twaitForRestart(t, kvc0)\n\n\tkey := []byte(\"abc\")\n\n\t// test small put still works\n\tsmallbuf := make([]byte, 1024)\n\t_, serr := kvc0.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf})\n\tif serr != nil {\n\t\tt.Fatal(serr)\n\t}\n\n\t// test big put\n\tbigbuf := make([]byte, quotasize)\n\t_, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// quorum get should work regardless of whether alarm is raised\n\t_, err = kvc0.Range(context.TODO(), &pb.RangeRequest{Key: []byte(\"foo\")})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// wait until alarm is raised for sure-- poll the alarms\n\tstopc := time.After(5 * time.Second)\n\tfor {\n\t\treq := &pb.AlarmRequest{Action: pb.AlarmRequest_GET}\n\t\tresp, aerr := clus.Members[0].Server.Alarm(context.TODO(), req)\n\t\tif aerr != nil {\n\t\t\tt.Fatal(aerr)\n\t\t}\n\t\tif len(resp.Alarms) != 0 {\n\t\t\tbreak\n\t\t}\n\t\tselect {\n\t\tcase 
<-stopc:\n\t\t\tt.Fatalf(\"timed out waiting for alarm\")\n\t\tcase <-time.After(10 * time.Millisecond):\n\t\t}\n\t}\n\n\t// txn with non-mutating Ops should go through when NOSPACE alarm is raised\n\t_, err = kvc0.Txn(context.TODO(), &pb.TxnRequest{\n\t\tCompare: []*pb.Compare{\n\t\t\t{\n\t\t\t\tKey: key,\n\t\t\t\tResult: pb.Compare_EQUAL,\n\t\t\t\tTarget: pb.Compare_CREATE,\n\t\t\t\tTargetUnion: &pb.Compare_CreateRevision{CreateRevision: 0},\n\t\t\t},\n\t\t},\n\t\tSuccess: []*pb.RequestOp{\n\t\t\t{\n\t\t\t\tRequest: &pb.RequestOp_RequestDeleteRange{\n\t\t\t\t\tRequestDeleteRange: &pb.DeleteRangeRequest{\n\t\t\t\t\t\tKey: key,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.TODO(), integration.RequestWaitTimeout)\n\tdefer cancel()\n\n\t// small quota machine should reject put\n\tif _, err := kvc0.Put(ctx, &pb.PutRequest{Key: key, Value: smallbuf}); err == nil {\n\t\tt.Fatalf(\"past-quota instance should reject put\")\n\t}\n\n\t// large quota machine should reject put\n\tif _, err := kvc1.Put(ctx, &pb.PutRequest{Key: key, Value: smallbuf}); err == nil {\n\t\tt.Fatalf(\"past-quota instance should reject put\")\n\t}\n\n\t// reset large quota node to ensure alarm persisted\n\tclus.Members[1].Stop(t)\n\tclus.Members[1].Restart(t)\n\tclus.WaitMembersForLeader(t, clus.Members)\n\n\tif _, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err == nil {\n\t\tt.Fatalf(\"alarmed instance should reject put after reset\")\n\t}\n}", "func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {\n\tvar d C.fs_disk_quota_t\n\td.d_version = C.FS_DQUOT_VERSION\n\td.d_id = C.__u32(projectID)\n\td.d_flags = C.XFS_PROJ_QUOTA\n\n\td.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT\n\td.d_blk_hardlimit = C.__u64(quota.Size / 512)\n\td.d_blk_softlimit = d.d_blk_hardlimit\n\n\tcs := C.CString(backingFsBlockDev)\n\tdefer C.free(unsafe.Pointer(cs))\n\n\t_, 
_, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,\n\t\tuintptr(unsafe.Pointer(cs)), uintptr(d.d_id),\n\t\tuintptr(unsafe.Pointer(&d)), 0, 0)\n\tif errno != 0 {\n\t\treturn errors.Wrapf(errno, \"failed to set quota limit for projid %d on %s\",\n\t\t\tprojectID, backingFsBlockDev)\n\t}\n\n\treturn nil\n}", "func getResourceQuota(cpu, memory int) *v1.ResourceQuota {\n\t// Don's use \"MustParse\" it might panic at runtime , have some validation\n\thard := v1.ResourceList{\n\t\tv1.ResourceCPU: resource.MustParse(getCPUMilli(cpu)),\n\t\tv1.ResourceMemory: resource.MustParse(getMemoryMI(memory)),\n\t}\n\n\treturn &v1.ResourceQuota{\n\t\tSpec: v1.ResourceQuotaSpec{\n\t\t\tHard: hard,\n\t\t},\n\t}\n}", "func Quota(path string, size ...string) string {\n\tif len(size) > 0 && len(size[0]) > 0 {\n\t\tout, err := exec.Command(\"btrfs\", \"qgroup\", \"limit\", size[0]+\"G\", config.Agent.LxcPrefix+path).CombinedOutput()\n\t\tlog.Check(log.ErrorLevel, \"Limiting BTRFS subvolume \"+config.Agent.LxcPrefix+path+\" \"+string(out), err)\n\t\texec.Command(\"btrfs\", \"quota\", \"rescan\", \"-w\", config.Agent.LxcPrefix).Run()\n\t}\n\treturn Stat(path, \"quota\", false)\n}", "func (q *Control) GetQuota(targetPath string, quota *Quota) error {\n\treturn ErrQuotaNotSupported\n}", "func (o UsagePlanOutput) Quota() UsagePlanQuotaSettingsPtrOutput {\n\treturn o.ApplyT(func(v *UsagePlan) UsagePlanQuotaSettingsPtrOutput { return v.Quota }).(UsagePlanQuotaSettingsPtrOutput)\n}", "func TestQuota(t *testing.T) {\n\tinitializationCh := make(chan struct{})\n\t// Set up a master\n\tvar m *master.Master\n\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t<-initializationCh\n\t\tm.Handler.ServeHTTP(w, req)\n\t}))\n\tdefer s.Close()\n\tadmissionCh := make(chan struct{})\n\tclientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: 
testapi.Default.GroupVersion()}})\n\tadmission, err := resourcequota.NewResourceQuota(clientset, quotainstall.NewRegistry(clientset), 5, admissionCh)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tdefer close(admissionCh)\n\n\tmasterConfig := framework.NewIntegrationTestMasterConfig()\n\tmasterConfig.AdmissionControl = admission\n\tm, err = master.New(masterConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"Error in bringing up the master: %v\", err)\n\t}\n\tclose(initializationCh)\n\n\tns := framework.CreateTestingNamespace(\"quotaed\", s, t)\n\tdefer framework.DeleteTestingNamespace(ns, s, t)\n\tns2 := framework.CreateTestingNamespace(\"non-quotaed\", s, t)\n\tdefer framework.DeleteTestingNamespace(ns2, s, t)\n\n\tcontrollerCh := make(chan struct{})\n\tdefer close(controllerCh)\n\n\tgo replicationcontroller.NewReplicationManagerFromClientForIntegration(clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas, 4096).\n\t\tRun(3, controllerCh)\n\n\tresourceQuotaRegistry := quotainstall.NewRegistry(clientset)\n\tgroupKindsToReplenish := []unversioned.GroupKind{\n\t\tapi.Kind(\"Pod\"),\n\t}\n\tresourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{\n\t\tKubeClient: clientset,\n\t\tResyncPeriod: controller.NoResyncPeriodFunc,\n\t\tRegistry: resourceQuotaRegistry,\n\t\tGroupKindsToReplenish: groupKindsToReplenish,\n\t\tReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,\n\t\tControllerFactory: resourcequotacontroller.NewReplenishmentControllerFactoryFromClient(clientset),\n\t}\n\tgo resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(2, controllerCh)\n\n\tstartTime := time.Now()\n\tscale(t, ns2.Name, clientset)\n\tendTime := time.Now()\n\tt.Logf(\"Took %v to scale up without quota\", endTime.Sub(startTime))\n\n\tquota := &api.ResourceQuota{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"quota\",\n\t\t\tNamespace: ns.Name,\n\t\t},\n\t\tSpec: 
api.ResourceQuotaSpec{\n\t\t\tHard: api.ResourceList{\n\t\t\t\tapi.ResourcePods: resource.MustParse(\"1000\"),\n\t\t\t},\n\t\t},\n\t}\n\twaitForQuota(t, quota, clientset)\n\n\tstartTime = time.Now()\n\tscale(t, \"quotaed\", clientset)\n\tendTime = time.Now()\n\tt.Logf(\"Took %v to scale up with quota\", endTime.Sub(startTime))\n}", "func setProjectQuota(backingFsBlockDev string, projectID quotaID, quota *types.DiskQuotaSize) error {\n\tklog.V(4).Infof(\"Setting projec quota for %d: %+v\", projectID, quota)\n\n\tvar d C.fs_disk_quota_t\n\td.d_version = C.FS_DQUOT_VERSION\n\td.d_id = C.__u32(projectID)\n\td.d_flags = C.XFS_PROJ_QUOTA\n\n\td.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT | C.FS_DQ_IHARD | C.FS_DQ_ISOFT\n\td.d_blk_hardlimit = C.__u64(quota.Quota / 512)\n\td.d_blk_softlimit = d.d_blk_hardlimit\n\td.d_ino_hardlimit = C.__u64(quota.Inodes)\n\td.d_ino_softlimit = d.d_ino_hardlimit\n\n\tvar cs = C.CString(backingFsBlockDev)\n\tdefer C.free(unsafe.Pointer(cs))\n\n\t_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,\n\t\tuintptr(unsafe.Pointer(cs)), uintptr(d.d_id),\n\t\tuintptr(unsafe.Pointer(&d)), 0, 0)\n\tif errno != 0 {\n\t\treturn fmt.Errorf(\"failed to set quota limit for projid %d on %s: %v\",\n\t\t\tprojectID, backingFsBlockDev, errno)\n\t}\n\n\treturn nil\n}", "func testQuotaContext(ctx context.Context, t *testing.T, policies []*quotapb.Policy) context.Context {\n\t// Create a miniredis instance for the luci-quota library to use.\n\t// Arguably it would be better to stub out the quota implementation\n\t// entirely, but that's not cleanly supported by luci-quota.\n\ts, err := miniredis.Run()\n\tSo(err, ShouldBeNil)\n\tt.Cleanup(s.Close)\n\tctx = redisconn.UsePool(ctx, &redis.Pool{\n\t\tDial: func() (redis.Conn, error) {\n\t\t\treturn redis.Dial(\"tcp\", s.Addr())\n\t\t},\n\t})\n\t// Use a simple quota config that grants a generous 1k RPCs (per 10\n\t// minutes) to every 'user', which should be plenty for unit tests.\n\tquotacfg, err := 
quotaconfig.NewMemory(ctx, policies)\n\tSo(err, ShouldBeNil)\n\treturn quota.Use(ctx, quotacfg)\n}", "func (d *dir) setQuota(path string, volID int64, sizeBytes int64) error {\n\tif volID == volIDQuotaSkip {\n\t\t// Disabled on purpose, just ignore.\n\t\treturn nil\n\t}\n\n\tif volID == 0 {\n\t\treturn fmt.Errorf(\"Missing volume ID\")\n\t}\n\n\tok, err := quota.Supported(path)\n\tif err != nil || !ok {\n\t\tif sizeBytes > 0 {\n\t\t\t// Skipping quota as underlying filesystem doesn't suppport project quotas.\n\t\t\td.logger.Warn(\"The backing filesystem doesn't support quotas, skipping set quota\", logger.Ctx{\"path\": path, \"size\": sizeBytes, \"volID\": volID})\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tprojectID := d.quotaProjectID(volID)\n\tcurrentProjectID, err := quota.GetProject(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Remove current project if desired project ID is different.\n\tif currentProjectID != d.quotaProjectID(volID) {\n\t\terr = quota.DeleteProject(path, currentProjectID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Initialise the project.\n\terr = quota.SetProject(path, projectID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set the project quota size.\n\treturn quota.SetProjectQuota(path, projectID, sizeBytes)\n}", "func (j *JuiceFSEngine) genQuotaCmd(value *JuiceFS, mount datav1alpha1.Mount) error {\n\toptions := mount.Options\n\tfor k, v := range options {\n\t\tif k == \"quota\" {\n\t\t\tqs, err := j.getQuota(v)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"invalid quota %s\", v)\n\t\t\t}\n\t\t\tif value.Fuse.SubPath == \"\" {\n\t\t\t\treturn fmt.Errorf(\"subPath must be set when quota is enabled\")\n\t\t\t}\n\t\t\tif value.Edition == CommunityEdition {\n\t\t\t\t// ce\n\t\t\t\t// juicefs quota set ${metaurl} --path ${path} --capacity ${capacity}\n\t\t\t\tvalue.Configs.QuotaCmd = fmt.Sprintf(\"%s quota set %s --path %s --capacity %d\", common.JuiceCeCliPath, value.Source, value.Fuse.SubPath, 
qs)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// ee\n\t\t\t// juicefs quota set ${metaurl} --path ${path} --capacity ${capacity}\n\t\t\tcli := common.JuiceCliPath\n\t\t\tvalue.Configs.QuotaCmd = fmt.Sprintf(\"%s quota set %s --path %s --capacity %d\", cli, value.Source, value.Fuse.SubPath, qs)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}", "func (c *SpaceQuotaClient) Get(ctx context.Context, guid string) (*resource.SpaceQuota, error) {\n\tvar q resource.SpaceQuota\n\terr := c.client.get(ctx, path.Format(\"/v3/space_quotas/%s\", guid), &q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &q, nil\n}", "func TenantResourceQuota(tenant string) string {\n\treturn tenant\n}", "func QuotaStorage(v int) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldQuotaStorage), v))\n\t})\n}", "func NewCmdCreateQuota(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"quota NAME [--hard=key1=value1,key2=value2] [--scopes=Scope1,Scope2] [--dry-run=bool]\",\n\t\tAliases: []string{\"resourcequota\"},\n\t\tShort: i18n.T(\"Create a quota with the specified name.\"),\n\t\tLong: quotaLong,\n\t\tExample: quotaExample,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\terr := CreateQuota(f, cmdOut, cmd, args)\n\t\t\tcmdutil.CheckErr(err)\n\t\t},\n\t}\n\n\tcmdutil.AddApplyAnnotationFlags(cmd)\n\tcmdutil.AddValidateFlags(cmd)\n\tcmdutil.AddPrinterFlags(cmd)\n\tcmdutil.AddGeneratorFlags(cmd, cmdutil.ResourceQuotaV1GeneratorName)\n\tcmd.Flags().String(\"hard\", \"\", i18n.T(\"A comma-delimited set of resource=quantity pairs that define a hard limit.\"))\n\tcmd.Flags().String(\"scopes\", \"\", i18n.T(\"A comma-delimited set of quota scopes that must all match each object tracked by the quota.\"))\n\treturn cmd\n}", "func New(opts Options) (*Queue, error) {\n\tif opts.HardLimit <= 0 || opts.HardLimit < opts.SoftQuota {\n\t\treturn nil, errHardLimit\n\t}\n\tif opts.BurstCredit < 0 {\n\t\treturn nil, 
errBurstCredit\n\t}\n\tif opts.SoftQuota <= 0 {\n\t\topts.SoftQuota = opts.HardLimit\n\t}\n\tif opts.BurstCredit == 0 {\n\t\topts.BurstCredit = float64(opts.SoftQuota)\n\t}\n\tsentinel := new(entry)\n\tq := &Queue{\n\t\tsoftQuota: opts.SoftQuota,\n\t\thardLimit: opts.HardLimit,\n\t\tcredit: opts.BurstCredit,\n\t\tback: sentinel,\n\t\tfront: sentinel,\n\t}\n\tq.nempty = sync.NewCond(&q.mu)\n\treturn q, nil\n}", "func (s *grpcServer) Quota(stream mixerpb.Mixer_QuotaServer) error {\n\treturn s.dispatcher(stream, \"/istio.mixer.v1.Mixer/Quota\",\n\t\tfunc() (proto.Message, proto.Message, *mixerpb.Attributes, *mixerpb.Attributes, *rpc.Status) {\n\t\t\trequest := &mixerpb.QuotaRequest{}\n\t\t\tresponse := &mixerpb.QuotaResponse{}\n\t\t\tresponse.AttributeUpdate = &mixerpb.Attributes{}\n\t\t\treturn request, response, &request.AttributeUpdate, response.AttributeUpdate, &response.Result\n\t\t},\n\t\tfunc(ctx context.Context, requestBag *attribute.MutableBag, responseBag *attribute.MutableBag, request proto.Message, response proto.Message) {\n\t\t\ts.handlers.Quota(ctx, requestBag, responseBag, request.(*mixerpb.QuotaRequest), response.(*mixerpb.QuotaResponse))\n\t\t})\n}", "func limitPrjQuota(prj, quota string) error {\n\tlimitQuotaCmd := fmt.Sprintf(\"xfs_quota -x -c 'limit -p bsoft=%s bhard=%s %s' %s\", quota, quota, prj, conf.WORKSPACE)\n\tlog.WithFields(log.Fields{\n\t\t\"project\": prj,\n\t\t\"quota\": quota,\n\t\t\"command\": limitQuotaCmd,\n\t}).Info(\"Limit project quota\")\n\n\tlimitQuotaExecCmd := exec.Command(\"sh\", \"-c\", limitQuotaCmd)\n\tif err := limitQuotaExecCmd.Run(); err != nil {\n\t\treturn errors.Wrap(err, \"limit project quota\")\n\t}\n\n\treturn nil\n}", "func (o OpenZfsVolumeOutput) StorageCapacityQuotaGib() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *OpenZfsVolume) pulumi.IntOutput { return v.StorageCapacityQuotaGib }).(pulumi.IntOutput)\n}", "func newQuotasManager() Manager {\n\treturn &quotasManager{}\n}", "func New(c *aqm.Config) *Limiter 
{\n\tl := &Limiter{\n\t\trate: vegas.New(),\n\t\tqueue: aqm.New(c),\n\t}\n\tgo func() {\n\t\tticker := time.NewTicker(time.Second * 1)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\t<-ticker.C\n\t\t\tv := l.rate.Stat()\n\t\t\tq := l.queue.Stat()\n\t\t\tlog.Info(\"rate/limit: limit(%d) inFlight(%d) minRtt(%v) rtt(%v) codel packets(%d)\", v.Limit, v.InFlight, v.MinRTT, v.LastRTT, q.Packets)\n\t\t}\n\t}()\n\treturn l\n}", "func QuotaInstance(v int) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldQuotaInstance), v))\n\t})\n}", "func NewAccountQuotaPolicyClient(subscriptionID string) AccountQuotaPolicyClient {\n return NewAccountQuotaPolicyClientWithBaseURI(DefaultBaseURI, subscriptionID)\n}", "func NewDiskQuota(config types.DiskQuotaConfig, k8s types.K8sConfig,\n\tpodInformer cache.SharedIndexInformer) DiskQuotaInterface {\n\n\t// init all kinds of volume manager\n\tvolumeQuotaManagers := []volume.VolumeQuotaManager{\n\t\tvolume.NewRootFsDiskQuota(config.ContainerRuntime, config.VolumeSizes[types.VolumeTypeRootFs]),\n\t\tvolume.NewEmptyDirQuota(config.VolumeSizes[types.VolumeTypeEmptyDir], k8s.KubeletRootDir),\n\t\tvolume.NewHostPathQuota(config.VolumeSizes[types.VolumeTypeHostPath]),\n\t}\n\treturn &diskQuota{\n\t\tDiskQuotaConfig: config,\n\t\tpodInformer: podInformer,\n\t\tquotaManager: projectquota.NewProjectQuota(),\n\t\tvolumeQuotaManagers: volumeQuotaManagers,\n\t\thandedPods: make(map[k8stypes.UID]*PodVolumes),\n\t}\n}", "func AddQuota(quota models.Quota) (int64, error) {\n\tnow := time.Now()\n\tquota.CreationTime = now\n\tquota.UpdateTime = now\n\treturn GetOrmer().Insert(&quota)\n}", "func NewRateLimit(limit int, deltat time.Duration) *RateLimit {\n\treturn &RateLimit{Rate{NewCounter(0), deltat}, limit, time.Now()}\n}", "func (s *Module) createSubvolWithQuota(size gridtypes.Unit, name string) (filesystem.Volume, error) {\n\tvolume, err := s.createSubvol(size, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif 
err = volume.Limit(uint64(size)); err != nil {\n\t\tlog.Error().Err(err).Str(\"volume\", volume.Path()).Msg(\"failed to set volume size limit\")\n\t\treturn nil, err\n\t}\n\n\treturn volume, nil\n}", "func initPrjQuota(prj, quota string) error {\n\tinitQuotaCmd := fmt.Sprintf(\"xfs_quota -x -c 'project -s %s' %s\", prj, conf.WORKSPACE)\n\tlog.WithFields(log.Fields{\n\t\t\"project\": prj,\n\t\t\"quota\": quota,\n\t\t\"command\": initQuotaCmd,\n\t}).Info(\"Init project quota\")\n\n\tinitQuotaExecCmd := exec.Command(\"bash\", \"-c\", initQuotaCmd)\n\tif err := initQuotaExecCmd.Run(); err != nil {\n\t\treturn errors.Wrap(err, \"init project quota\")\n\t}\n\n\treturn nil\n}", "func NewSpaceQuotaListOptions() *SpaceQuotaListOptions {\n\treturn &SpaceQuotaListOptions{\n\t\tListOptions: NewListOptions(),\n\t}\n}", "func newAccount() *Account {\n\treturn &Account{\n\t\tblocks: make(map[string]uint64),\n\t}\n}", "func New(quantum time.Duration) *limiter {\n\tl := &limiter{\n\t\tquantum: quantum,\n\t\tschedule: make(chan ask, 1),\n\t\tclosecap: make(chan bool, 1),\n\t\tdone: make(chan bool),\n\t}\n\tl.closecap <- true\n\tgo l.run()\n\treturn l\n}", "func NewItemQuotaRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*ItemQuotaRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewItemQuotaRequestBuilderInternal(urlParams, requestAdapter)\n}", "func newRateLimiter() *middlewares.Limiter {\n\treturn middlewares.NewLimiter(newRedis())\n}", "func NewLimiter(rate float64, burst float64, initialTokens float64) *Limiter {\n\treturn &Limiter{\n\t\trate: rate,\n\t\tburst: burst,\n\t\ttokens: initialTokens,\n\t\tlast: time.Now(),\n\t}\n}", "func QuotaStorageGT(v int) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.GT(s.C(FieldQuotaStorage), v))\n\t})\n}", "func newRingBuf(size int) *CircularQueue {\n\tvar buf = 
new(CircularQueue)\n\tbuf.Make(size)\n\tglog.Info(\"maximum backups: \" + strconv.Itoa(size))\n\treturn buf\n}", "func DiskQuota(path string, size ...string) string {\n\tparent := id(path)\n\texec.Command(\"btrfs\", \"qgroup\", \"create\", \"1/\"+parent, config.Agent.LxcPrefix+path).Run()\n\texec.Command(\"btrfs\", \"qgroup\", \"assign\", \"0/\"+id(path+\"/opt\"), \"1/\"+parent, config.Agent.LxcPrefix+path).Run()\n\texec.Command(\"btrfs\", \"qgroup\", \"assign\", \"0/\"+id(path+\"/var\"), \"1/\"+parent, config.Agent.LxcPrefix+path).Run()\n\texec.Command(\"btrfs\", \"qgroup\", \"assign\", \"0/\"+id(path+\"/home\"), \"1/\"+parent, config.Agent.LxcPrefix+path).Run()\n\texec.Command(\"btrfs\", \"qgroup\", \"assign\", \"0/\"+id(path+\"/rootfs\"), \"1/\"+parent, config.Agent.LxcPrefix+path).Run()\n\n\tif len(size) > 0 && len(size[0]) > 0 {\n\t\tout, err := exec.Command(\"btrfs\", \"qgroup\", \"limit\", size[0]+\"G\", \"1/\"+parent, config.Agent.LxcPrefix+path).CombinedOutput()\n\t\tlog.Check(log.ErrorLevel, \"Limiting BTRFS group 1/\"+parent+\" \"+string(out), err)\n\t\texec.Command(\"btrfs\", \"quota\", \"rescan\", \"-w\", config.Agent.LxcPrefix).Run()\n\t}\n\treturn Stat(path, \"quota\", false)\n}", "func NewGetQuotaQuotaOK() *GetQuotaQuotaOK {\n\treturn &GetQuotaQuotaOK{}\n}", "func QuotaStorageGTE(v int) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldQuotaStorage), v))\n\t})\n}", "func (q *Control) SetQuota(targetPath string, quota Quota) error {\n\tq.RLock()\n\tprojectID, ok := q.quotas[targetPath]\n\tq.RUnlock()\n\tif !ok {\n\t\tstate := getPquotaState()\n\t\tstate.Lock()\n\t\tprojectID = state.nextProjectID\n\n\t\t//\n\t\t// assign project id to new container directory\n\t\t//\n\t\terr := setProjectID(targetPath, projectID)\n\t\tif err != nil {\n\t\t\tstate.Unlock()\n\t\t\treturn err\n\t\t}\n\n\t\tstate.nextProjectID++\n\t\tstate.Unlock()\n\n\t\tq.Lock()\n\t\tq.quotas[targetPath] = 
projectID\n\t\tq.Unlock()\n\t}\n\n\t//\n\t// set the quota limit for the container's project id\n\t//\n\tlog.G(context.TODO()).Debugf(\"SetQuota(%s, %d): projectID=%d\", targetPath, quota.Size, projectID)\n\treturn setProjectQuota(q.backingFsBlockDev, projectID, quota)\n}", "func newTenantMutation(c config, op Op) *TenantMutation {\n\treturn &TenantMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeTenant,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n}", "func NewRateLimit(storage store.Store, statsClient stats.Client) *RateLimit {\n\treturn &RateLimit{storage, statsClient}\n}", "func (m *quotasManager) NewAspect(c *cpb.Combined, a adapter.Builder, env adapter.Env) (Wrapper, error) {\n\tparams := c.Aspect.Params.(*aconfig.QuotasParams)\n\n\t// TODO: get this from config\n\tif len(params.Quotas) == 0 {\n\t\tparams = &aconfig.QuotasParams{\n\t\t\tQuotas: []*aconfig.QuotasParams_Quota{\n\t\t\t\t{DescriptorName: \"RequestCount\"},\n\t\t\t},\n\t\t}\n\t}\n\n\t// TODO: get this from config\n\tdesc := []dpb.QuotaDescriptor{\n\t\t{\n\t\t\tName: \"RequestCount\",\n\t\t\tMaxAmount: 5,\n\t\t\tExpiration: &ptypes.Duration{Seconds: 1},\n\t\t},\n\t}\n\n\tmetadata := make(map[string]*quotaInfo, len(desc))\n\tdefs := make(map[string]*adapter.QuotaDefinition, len(desc))\n\tfor _, d := range desc {\n\t\tquota := findQuota(params.Quotas, d.Name)\n\t\tif quota == nil {\n\t\t\tenv.Logger().Warningf(\"No quota found for descriptor %s, skipping it\", d.Name)\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO: once we plumb descriptors into the validation, remove this err: no descriptor should make it through validation\n\t\t// if it cannot be converted into a QuotaDefinition, so we should never have to handle the error case.\n\t\tdef, err := quotaDefinitionFromProto(&d)\n\t\tif err != nil {\n\t\t\t_ = env.Logger().Errorf(\"Failed to convert quota descriptor '%s' to definition with err: %s; skipping it.\", d.Name, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tdefs[d.Name] = def\n\t\tmetadata[d.Name] = 
&quotaInfo{\n\t\t\tlabels: quota.Labels,\n\t\t\tdefinition: def,\n\t\t}\n\t}\n\n\tasp, err := a.(adapter.QuotasBuilder).NewQuotasAspect(env, c.Builder.Params.(adapter.Config), defs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &quotasWrapper{\n\t\tmanager: m,\n\t\tmetadata: metadata,\n\t\taspect: asp,\n\t\tadapter: a.Name(),\n\t}, nil\n}", "func NewQuotaListIterator(page QuotaListPage) QuotaListIterator {\n\treturn QuotaListIterator{page: page}\n}", "func New(rate int, opts ...Option) Limiter {\n\treturn newAtomicBased(rate, opts...)\n}", "func newTenantMutation(c config, op Op, opts ...tenantOption) *TenantMutation {\n\tm := &TenantMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeTenant,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}", "func newStorage(account *account, prov provider.Account, cfg *config.Storage) (*storage, error) {\n\tlog.Debug(\"Initializing Storage\")\n\n\t// Validate the config.Storage object.\n\tif cfg.Buckets == nil {\n\t\treturn nil, fmt.Errorf(\"The buckets element is missing from the storage configuration\")\n\t}\n\n\ts := &storage{\n\t\tResources: resource.NewResources(),\n\t\tStorage: cfg,\n\t\taccount: account,\n\t}\n\n\tvar err error\n\ts.providerStorage, err = prov.NewStorage(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.buckets, err = newBuckets(s, prov, cfg.Buckets)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.Append(s.buckets)\n\treturn s, nil\n}", "func ExampleShareURL_SetQuota() {\n\t// Create a request pipeline using your Storage account's name and account key.\n\taccountName, accountKey := accountInfo()\n\tcredential, err := azfile.NewSharedKeyCredential(accountName, accountKey)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tp := azfile.NewPipeline(credential, azfile.PipelineOptions{})\n\n\t// From the Azure portal, get your Storage account file service URL endpoint.\n\tsURL, _ := 
url.Parse(fmt.Sprintf(\"https://%s.file.core.windows.net/newshareforquotademo\", accountName))\n\n\t// Create an ShareURL object that wraps the share URL and a request pipeline to making requests.\n\tshareURL := azfile.NewShareURL(*sURL, p)\n\n\tctx := context.Background() // This example uses a never-expiring context\n\n\t_, err = shareURL.Create(ctx, azfile.Metadata{}, 0)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Check current usage stats for the share.\n\t// Note that the ShareStats object is part of the protocol layer for the File service.\n\tif statistics, err := shareURL.GetStatistics(ctx); err == nil {\n\t\tshareUsageGB := statistics.ShareUsageBytes/1024/1024/1024\n\t\tfmt.Printf(\"Current share usage: %d GB\\n\", shareUsageGB)\n\n\t\tshareURL.SetQuota(ctx, 10+shareUsageGB)\n\n\t\tproperties, err := shareURL.GetProperties(ctx)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfmt.Printf(\"Updated share usage: %d GB\\n\", properties.Quota())\n\t}\n\n\t_, err = shareURL.Delete(ctx, azfile.DeleteSnapshotsOptionNone)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Output:\n\t// Current share usage: 0 GB\n\t// Updated share usage: 10 GB\n}", "func New(config config.Config) (RateLimiter, error) {\n\n\tstorage, err := resolveBucketStore(config.Storage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlimits := []limit.Limit{}\n\tfor name, config := range config.Limits {\n\t\tlimit, err := limit.New(name, config, storage)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlimits = append(limits, limit)\n\t}\n\n\trateLimiter := &rateLimiter{limits: limits}\n\treturn rateLimiter, nil\n}", "func (self *QuotaSetRespV2) ToQuotaSet() nova.QuotaSet {\n\treturn nova.QuotaSet{\n\t\tCores: self.Cores,\n\t\tFixedIps: self.FixedIps,\n\t\tFloatingIps: self.FloatingIps,\n\t\tInstances: self.Instances,\n\t\tKeyPairs: self.KeyPairs,\n\t\tRam: self.Ram,\n\t\tSecurityGroups: self.SecurityGroups,\n\t}\n}", "func New(target string, opts ...grpc.DialOption) 
(*Client, error) {\n\tconn, err := grpc.Dial(target, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Client{conn, quotaservice.NewQuotaServiceClient(conn)}, nil\n}", "func New(rate int, opts ...Option) Limiter {\r\n\treturn newAtomicBased(rate, opts...)\r\n}", "func (s *QuotaService) Set(ctx context.Context, item quota.QuotaItem, inUse int) error {\n\tq, err := s.Storage.Get(ctx, item.GetName())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif inUse < 0 {\n\t\treturn quota.ErrLessThanZero\n\t}\n\tif !q.IsUnlimited() && inUse > q.Limit {\n\t\treturn &quota.QuotaExceededError{\n\t\t\tRequested: uint(inUse),\n\t\t\tAvailable: uint(q.Limit),\n\t\t}\n\t}\n\treturn s.Storage.Set(ctx, item.GetName(), inUse)\n}", "func (in *ResourceQuota) DeepCopy() *ResourceQuota {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ResourceQuota)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func reportQuota(c context.Context, payload proto.Message) error {\n\ttask, ok := payload.(*tasks.ReportQuota)\n\tswitch {\n\tcase !ok:\n\t\treturn errors.Reason(\"unexpected payload type %T\", payload).Err()\n\tcase task.GetId() == \"\":\n\t\treturn errors.Reason(\"ID is required\").Err()\n\t}\n\tp := &model.Project{\n\t\tID: task.Id,\n\t}\n\tif err := datastore.Get(c, p); err != nil {\n\t\treturn errors.Annotate(err, \"failed to fetch project\").Err()\n\t}\n\tmets := stringset.NewFromSlice(p.Config.Metric...)\n\tregs := stringset.NewFromSlice(p.Config.Region...)\n\trsp, err := getCompute(c).Regions.List(p.Config.Project).Context(c).Do()\n\tif err != nil {\n\t\tif gerr, ok := err.(*googleapi.Error); ok {\n\t\t\tlogErrors(c, task.Id, gerr)\n\t\t}\n\t\treturn errors.Annotate(err, \"failed to fetch quota\").Err()\n\t}\n\tfor _, r := range rsp.Items {\n\t\tif regs.Has(r.Name) {\n\t\t\tfor _, q := range r.Quotas {\n\t\t\t\tif mets.Has(q.Metric) {\n\t\t\t\t\tmetrics.UpdateQuota(c, q.Limit, q.Usage, q.Metric, p.Config.Project, r.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func 
(m *ItemQuotaRequestBuilder) Get(ctx context.Context, requestConfiguration *ItemQuotaRequestBuilderGetRequestConfiguration)(ie98116770ca9f5eee835504331ccb9976e822c2f776cca356ee95c843b4cce86.ConnectionQuotaable, error) {\n requestInfo, err := m.ToGetRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, ie98116770ca9f5eee835504331ccb9976e822c2f776cca356ee95c843b4cce86.CreateConnectionQuotaFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(ie98116770ca9f5eee835504331ccb9976e822c2f776cca356ee95c843b4cce86.ConnectionQuotaable), nil\n}", "func (d *dir) setupInitialQuota(vol Volume) (revert.Hook, error) {\n\tif vol.IsVMBlock() {\n\t\treturn nil, nil\n\t}\n\n\tvolPath := vol.MountPath()\n\n\t// Get the volume ID for the new volume, which is used to set project quota.\n\tvolID, err := d.getVolID(vol.volType, vol.name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\t// Define a function to revert the quota being setup.\n\trevertFunc := func() { _ = d.deleteQuota(volPath, volID) }\n\trevert.Add(revertFunc)\n\n\t// Initialise the volume's project using the volume ID and set the quota.\n\tsizeBytes, err := units.ParseByteSizeString(vol.ConfigSize())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = d.setQuota(volPath, volID, sizeBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert.Success()\n\treturn revertFunc, nil\n}", "func New(qps int64) *limiter {\n\tif qps <= 0 {\n\t\treturn 
nil\n\t}\n\n\trl := &limiter{\n\t\tqps: qps,\n\t}\n\trl.current = make(map[string]int64, 0)\n\n\t// launch a goroutine to reset the counter every second\n\tgo rl.reset()\n\n\treturn rl\n}", "func NewQuotaListPage(getNextPage func(context.Context, QuotaList) (QuotaList, error)) QuotaListPage {\n\treturn QuotaListPage{fn: getNextPage}\n}", "func (client AccountQuotaPolicyClient) CreateOrUpdateSender(req *http.Request) (future AccountQuotaPolicyCreateOrUpdateFuture, err error) {\n var resp *http.Response\n resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))\n if err != nil {\n return\n }\n future.Future, err = azure.NewFutureFromResponse(resp)\n return\n }", "func newQueue(size int) metricQueue {\n\treturn metricQueue{elt: make([]MetricElt, size), start: 0, currentSize: 0, size: size}\n}", "func newClusterResourceQuotas(c *Client) *clusterResourceQuotas {\n\treturn &clusterResourceQuotas{\n\t\tr: c,\n\t}\n}", "func (m *Manager) NewRateLimit(conf ratelimit.Config) (ratelimit.V1, error) {\n\treturn bundle.AllRateLimits.Init(conf, m)\n}", "func newRateLimiter() *rateLimiter {\n\tdecayFactor := 9.0 / 8.0\n\treturn &rateLimiter{\n\t\tstats: info.RateLimiterStats{\n\t\t\tTargetRate: 1,\n\t\t},\n\t\tdecayPeriod: 5 * time.Second,\n\t\tdecayFactor: decayFactor,\n\t\texit: make(chan struct{}),\n\t}\n}", "func (*QuotaMetric) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_quota_quota_proto_rawDescGZIP(), []int{0}\n}", "func NewHDD() *pb.Storage {\r\n\tmemTB := randomInt(1, 6)\r\n\r\n\thdd := &pb.Storage{\r\n\t\tDriver: pb.Storage_HDD,\r\n\t\tMemory: &pb.Memory{\r\n\t\t\tValue: uint64(memTB),\r\n\t\t\tUnit: pb.Memory_TERABYTE,\r\n\t\t},\r\n\t}\r\n\r\n\treturn hdd\r\n}", "func newRateLimiter(delay time.Duration) *rateLimiter {\n\treturn &rateLimiter{\n\t\tdelay: delay,\n\t\tops: make(map[string]time.Time),\n\t}\n}", "func (i *IpScheduler) startGetQuota(ctx context.Context) {\n\ti.setQuota()\n\tticker := time.NewTicker(30 * time.Second)\n\tfor 
{\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\ti.setQuota()\n\t\tcase <-ctx.Done():\n\t\t\tblog.Warnf(\"get quota loop end\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func (ca *CreateAction) createQuotaToStore() error {\n\tcreateTime := time.Now().Format(time.RFC3339)\n\tca.quota.ClusterName = ca.allocatedCluster\n\tnewQuota := &cmproto.ResourceQuota{\n\t\tNamespace: ca.req.Namespace,\n\t\tFederationClusterID: ca.req.FederationClusterID,\n\t\tClusterID: ca.allocatedCluster,\n\t\tRegion: ca.req.Region,\n\t\tResourceQuota: ca.req.ResourceQuota,\n\t\tCreateTime: createTime,\n\t\tUpdateTime: createTime,\n\t}\n\tif err := ca.model.CreateQuota(ca.ctx, newQuota); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (q *Control) SetQuota(targetPath string, quota Quota) error {\n\treturn ErrQuotaNotSupported\n}", "func newNGram(n int) *nGram {\n\tngram := new(nGram)\n\tngram.nValue = n\n\treturn ngram\n}", "func (in *PoolQuota) DeepCopy() *PoolQuota {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PoolQuota)\n\tin.DeepCopyInto(out)\n\treturn out\n}" ]
[ "0.65636003", "0.6540336", "0.64748436", "0.63845986", "0.63766193", "0.6278411", "0.6246568", "0.6214396", "0.6117383", "0.60835624", "0.60697496", "0.60086185", "0.5968151", "0.5924283", "0.590642", "0.58879817", "0.58335894", "0.5798567", "0.57906127", "0.5765551", "0.5741168", "0.5736799", "0.57336736", "0.5708075", "0.5693048", "0.56813186", "0.567721", "0.56383324", "0.56216735", "0.56161636", "0.56052935", "0.56001145", "0.558218", "0.55751824", "0.5557265", "0.5556519", "0.5531916", "0.5525171", "0.55202", "0.55185145", "0.5513193", "0.5484314", "0.5474666", "0.54588515", "0.5436738", "0.54015946", "0.53898525", "0.53830665", "0.5371655", "0.536557", "0.53622556", "0.5347724", "0.5340616", "0.53238934", "0.53207856", "0.5298706", "0.52985734", "0.5288021", "0.5284005", "0.52755666", "0.5273968", "0.52592164", "0.52514267", "0.5242503", "0.52369946", "0.52334803", "0.5227401", "0.52218086", "0.521287", "0.5182934", "0.5180556", "0.5170672", "0.5168308", "0.5160642", "0.5154598", "0.51361346", "0.51286477", "0.5125424", "0.5123427", "0.51095253", "0.5105141", "0.5104127", "0.51038563", "0.51011044", "0.5094543", "0.5087118", "0.5079894", "0.5067787", "0.5066487", "0.50629133", "0.50587773", "0.5056049", "0.50546485", "0.50540805", "0.5053207", "0.5050775", "0.50489247", "0.50443137", "0.50369287", "0.5034585" ]
0.8754749
0
equalFunc returns a function that tests the equality of two values. It fails if there is a type mismatch.
func equalFunc(t *testing.T) func(got, want interface{}) { return func(a, b interface{}) { t.Helper() if !reflect.DeepEqual(a, b) { t.Fatalf("mismatch: got %v, wanted %v", a, b) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func EqFn(a interface{}, b interface{}) bool {\n\treturn a == b\n}", "func (f *Function) Equal(other Type) bool {\n\totherFn, ok := other.(*Function)\n\tif !ok {\n\t\treturn false\n\t}\n\tif !f.Ret.Equal(otherFn.Ret) {\n\t\treturn false\n\t}\n\tif len(f.Params) != len(otherFn.Params) {\n\t\treturn false\n\t}\n\tfor i, arg := range f.Params {\n\t\tif !arg.Equal(otherFn.Params[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func funcEqual(a, b interface{}) bool {\n\tav := reflect.ValueOf(&a).Elem()\n\tbv := reflect.ValueOf(&b).Elem()\n\treturn av.InterfaceData() == bv.InterfaceData()\n}", "func (sf *ScalarFunction) Equal(ctx sctx.Context, e Expression) bool {\n\tfun, ok := e.(*ScalarFunction)\n\tif !ok {\n\t\treturn false\n\t}\n\tif sf.FuncName.L != fun.FuncName.L {\n\t\treturn false\n\t}\n\treturn sf.Function.equal(fun.Function)\n}", "func Eq(a, b interface{}, f Func) bool {\n\treturn f(a, b) == 0\n}", "func TestFunctionsEqual(t *testing.T) {\n\tcases := []struct {\n\t\tf, g interface{}\n\t}{\n\t\t{Interval, func(s, n uint64) (uint64, uint64) { return s, s + n }},\n\t\t{Butterfly, func(x0, x1 float64) (float64, float64) { return x0 + x1, x0 - x1 }},\n\t\t{Septuple, func(b byte) [7]byte { return [...]byte{b, b, b, b, b, b, b} }},\n\t\t{CriticalLine, func(t float64) complex128 { return complex(0.5, t) }},\n\t\t{NewStruct, func(w uint16, p [2]float64, q uint64) Struct { return Struct{Word: w, Point: p, Quad: q} }},\n\t}\n\tfor _, c := range cases {\n\t\tif err := quick.CheckEqual(c.f, c.g, nil); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func (t *FunctionType) Equal(other Type) bool {\n\tif other, ok := other.(*FunctionType); ok {\n\t\tif !t.RetType.Equal(other.RetType) {\n\t\t\treturn false\n\t\t}\n\t\tif len(t.ArgTypes) != len(other.ArgTypes) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range t.ArgTypes {\n\t\t\tif !t.ArgTypes[i].Equal(other.ArgTypes[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}", "func (f 
*Function) Equal(other *Function) bool {\n\treturn f.fptr == other.fptr\n}", "func funcsEqual(f1, f2 any) bool {\n\tval1 := reflect.ValueOf(f1)\n\tval2 := reflect.ValueOf(f2)\n\treturn val1.Pointer() == val2.Pointer()\n}", "func Equal(t string) MatchFunc {\n\treturn func(s string) bool { return s == t }\n}", "func TestFunctionsEqual(t *testing.T) {\n\tcases := []struct {\n\t\tf, g interface{}\n\t}{\n\t\t{Second, func(x, y int32) int32 { return y }},\n\t\t{StringLen, func(s string) int { return len(s) }},\n\t\t{SliceLen, func(s []int) int { return len(s) }},\n\t\t{SliceCap, func(s []int) int { return cap(s) }},\n\t\t{ArrayThree, func(a [7]uint64) uint64 { return a[3] }},\n\t\t{FieldByte, func(s Struct) byte { return s.Byte }},\n\t\t{FieldInt8, func(s Struct) int8 { return s.Int8 }},\n\t\t{FieldUint16, func(s Struct) uint16 { return s.Uint16 }},\n\t\t{FieldInt32, func(s Struct) int32 { return s.Int32 }},\n\t\t{FieldUint64, func(s Struct) uint64 { return s.Uint64 }},\n\t\t{FieldFloat32, func(s Struct) float32 { return s.Float32 }},\n\t\t{FieldFloat64, func(s Struct) float64 { return s.Float64 }},\n\t\t{FieldStringLen, func(s Struct) int { return len(s.String) }},\n\t\t{FieldSliceCap, func(s Struct) int { return cap(s.Slice) }},\n\t\t{FieldArrayTwoBTwo, func(s Struct) byte { return s.Array[2].B[2] }},\n\t\t{FieldArrayOneC, func(s Struct) uint16 { return s.Array[1].C }},\n\t\t{FieldComplex64Imag, func(s Struct) float32 { return imag(s.Complex64) }},\n\t\t{FieldComplex128Real, func(s Struct) float64 { return real(s.Complex128) }},\n\t}\n\tfor _, c := range cases {\n\t\tif err := quick.CheckEqual(c.f, c.g, nil); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func EqualFuncFor[T any]() func(a, b T) bool {\n\tvar t T\n\tvar i any = t\n\tswitch i.(type) {\n\tcase Equaler[T]:\n\t\treturn equalEqualer[T]\n\tcase Samer:\n\t\treturn equalSamer[T]\n\tcase nil:\n\t\treturn Equal[T]\n\t}\n\tif func() (comp bool) {\n\t\tdefer recover() //nolint:errcheck\n\t\t_ = 
map[any]struct{}{i: {}}\n\t\treturn true\n\t}() {\n\t\treturn equalComparable[T]\n\t}\n\treturn Equal[T]\n}", "func equal(lhsV, rhsV reflect.Value) bool {\n\tlhsNotValid, rhsVNotValid := !lhsV.IsValid(), !rhsV.IsValid()\n\tif lhsNotValid && rhsVNotValid {\n\t\treturn true\n\t}\n\tif (!lhsNotValid && rhsVNotValid) || (lhsNotValid && !rhsVNotValid) {\n\t\treturn false\n\t}\n\n\tlhsIsNil, rhsIsNil := isNil(lhsV), isNil(rhsV)\n\tif lhsIsNil && rhsIsNil {\n\t\treturn true\n\t}\n\tif (!lhsIsNil && rhsIsNil) || (lhsIsNil && !rhsIsNil) {\n\t\treturn false\n\t}\n\tif lhsV.Kind() == reflect.Interface || lhsV.Kind() == reflect.Ptr {\n\t\tlhsV = lhsV.Elem()\n\t}\n\tif rhsV.Kind() == reflect.Interface || rhsV.Kind() == reflect.Ptr {\n\t\trhsV = rhsV.Elem()\n\t}\n\n\t// Compare a string and a number.\n\t// This will attempt to convert the string to a number,\n\t// while leaving the other side alone. Code further\n\t// down takes care of converting ints and floats as needed.\n\tif isNum(lhsV) && rhsV.Kind() == reflect.String {\n\t\trhsF, err := tryToFloat64(rhsV)\n\t\tif err != nil {\n\t\t\t// Couldn't convert RHS to a float, they can't be compared.\n\t\t\treturn false\n\t\t}\n\t\trhsV = reflect.ValueOf(rhsF)\n\t} else if lhsV.Kind() == reflect.String && isNum(rhsV) {\n\t\t// If the LHS is a string formatted as an int, try that before trying float\n\t\tlhsI, err := tryToInt64(lhsV)\n\t\tif err != nil {\n\t\t\t// if LHS is a float, e.g. 
\"1.2\", we need to set lhsV to a float64\n\t\t\tlhsF, err := tryToFloat64(lhsV)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tlhsV = reflect.ValueOf(lhsF)\n\t\t} else {\n\t\t\tlhsV = reflect.ValueOf(lhsI)\n\t\t}\n\t}\n\n\tif isNum(lhsV) && isNum(rhsV) {\n\t\treturn fmt.Sprintf(\"%v\", lhsV) == fmt.Sprintf(\"%v\", rhsV)\n\t}\n\n\t// Try to compare bools to strings and numbers\n\tif lhsV.Kind() == reflect.Bool || rhsV.Kind() == reflect.Bool {\n\t\tlhsB, err := tryToBool(lhsV)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\trhsB, err := tryToBool(rhsV)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn lhsB == rhsB\n\t}\n\n\tif lhsV.CanInterface() && rhsV.CanInterface() {\n\t\treturn reflect.DeepEqual(lhsV.Interface(), rhsV.Interface())\n\t}\n\treturn reflect.DeepEqual(lhsV, rhsV)\n}", "func requireEqual(expected interface{}) require.ValueAssertionFunc {\n\treturn func(t require.TestingT, actual interface{}, msgAndArgs ...interface{}) {\n\t\trequire.Equal(t, expected, actual, msgAndArgs...)\n\t}\n}", "func (q1 Quat) ApproxEqualFunc(q2 Quat, f func(float32, float32) bool) bool {\n\treturn f(q1.W, q2.W) && q1.V.ApproxFuncEqual(q2.V, f)\n}", "func Equalf(t *testing.T, exp, act interface{}, msg string, v ...interface{}) {\n\tif diff := diff(exp, act); diff != \"\" {\n\t\tt.Fatalf(msg+\": %v\", append(v, diff)...)\n\t}\n}", "func Equal[T any](m1 Maybe[T], m2 Maybe[T], equalFunc func(T, T) bool) bool {\n\tif m1.IsNothing() {\n\t\treturn m2.IsNothing()\n\t}\n\n\tif m2.IsNothing() {\n\t\treturn false\n\t}\n\treturn equalFunc(m1.Value(), m2.Value())\n}", "func (tc *TestConfig) Equal(source interface{}, target interface{}) bool {\n\tvar a, b float64\n\tswitch source.(type) {\n\tcase int:\n\t\ta = float64(source.(int))\n\tcase float64:\n\t\ta = float64(source.(float64))\n\tdefault:\n\t\treturn false\n\t}\n\n\tswitch target.(type) {\n\tcase int:\n\t\tb = float64(target.(int))\n\tcase float64:\n\t\tb = float64(target.(float64))\n\tdefault:\n\t\treturn 
false\n\t}\n\n\treturn (a == b)\n}", "func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {\n\tif h, ok := t.(tHelper); ok {\n\t\th.Helper()\n\t}\n\treturn Equal(t, expected, actual, append([]interface{}{msg}, args...)...)\n}", "func (vn *VecN) ApproxEqualFunc(vn2 *VecN, comp func(float64, float64) bool) bool {\n\tif vn == nil || vn2 == nil || len(vn.vec) != len(vn2.vec) {\n\t\treturn false\n\t}\n\n\tfor i, el := range vn.vec {\n\t\tif !comp(el, vn2.vec[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func eq(o1, o2 interface{}) bool {\n\n\tf1, ok1 := ToFloat(o1)\n\tf2, ok2 := ToFloat(o2)\n\tif ok1 && ok2 {\n\t\treturn f1 == f2\n\t}\n\n\tb1, ok1 := ToBool(o1)\n\tb2, ok1 := ToBool(o2)\n\tif ok1 && ok2 {\n\t\treturn b1 == b2\n\t}\n\n\treturn o1 == o2\n}", "func (e *Expr) Equal(f *Expr) bool {\n\tif e.Kind == ExprError {\n\t\treturn false\n\t}\n\tif e.Kind != f.Kind {\n\t\treturn false\n\t}\n\tswitch e.Kind {\n\tdefault:\n\t\tpanic(\"error\")\n\tcase ExprIdent:\n\t\treturn e.Ident == f.Ident\n\tcase ExprBinop:\n\t\treturn e.Left.Equal(f.Left) && e.Right.Equal(f.Right)\n\tcase ExprUnop:\n\t\treturn e.Left.Equal(f.Left)\n\tcase ExprLit:\n\t\treturn e.Type.Equal(f.Type) && values.Equal(e.Val, f.Val)\n\tcase ExprAscribe:\n\t\treturn e.Left.Equal(f.Left) && e.Type.Equal(f.Type)\n\tcase ExprBlock:\n\t\tif len(e.Decls) != len(f.Decls) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range e.Decls {\n\t\t\tif !e.Decls[i].Equal(f.Decls[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn e.Left.Equal(f.Left)\n\tcase ExprFunc:\n\t\tif len(e.Args) != len(f.Args) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range e.Args {\n\t\t\tif !e.Args[i].Equal(f.Args[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn e.Left.Equal(f.Left)\n\tcase ExprList:\n\t\tif len(e.List) != len(f.List) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range e.List {\n\t\t\tif !e.List[i].Equal(f.List[i]) {\n\t\t\t\treturn 
false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase ExprTuple:\n\t\tif len(e.Fields) != len(f.Fields) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range e.Fields {\n\t\t\tif !e.Fields[i].Expr.Equal(f.Fields[i].Expr) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase ExprStruct:\n\t\tif len(e.Fields) != len(f.Fields) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range e.Fields {\n\t\t\tif !e.Fields[i].Equal(f.Fields[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase ExprMap:\n\t\tif len(e.Map) != len(f.Map) {\n\t\t\treturn false\n\t\t}\n\t\t// TODO(marius: This is really ugly (and quadratic!);\n\t\t// it suggests we should store map literals differently.\n\t\tfor ek, ev := range e.Map {\n\t\t\tvar fk, fv *Expr\n\t\t\tfor k, v := range f.Map {\n\t\t\t\tif ek.Equal(k) {\n\t\t\t\t\tfk, fv = k, v\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif fk == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !ev.Equal(fv) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase ExprVariant:\n\t\tswitch {\n\t\tcase e.Ident != f.Ident:\n\t\t\treturn false\n\t\tcase e.Left == nil && f.Left == nil:\n\t\t\treturn true\n\t\tcase e.Left == nil || f.Left == nil:\n\t\t\t// One is nil, but the other is not, so they're not equal.\n\t\t\treturn false\n\t\tdefault:\n\t\t\treturn e.Left.Equal(f.Left)\n\t\t}\n\tcase ExprExec:\n\t\tif len(e.Decls) != len(f.Decls) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range e.Decls {\n\t\t\tif !e.Decls[i].Equal(f.Decls[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tif !e.Type.Equal(f.Type) {\n\t\t\treturn false\n\t\t}\n\t\treturn e.Template == f.Template\n\tcase ExprCond:\n\t\treturn e.Cond.Equal(f.Cond) && e.Left.Equal(f.Left) && e.Right.Equal(f.Right)\n\tcase ExprSwitch:\n\t\tif !e.Left.Equal(f.Left) {\n\t\t\treturn false\n\t\t}\n\t\tif len(e.CaseClauses) != len(f.CaseClauses) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range e.CaseClauses {\n\t\t\tif !e.CaseClauses[i].Equal(f.CaseClauses[i]) {\n\t\t\t\treturn 
false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n}", "func equals(t types.Type, x, y value) bool {\n\tswitch x := x.(type) {\n\tcase bool:\n\t\treturn x == y.(bool)\n\tcase int:\n\t\treturn x == y.(int)\n\tcase int8:\n\t\treturn x == y.(int8)\n\tcase int16:\n\t\treturn x == y.(int16)\n\tcase int32:\n\t\treturn x == y.(int32)\n\tcase int64:\n\t\treturn x == y.(int64)\n\tcase uint:\n\t\treturn x == y.(uint)\n\tcase uint8:\n\t\treturn x == y.(uint8)\n\tcase uint16:\n\t\treturn x == y.(uint16)\n\tcase uint32:\n\t\treturn x == y.(uint32)\n\tcase uint64:\n\t\treturn x == y.(uint64)\n\tcase uintptr:\n\t\treturn x == y.(uintptr)\n\tcase float32:\n\t\treturn x == y.(float32)\n\tcase float64:\n\t\treturn x == y.(float64)\n\tcase complex64:\n\t\treturn x == y.(complex64)\n\tcase complex128:\n\t\treturn x == y.(complex128)\n\tcase string:\n\t\treturn x == y.(string)\n\tcase *value:\n\t\treturn x == y.(*value)\n\tcase chan value:\n\t\treturn x == y.(chan value)\n\tcase structure:\n\t\treturn x.eq(t, y)\n\tcase array:\n\t\treturn x.eq(t, y)\n\tcase iface:\n\t\treturn x.eq(t, y)\n\tcase rtype:\n\t\treturn x.eq(t, y)\n\t}\n\n\t// Since map, func and slice don't support comparison, this\n\t// case is only reachable if one of x or y is literally nil\n\t// (handled in eqnil) or via interface{} values.\n\tpanic(fmt.Sprintf(\"comparing uncomparable type %s\", t))\n}", "func NewBinaryEqualsFunc(key Key, values ...string) (Function, error) {\n\tsset := set.CreateStringSet(values...)\n\tif err := validateBinaryEqualsValues(binaryEquals, key, sset); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &binaryEqualsFunc{key, sset}, nil\n}", "func (ft *FieldType) Equal(other *FieldType) bool {\n\t// We do not need to compare whole `ft.flag == other.flag` when wrapping cast upon an Expression.\n\t// but need compare unsigned_flag of ft.flag.\n\t// When tp is float or double with decimal unspecified, do not check whether flen is equal,\n\t// because flen for them is useless.\n\t// The decimal field 
can be ignored if the type is int or string.\n\ttpEqual := (ft.GetType() == other.GetType()) || (ft.GetType() == mysql.TypeVarchar && other.GetType() == mysql.TypeVarString) || (ft.GetType() == mysql.TypeVarString && other.GetType() == mysql.TypeVarchar)\n\tflenEqual := ft.flen == other.flen || (ft.EvalType() == ETReal && ft.decimal == UnspecifiedLength)\n\tignoreDecimal := ft.EvalType() == ETInt || ft.EvalType() == ETString\n\tpartialEqual := tpEqual &&\n\t\t(ignoreDecimal || ft.decimal == other.decimal) &&\n\t\tft.charset == other.charset &&\n\t\tft.collate == other.collate &&\n\t\tflenEqual &&\n\t\tmysql.HasUnsignedFlag(ft.flag) == mysql.HasUnsignedFlag(other.flag)\n\tif !partialEqual || len(ft.elems) != len(other.elems) {\n\t\treturn false\n\t}\n\tfor i := range ft.elems {\n\t\tif ft.elems[i] != other.elems[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Equal(a, b interface{}) bool {\n\tif reflect.TypeOf(a) == reflect.TypeOf(b) {\n\t\treturn reflect.DeepEqual(a, b)\n\t}\n\tswitch a.(type) {\n\tcase int, int8, int16, int32, int64:\n\t\tswitch b.(type) {\n\t\tcase int, int8, int16, int32, int64:\n\t\t\treturn reflect.ValueOf(a).Int() == reflect.ValueOf(b).Int()\n\t\t}\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tswitch b.(type) {\n\t\tcase uint, uint8, uint16, uint32, uint64:\n\t\t\treturn reflect.ValueOf(a).Uint() == reflect.ValueOf(b).Uint()\n\t\t}\n\tcase float32, float64:\n\t\tswitch b.(type) {\n\t\tcase float32, float64:\n\t\t\treturn reflect.ValueOf(a).Float() == reflect.ValueOf(b).Float()\n\t\t}\n\tcase string:\n\t\tswitch b.(type) {\n\t\tcase []byte:\n\t\t\treturn a.(string) == string(b.([]byte))\n\t\t}\n\tcase []byte:\n\t\tswitch b.(type) {\n\t\tcase string:\n\t\t\treturn b.(string) == string(a.([]byte))\n\t\t}\n\t}\n\treturn false\n}", "func newBinaryEqualsFunc(key Key, values ValueSet) (Function, error) {\n\tvalueStrings, err := valuesToStringSlice(binaryEquals, values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn 
NewBinaryEqualsFunc(key, valueStrings...)\n}", "func equal(a, b float64) bool {\n\treturn math.Abs(a-b) <= equalityThreshold\n}", "func (f1 Feature) Equal(f2 Feature) bool {\n\tequal := C.OGR_F_Equal(f1.cval, f2.cval)\n\treturn equal != 0\n}", "func Equal(lhs, rhs Expression) Expression {\n\treturn NewCall(\"equal\", []Expression{lhs, rhs}, nil)\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase int:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase int64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float32:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase string, byte:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (t *T) Equalf(have, want interface{}, spec string, args ...interface{}) 
{\n\tt.EqualWithIgnoresf(have, want, nil, spec, args...)\n}", "func FEQUAL(x float64, y float64) float64 {\n\tif x == y {\n\t\treturn x\n\t} else {\n\t\treturn math.NaN()\n\t}\n}", "func IsEqual(t *testing.T, val1, val2 interface{}) bool {\n\tv1 := reflect.ValueOf(val1)\n\tv2 := reflect.ValueOf(val2)\n\n\tif v1.Kind() == reflect.Ptr {\n\t\tv1 = v1.Elem()\n\t}\n\n\tif v2.Kind() == reflect.Ptr {\n\t\tv2 = v2.Elem()\n\t}\n\n\tif !v1.IsValid() && !v2.IsValid() {\n\t\treturn true\n\t}\n\n\tswitch v1.Kind() {\n\tcase reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:\n\t\tif v1.IsNil() {\n\t\t\tv1 = reflect.ValueOf(nil)\n\t\t}\n\t}\n\n\tswitch v2.Kind() {\n\tcase reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:\n\t\tif v2.IsNil() {\n\t\t\tv2 = reflect.ValueOf(nil)\n\t\t}\n\t}\n\n\tv1Underlying := reflect.Zero(reflect.TypeOf(v1)).Interface()\n\tv2Underlying := reflect.Zero(reflect.TypeOf(v2)).Interface()\n\n\tif v1 == v1Underlying {\n\t\tif v2 == v2Underlying {\n\t\t\tgoto CASE4\n\t\t} else {\n\t\t\tgoto CASE3\n\t\t}\n\t} else {\n\t\tif v2 == v2Underlying {\n\t\t\tgoto CASE2\n\t\t} else {\n\t\t\tgoto CASE1\n\t\t}\n\t}\n\nCASE1:\n\t// fmt.Println(\"CASE 1\")\n\treturn reflect.DeepEqual(v1.Interface(), v2.Interface())\nCASE2:\n\t// fmt.Println(\"CASE 2\")\n\treturn reflect.DeepEqual(v1.Interface(), v2)\nCASE3:\n\t// fmt.Println(\"CASE 3\")\n\treturn reflect.DeepEqual(v1, v2.Interface())\nCASE4:\n\t// fmt.Println(\"CASE 4\")\n\treturn reflect.DeepEqual(v1, v2)\n}", "func Equal(a, b uint64) bool {\n\treturn a == b\n}", "func AlgMemequals() {\n\tf := func(){\n\t\tfmt.Println(\"ok\")\n\t}\n\t///usr/local/go/src/runtime/alg.go -->func efaceeq(t *_type, x, y unsafe.Pointer) bool { ...... 
}\n\t//runtime.f64equal\n\tc(T{1.0, f}, T{2.0, f})\n\tc(T{f, 1}, T{f, 2}) //runtime error: comparing uncomparable type func()\n\n\t//runtime.memequal64\n\tc(T{1, f}, T{2, f})\n\tc(T{f, 1}, T{f, 2}) //runtime error: comparing uncomparable type func()\n}", "func isFunc(a, b string) bool {\n\treturn a == b\n}", "func Equal(t t, want interface{}, have interface{}) {\n\tequal(t, want, have)\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func interfacesEqual(a, b interface{}, strict bool) (bool, error) {\n\tif a == nil && b == nil {\n\t\treturn true, nil\n\t} else if a == nil || b == nil {\n\t\treturn false, nil\n\t}\n\tswitch at := a.(type) {\n\tcase bool:\n\t\tif bt, ok := b.(bool); ok {\n\t\t\treturn at == bt, nil\n\t\t}\n\t\treturn false, newMismatchError(\"types \" + reflect.TypeOf(a).Name() + \" and \" + reflect.TypeOf(b).Name())\n\tcase string:\n\t\tif bt, ok := b.(string); ok {\n\t\t\treturn at == bt, nil\n\t\t}\n\t\treturn false, newMismatchError(\"types \" + reflect.TypeOf(a).Name() + \" and \" + reflect.TypeOf(b).Name())\n\tcase int:\n\t\tif bt, ok := b.(int); ok {\n\t\t\treturn at == bt, nil\n\t\t}\n\t\tif !strict {\n\t\t\treturn numbersEqual(float64(at), b)\n\t\t}\n\t\treturn false, 
newMismatchError(\"types \" + reflect.TypeOf(a).Name() + \" and \" + reflect.TypeOf(b).Name())\n\tcase float32:\n\t\tif bt, ok := b.(float32); ok {\n\t\t\treturn float64Equal(float64(at), float64(bt)), nil\n\t\t}\n\t\tif !strict {\n\t\t\treturn numbersEqual(float64(at), b)\n\t\t}\n\t\treturn false, newMismatchError(\"types \" + reflect.TypeOf(a).Name() + \" and \" + reflect.TypeOf(b).Name())\n\tcase float64:\n\t\tif bt, ok := b.(float64); ok {\n\t\t\treturn float64Equal(at, bt), nil\n\t\t}\n\t\tif !strict {\n\t\t\treturn numbersEqual(at, b)\n\t\t}\n\t\treturn false, newMismatchError(\"types \" + reflect.TypeOf(a).Name() + \" and \" + reflect.TypeOf(b).Name())\n\tdefault:\n\t\treturn false, newUnhandledError(\"type \" + reflect.TypeOf(a).Name())\n\t}\n}", "func EqualsRefOfFuncExpr(a, b *FuncExpr) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn a.Distinct == b.Distinct &&\n\t\tEqualsTableIdent(a.Qualifier, b.Qualifier) &&\n\t\tEqualsColIdent(a.Name, b.Name) &&\n\t\tEqualsSelectExprs(a.Exprs, b.Exprs)\n}", "func f[T interface{comparable; []byte|string}](x T) {\n _ = x == x\n}", "func ExpectEqual(alert func(format string, args ...interface{}),\n\texpected interface{}, actual interface{}) bool {\n\texpectedValue, actualValue := reflect.ValueOf(expected), reflect.ValueOf(actual)\n\tequal := false\n\tswitch {\n\tcase expected == nil && actual == nil:\n\t\treturn true\n\tcase expected != nil && actual == nil:\n\t\tequal = expectedValue.IsNil()\n\tcase expected == nil && actual != nil:\n\t\tequal = actualValue.IsNil()\n\tdefault:\n\t\tif actualType := reflect.TypeOf(actual); actualType != nil {\n\t\t\tif expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {\n\t\t\t\tequal = reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)\n\t\t\t}\n\t\t}\n\t}\n\tif !equal {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\talert(\"%s:%d: missmatch, expect %v but %v\", file, line, expected, 
actual)\n\t\treturn false\n\t}\n\treturn true\n}", "func ExpectEqual(alert func(format string, args ...interface{}),\n\texpected interface{}, actual interface{}) bool {\n\texpectedValue, actualValue := reflect.ValueOf(expected), reflect.ValueOf(actual)\n\tequal := false\n\tswitch {\n\tcase expected == nil && actual == nil:\n\t\treturn true\n\tcase expected != nil && actual == nil:\n\t\tequal = expectedValue.IsNil()\n\tcase expected == nil && actual != nil:\n\t\tequal = actualValue.IsNil()\n\tdefault:\n\t\tif actualType := reflect.TypeOf(actual); actualType != nil {\n\t\t\tif expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {\n\t\t\t\tequal = reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)\n\t\t\t}\n\t\t}\n\t}\n\tif !equal {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\talert(\"%s:%d: missmatch, expect %v but %v\", file, line, expected, actual)\n\t\treturn false\n\t}\n\treturn true\n}", "func ExpectEqual(alert func(format string, args ...interface{}),\n\texpected interface{}, actual interface{}) bool {\n\texpectedValue, actualValue := reflect.ValueOf(expected), reflect.ValueOf(actual)\n\tequal := false\n\tswitch {\n\tcase expected == nil && actual == nil:\n\t\treturn true\n\tcase expected != nil && actual == nil:\n\t\tequal = expectedValue.IsNil()\n\tcase expected == nil && actual != nil:\n\t\tequal = actualValue.IsNil()\n\tdefault:\n\t\tif actualType := reflect.TypeOf(actual); actualType != nil {\n\t\t\tif expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {\n\t\t\t\tequal = reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)\n\t\t\t}\n\t\t}\n\t}\n\tif !equal {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\talert(\"%s:%d: missmatch, expect %v but %v\", file, line, expected, actual)\n\t\treturn false\n\t}\n\treturn true\n}", "func equal(t *testing.T, expected, actual interface{}) {\n\tif !reflect.DeepEqual(expected, actual) {\n\t\tt.Errorf(\"Expected %v (type %v) - Got 
%v (type %v)\", expected, reflect.TypeOf(expected), actual, reflect.TypeOf(actual))\n\t}\n}", "func IsEqual(val, wantVal interface{}) bool {\n\t// check is nil\n\tif val == nil || wantVal == nil {\n\t\treturn val == wantVal\n\t}\n\n\tsv := reflect.ValueOf(val)\n\twv := reflect.ValueOf(wantVal)\n\n\t// don't compare func, struct\n\tif sv.Kind() == reflect.Func || sv.Kind() == reflect.Struct {\n\t\treturn false\n\t}\n\tif wv.Kind() == reflect.Func || wv.Kind() == reflect.Struct {\n\t\treturn false\n\t}\n\n\t// compare basic type: bool, int(X), uint(X), string, float(X)\n\tequal, err := Eq(sv, wv)\n\n\t// is not an basic type, eg: slice, array, map ...\n\tif err != nil {\n\t\texpBt, ok := val.([]byte)\n\t\tif !ok {\n\t\t\treturn reflect.DeepEqual(val, wantVal)\n\t\t}\n\n\t\tactBt, ok := wantVal.([]byte)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tif expBt == nil || actBt == nil {\n\t\t\treturn expBt == nil && actBt == nil\n\t\t}\n\n\t\treturn bytes.Equal(expBt, actBt)\n\t}\n\n\treturn equal\n}", "func equal(lhs, rhs semantic.Expression) semantic.Expression {\n\treturn &semantic.BinaryOp{Type: semantic.BoolType, LHS: lhs, Operator: ast.OpEQ, RHS: rhs}\n}", "func fnEquals(ctx Context, doc *JDoc, params []string) interface{} {\n\tstats := ctx.Value(EelTotalStats).(*ServiceStats)\n\tif params == nil || len(params) < 1 || len(params) > 2 {\n\t\tctx.Log().Error(\"error_type\", \"func_equals\", \"op\", \"equals\", \"cause\", \"wrong_number_of_parameters\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"wrong number of parameters in call to equals function\"), \"equals\", params})\n\t\treturn nil\n\t}\n\tpattern, err := NewJDocFromString(extractStringParam(params[0]))\n\tif err != nil && len(params) == 1 {\n\t\tctx.Log().Error(\"error_type\", \"func_equals\", \"op\", \"equals\", \"cause\", \"non_json_parameter\", \"params\", params, \"error\", err.Error())\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"non json 
parameters in call to equals function\"), \"equals\", params})\n\t\treturn nil\n\t}\n\tif len(params) == 2 {\n\t\tdoc, err = NewJDocFromString(extractStringParam(params[1]))\n\t\tif err != nil {\n\t\t\t// if not json, just do string comparison (only makes sense for the 2-param version, otherwise must be json)\n\t\t\treturn extractStringParam(params[0]) == extractStringParam(params[1])\n\t\t}\n\t}\n\treturn doc.Equals(pattern)\n}", "func TestCompareFloats(t *testing.T) {\n\ttests := []testCase{\n\t\t{\n\t\t\tname: \"floats are equal (1)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(0, defaultCollation),\n\t\t\tout: &T, op: &EqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(18)},\n\t\t},\n\t\t{\n\t\t\tname: \"floats are equal (2)\",\n\t\t\tv1: NewLiteralFloat(3549.9), v2: NewLiteralFloat(3549.9),\n\t\t\tout: &T, op: &EqualOp{},\n\t\t},\n\t\t{\n\t\t\tname: \"floats are not equal (1)\",\n\t\t\tv1: NewLiteralFloat(7858.016), v2: NewLiteralFloat(8943298.56),\n\t\t\tout: &F, op: &EqualOp{},\n\t\t},\n\t\t{\n\t\t\tname: \"floats are not equal (2)\",\n\t\t\tv1: NewLiteralFloat(351049.65), v2: NewLiteralFloat(62508.99),\n\t\t\tout: &T, op: &NotEqualOp{},\n\t\t},\n\t\t{\n\t\t\tname: \"floats are not equal (3)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &F, op: &EqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(16516.84), sqltypes.NewFloat64(219541.01)},\n\t\t},\n\t\t{\n\t\t\tname: \"float is less than float\",\n\t\t\tv1: NewLiteralFloat(3549.9), v2: NewLiteralFloat(8072),\n\t\t\tout: &T, op: &LessThanOp{},\n\t\t},\n\t\t{\n\t\t\tname: \"float is not less than float\",\n\t\t\tv1: NewLiteralFloat(3549.9), v2: NewLiteralFloat(21.564),\n\t\t\tout: &F, op: &LessThanOp{},\n\t\t},\n\t\t{\n\t\t\tname: \"float is less-equal to float (1)\",\n\t\t\tv1: NewLiteralFloat(3549.9), v2: NewLiteralFloat(9863),\n\t\t\tout: &T, op: &LessEqualOp{},\n\t\t},\n\t\t{\n\t\t\tname: \"float is less-equal to float 
(2)\",\n\t\t\tv1: NewLiteralFloat(3549.9), v2: NewLiteralFloat(3549.9),\n\t\t\tout: &T, op: &LessEqualOp{},\n\t\t},\n\t\t{\n\t\t\tname: \"float is greater than float\",\n\t\t\tv1: NewLiteralFloat(9808.549), v2: NewLiteralFloat(9808.540),\n\t\t\tout: &T, op: &GreaterThanOp{},\n\t\t},\n\t\t{\n\t\t\tname: \"float is not greater than float\",\n\t\t\tv1: NewLiteralFloat(549.02), v2: NewLiteralFloat(21579.64),\n\t\t\tout: &F, op: &GreaterThanOp{},\n\t\t},\n\t\t{\n\t\t\tname: \"float is greater-equal to float (1)\",\n\t\t\tv1: NewLiteralFloat(987.30), v2: NewLiteralFloat(15.5),\n\t\t\tout: &T, op: &GreaterEqualOp{},\n\t\t},\n\t\t{\n\t\t\tname: \"float is greater-equal to float (2)\",\n\t\t\tv1: NewLiteralFloat(3549.9), v2: NewLiteralFloat(3549.9),\n\t\t\tout: &T, op: &GreaterEqualOp{},\n\t\t},\n\t}\n\n\tfor i, tcase := range tests {\n\t\tt.Run(fmt.Sprintf(\"%d %s\", i, tcase.name), func(t *testing.T) {\n\t\t\ttcase.run(t)\n\t\t})\n\t}\n}", "func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {\n\tif h, ok := t.(tHelper); ok {\n\t\th.Helper()\n\t}\n\treturn EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)\n}", "func IsEqual(val, wantVal any) bool {\n\t// check is nil\n\tif val == nil || wantVal == nil {\n\t\treturn val == wantVal\n\t}\n\n\tsv := reflect.ValueOf(val)\n\twv := reflect.ValueOf(wantVal)\n\n\t// don't compare func, struct\n\tif sv.Kind() == reflect.Func || sv.Kind() == reflect.Struct {\n\t\treturn false\n\t}\n\tif wv.Kind() == reflect.Func || wv.Kind() == reflect.Struct {\n\t\treturn false\n\t}\n\n\t// compare basic type: bool, int(X), uint(X), string, float(X)\n\tequal, err := eq(sv, wv)\n\n\t// is not a basic type, eg: slice, array, map ...\n\tif err != nil {\n\t\texpBt, ok := val.([]byte)\n\t\tif !ok {\n\t\t\treturn reflect.DeepEqual(val, wantVal)\n\t\t}\n\n\t\tactBt, ok := wantVal.([]byte)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tif expBt == nil || actBt == nil 
{\n\t\t\treturn expBt == nil && actBt == nil\n\t\t}\n\n\t\treturn bytes.Equal(expBt, actBt)\n\t}\n\n\treturn equal\n}", "func EqualsRefOfValuesFuncExpr(a, b *ValuesFuncExpr) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn EqualsRefOfColName(a.Name, b.Name)\n}", "func Equal(q *query.Query, expected interface{}) Assertion {\n\treturn assertFunc(q, func(v interface{}) error {\n\t\tif reflect.DeepEqual(v, expected) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif t := reflect.TypeOf(v); t != reflect.TypeOf(expected) {\n\t\t\t// handle enumeration strings\n\t\t\tif s, ok := expected.(string); ok {\n\t\t\t\tif enum, ok := v.(interface {\n\t\t\t\t\tString() string\n\t\t\t\t\tEnumDescriptor() ([]byte, []int)\n\t\t\t\t}); ok {\n\t\t\t\t\tif enum.String() == s {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// try type conversion\n\t\t\tconverted, err := convert(expected, t)\n\t\t\tif err == nil {\n\t\t\t\tif reflect.DeepEqual(v, converted) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn errors.Errorf(fmt.Sprintf(\"%s: expected %T (%+v) but got %T (%+v)\", q.String(), expected, expected, v, v))\n\t\t}\n\n\t\treturn errors.Errorf(fmt.Sprintf(\"%s: expected %+v but got %+v\", q.String(), expected, v))\n\t})\n}", "func (f Float) Equal(other Float) bool {\n\treturn f.Valid == other.Valid && (!f.Valid || f.Float64 == other.Float64)\n}", "func Equal(t *testing.T, expected, actual interface{}) {\n\tt.Helper()\n\n\tif expected != actual {\n\t\tt.Errorf(`%s: expected \"%v\" actual \"%v\"`, t.Name(), expected, actual)\n\t}\n}", "func Equal(fieldPtr interface{}, value interface{}) Filter {\n\treturn &ComparisonFilter{\n\t\tLeft: fieldPtr,\n\t\tComparison: \"=\",\n\t\tRight: value,\n\t}\n}", "func TestCompareNumerics(t *testing.T) {\n\ttests := []testCase{\n\t\t{\n\t\t\tname: \"decimal and float are equal\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: 
&EqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(189.6), sqltypes.NewDecimal(\"189.6\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and float with negative values are equal\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &EqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(-98.1839), sqltypes.NewDecimal(\"-98.1839\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and float with negative values are not equal (1)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &F, op: &EqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(-98.9381), sqltypes.NewDecimal(\"-98.1839\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and float with negative values are not equal (2)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &NotEqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(-98.9381), sqltypes.NewDecimal(\"-98.1839\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and integer are equal (1)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &EqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewInt64(8979), sqltypes.NewDecimal(\"8979\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and integer are equal (2)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &EqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"8979.0000\"), sqltypes.NewInt64(8979)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and unsigned integer are equal (1)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &EqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewUint64(901), sqltypes.NewDecimal(\"901\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and unsigned integer are equal (2)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &EqualOp{},\n\t\t\trow: 
[]sqltypes.Value{sqltypes.NewDecimal(\"901.00\"), sqltypes.NewUint64(901)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and unsigned integer are not equal (1)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &NotEqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"192.129\"), sqltypes.NewUint64(192)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and unsigned integer are not equal (2)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &F, op: &EqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"192.129\"), sqltypes.NewUint64(192)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is greater than integer\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &GreaterThanOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"1.01\"), sqltypes.NewInt64(1)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is greater-equal to integer\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &GreaterEqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"1.00\"), sqltypes.NewInt64(1)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is less than integer\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &LessThanOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\".99\"), sqltypes.NewInt64(1)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is less-equal to integer\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &LessEqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"1.00\"), sqltypes.NewInt64(1)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is greater than float\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &GreaterThanOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"849.896\"), sqltypes.NewFloat64(86.568)},\n\t\t},\n\t\t{\n\t\t\tname: 
\"decimal is not greater than float\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &F, op: &GreaterThanOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"15.23\"), sqltypes.NewFloat64(8689.5)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is greater-equal to float (1)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &GreaterEqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"65\"), sqltypes.NewFloat64(65)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is greater-equal to float (2)\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &GreaterEqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"65\"), sqltypes.NewFloat64(60)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is less than float\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &LessThanOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"0.998\"), sqltypes.NewFloat64(0.999)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is less-equal to float\",\n\t\t\tv1: NewColumn(0, defaultCollation), v2: NewColumn(1, defaultCollation),\n\t\t\tout: &T, op: &LessEqualOp{},\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"1.000101\"), sqltypes.NewFloat64(1.00101)},\n\t\t},\n\t}\n\n\tfor i, tcase := range tests {\n\t\tt.Run(fmt.Sprintf(\"%d %s\", i, tcase.name), func(t *testing.T) {\n\t\t\ttcase.run(t)\n\t\t})\n\t}\n}", "func Equal(t *testing.T, expected, result interface{}) {\n\tif !reflect.DeepEqual(result, expected) {\n\t\tt.Errorf(\"should be %v instead of %v\", expected, result)\n\t}\n}", "func equalish(a, b, tolerance float64) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\n\tdiff := math.Abs(a - b)\n\n\tif diff <= tolerance {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Equal(typ string, raw1, raw2 []byte) (bool, error) {\n\treturn EqualApprox(typ, raw1, raw2, 0)\n}", "func Command_Eq(script *rex.Script, 
params []*rex.Value) {\n\tif len(params) != 2 {\n\t\trex.ErrorParamCount(\"float:eq\", \"2\")\n\t}\n\n\tresult := params[0].Float64() == params[1].Float64()\n\tif result {\n\t\tscript.RetVal = rex.NewValueBool(true)\n\t\treturn\n\t}\n\tscript.RetVal = rex.NewValueBool(false)\n}", "func equal(x, y float32, tol float64) bool {\n\tavg := (math.Abs(float64(x+y)) / 2.0)\n\tsErr := math.Abs(float64(x-y)) / (avg + 1)\n\tif sErr > tol {\n\t\treturn false\n\t}\n\treturn true\n}", "func TestCompareNumerics(t *testing.T) {\n\ttests := []testCase{\n\t\t{\n\t\t\tname: \"decimal and float are equal\",\n\t\t\tv1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(189.6), sqltypes.NewDecimal(\"189.6\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and float with negative values are equal\",\n\t\t\tv1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(-98.1839), sqltypes.NewDecimal(\"-98.1839\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and float with negative values are not equal (1)\",\n\t\t\tv1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID),\n\t\t\tout: &F, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(-98.9381), sqltypes.NewDecimal(\"-98.1839\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and float with negative values are not equal (2)\",\n\t\t\tv1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.NotEqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(-98.9381), sqltypes.NewDecimal(\"-98.1839\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and integer 
are equal (1)\",\n\t\t\tv1: NewColumn(0, sqltypes.Int64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewInt64(8979), sqltypes.NewDecimal(\"8979\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and integer are equal (2)\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"8979.0000\"), sqltypes.NewInt64(8979)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and unsigned integer are equal (1)\",\n\t\t\tv1: NewColumn(0, sqltypes.Uint64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewUint64(901), sqltypes.NewDecimal(\"901\")},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and unsigned integer are equal (2)\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Uint64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"901.00\"), sqltypes.NewUint64(901)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and unsigned integer are not equal (1)\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Uint64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.NotEqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"192.129\"), sqltypes.NewUint64(192)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal and unsigned integer are not equal (2)\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Uint64, collations.CollationBinaryID),\n\t\t\tout: &F, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"192.129\"), 
sqltypes.NewUint64(192)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is greater than integer\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.GreaterThanOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"1.01\"), sqltypes.NewInt64(1)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is greater-equal to integer\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.GreaterEqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"1.00\"), sqltypes.NewInt64(1)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is less than integer\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.LessThanOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\".99\"), sqltypes.NewInt64(1)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is less-equal to integer\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.LessEqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"1.00\"), sqltypes.NewInt64(1)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is greater than float\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.GreaterThanOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"849.896\"), sqltypes.NewFloat64(86.568)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is not greater than float\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID),\n\t\t\tout: &F, op: sqlparser.GreaterThanOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"15.23\"), 
sqltypes.NewFloat64(8689.5)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is greater-equal to float (1)\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.GreaterEqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"65\"), sqltypes.NewFloat64(65)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is greater-equal to float (2)\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.GreaterEqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"65\"), sqltypes.NewFloat64(60)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is less than float\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.LessThanOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"0.998\"), sqltypes.NewFloat64(0.999)},\n\t\t},\n\t\t{\n\t\t\tname: \"decimal is less-equal to float\",\n\t\t\tv1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.LessEqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewDecimal(\"1.000101\"), sqltypes.NewFloat64(1.00101)},\n\t\t},\n\t\t{\n\t\t\tname: \"different int types are equal for 8 bit\",\n\t\t\tv1: NewColumn(0, sqltypes.Int8, collations.CollationBinaryID), v2: NewLiteralInt(0),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewInt8(0)},\n\t\t},\n\t\t{\n\t\t\tname: \"different int types are equal for 32 bit\",\n\t\t\tv1: NewColumn(0, sqltypes.Int32, collations.CollationBinaryID), v2: NewLiteralInt(0),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewInt32(0)},\n\t\t},\n\t\t{\n\t\t\tname: \"different int types are equal for float32 bit\",\n\t\t\tv1: NewColumn(0, 
sqltypes.Float32, collations.CollationBinaryID), v2: NewLiteralFloat(1.0),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.MakeTrusted(sqltypes.Float32, []byte(\"1.0\"))},\n\t\t},\n\t\t{\n\t\t\tname: \"different unsigned int types are equal for 8 bit\",\n\t\t\tv1: NewColumn(0, sqltypes.Uint8, collations.CollationBinaryID), v2: NewLiteralInt(0),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.MakeTrusted(sqltypes.Uint8, []byte(\"0\"))},\n\t\t},\n\t\t{\n\t\t\tname: \"different unsigned int types are equal for 32 bit\",\n\t\t\tv1: NewColumn(0, sqltypes.Uint32, collations.CollationBinaryID), v2: NewLiteralInt(0),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewUint32(0)},\n\t\t},\n\t}\n\n\tfor i, tcase := range tests {\n\t\tt.Run(fmt.Sprintf(\"%d %s\", i, tcase.name), func(t *testing.T) {\n\t\t\ttcase.run(t)\n\t\t})\n\t}\n}", "func Eq(a, b types.Expr) types.Expr {\n\tif a == nil || b == nil {\n\t\tif a == b {\n\t\t\treturn types.Symbol(\"t\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tswitch a := a.(type) {\n\tcase types.Floating:\n\t\tif b, ok := b.(types.Floating); ok && math.Abs(float64(a-b)) < 0.00000001 {\n\t\t\treturn types.Symbol(\"t\")\n\t\t}\n\tcase types.Integer:\n\t\tif b, ok := b.(types.Integer); ok && a == b {\n\t\t\treturn types.Symbol(\"t\")\n\t\t}\n\tcase types.String:\n\t\tif b, ok := b.(types.String); ok && a == b {\n\t\t\treturn types.Symbol(\"t\")\n\t\t}\n\tcase types.Symbol:\n\t\tif b, ok := b.(types.Symbol); ok && a == b {\n\t\t\treturn types.Symbol(\"t\")\n\t\t}\n\tcase types.Cons:\n\t\tif b, ok := b.(types.Cons); ok {\n\t\t\treturn eqCons(a, b)\n\t\t}\n\tcase types.Vector:\n\t\tif b, ok := b.(types.Vector); ok {\n\t\t\treturn eqVector(a, b)\n\t\t}\n\t}\n\treturn nil\n}", "func Equal(t Testing, expected, actual interface{}, formatAndArgs ...interface{}) bool {\n\tif !AreEqualObjects(expected, actual) {\n\t\treturn Fail(t,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Expected values 
are NOT equal.%s\",\n\t\t\t\tdiffValues(expected, actual),\n\t\t\t),\n\t\t\tformatAndArgs...)\n\t}\n\n\treturn true\n}", "func Equal(n1, n2 External) bool {\n\tif n1 == nil && n2 == nil {\n\t\treturn true\n\t} else if n1 == nil || n2 == nil {\n\t\treturn false\n\t}\n\tswitch n1 := n1.(type) {\n\tcase String:\n\t\tn2, ok := n2.(String)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Int:\n\t\tn2, ok := n2.(Int)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Uint:\n\t\tn2, ok := n2.(Uint)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Bool:\n\t\tn2, ok := n2.(Bool)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Float:\n\t\tn2, ok := n2.(Float)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Object:\n\t\tif n2, ok := n2.(Object); ok {\n\t\t\tif len(n1) != len(n2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif pointerOf(n1) == pointerOf(n2) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn n1.EqualObject(n2)\n\t\t}\n\t\tif _, ok := n2.(Node); ok {\n\t\t\treturn false\n\t\t}\n\t\treturn n1.Equal(n2)\n\tcase Array:\n\t\tif n2, ok := n2.(Array); ok {\n\t\t\tif len(n1) != len(n2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn len(n1) == 0 || &n1[0] == &n2[0] || n1.EqualArray(n2)\n\t\t}\n\t\tif _, ok := n2.(Node); ok {\n\t\t\treturn false\n\t\t}\n\t\treturn n1.Equal(n2)\n\tdefault:\n\t\tif Same(n1, n2) {\n\t\t\treturn true\n\t\t}\n\t}\n\tif n, ok := n1.(Node); ok {\n\t\treturn n.Equal(n2)\n\t} else if n, ok = n2.(Node); ok {\n\t\treturn n.Equal(n1)\n\t}\n\treturn equalExt(n1, n2)\n}", "func Equal(dt, dt2 DataType) bool {\n\treturn Hash(dt, false, true, true) == Hash(dt2, false, true, true)\n}", "func (f Fixed) Equal(f0 Fixed) bool {\n\tif f.IsNaN() || f0.IsNaN() {\n\t\treturn false\n\t}\n\treturn f.Cmp(f0) == 0\n}", "func (f Fixed) Equal(f0 Fixed) bool {\n\tif f.IsNaN() || f0.IsNaN() {\n\t\treturn false\n\t}\n\treturn f.Cmp(f0) == 0\n}", "func Equal[T any](t testing.TB, expected, actual T, msgAndArgs ...interface{}) {\n\tif objectsAreEqual(expected, actual) 
{\n\t\treturn\n\t}\n\tt.Helper()\n\tmsg := formatMsgAndArgs(\"Expected values to be equal:\", msgAndArgs...)\n\tt.Fatalf(\"%s\\n%s\", msg, diff(expected, actual))\n}", "func Equal(object, expected interface{}) error {\n\t// shortcuts\n\tif expected == nil && object == nil {\n\t\treturn nil\n\t}\n\n\tif (expected == nil && object != nil) || (expected != nil && object == nil) {\n\t\treturn xerrors.New(stringJoin(\"\\n\", \"not equal\", actualExpectedDiff(object, expected)))\n\t}\n\n\t// we might be able to convert this\n\tcompareData := misc.MakeTypeCopy(expected)\n\n\terr := converter.Convert(object, &compareData)\n\tif err == nil {\n\t\tobject = compareData\n\t}\n\tif !cmp.Equal(expected, object) {\n\t\treturn xerrors.New(stringJoin(\"\\n\", \"not equal\", actualExpectedDiff(object, expected)))\n\t}\n\treturn nil\n}", "func equal(a, b float64) bool {\n\tif math.IsNaN(a) && math.IsNaN(b) {\n\t\treturn true\n\t}\n\tif !math.IsNaN(a) && !math.IsNaN(b) {\n\t\treturn math.Abs(a-b) < eps\n\t}\n\treturn false\n}", "func expectEqual(value, expected interface{}) {\n\tif value != expected {\n\t\tfmt.Printf(\"Fehler: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t} else {\n\t\tfmt.Printf(\"OK: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t}\n}", "func NewCompareFn(nullsMax bool, fields ...Evaluator) CompareFn {\n\tvar aBytesBuf []byte\n\tvar pair coerce.Pair\n\tcomparefns := make(map[zed.Type]comparefn)\n\treturn func(ra *zed.Value, rb *zed.Value) int {\n\t\tfor _, resolver := range fields {\n\t\t\t// XXX return errors?\n\t\t\ta, _ := resolver.Eval(ra)\n\t\t\tif len(a.Bytes) > 0 {\n\t\t\t\t// a.Bytes's backing array might belonging to\n\t\t\t\t// resolver.Eval, so copy it before calling\n\t\t\t\t// resolver.Eval again.\n\t\t\t\taBytesBuf = append(aBytesBuf[:0], a.Bytes...)\n\t\t\t\ta.Bytes = aBytesBuf\n\t\t\t}\n\t\t\tb, _ := resolver.Eval(rb)\n\t\t\tv := compareValues(a, b, comparefns, &pair, nullsMax)\n\t\t\t// If the events don't match, then 
return the sort\n\t\t\t// info. Otherwise, they match and we continue on\n\t\t\t// on in the loop to the secondary key, etc.\n\t\t\tif v != 0 {\n\t\t\t\treturn v\n\t\t\t}\n\t\t}\n\t\t// All the keys matched with equality.\n\t\treturn 0\n\t}\n}", "func equalFloat(x float64, y float64, limit float64) bool {\n\n\tif limit <= 0.0 {\n\t\tlimit = math.SmallestNonzeroFloat64\n\t}\n\n\treturn math.Abs(x-y) <= (limit * math.Min(math.Abs(x), math.Abs(y)))\n}", "func TestEqual(t *testing.T) {\n\ttables := []struct {\n\t\tx []string\n\t\ty []string\n\t\texpected bool\n\t}{\n\t\t{[]string{}, []string{}, true},\n\t\t{[]string{}, []string{\"\"}, false},\n\t\t{[]string{\"\"}, []string{\"\"}, true},\n\t\t{[]string{\"\"}, []string{\"a\"}, false},\n\t\t{[]string{\"a\"}, []string{\"a\", \"a\"}, false},\n\t\t{[]string{\"b\"}, []string{\"a\"}, false},\n\t\t{[]string{\"\", \"\", \"\"}, []string{\"\", \"\", \"\"}, true},\n\t\t{[]string{\"a\", \"b\", \"c\"}, []string{\"a\", \"b\", \"e\"}, false},\n\t}\n\n\tfor _, table := range tables {\n\t\tresult := Equal(table.x, table.y)\n\t\tif result != table.expected {\n\t\t\tt.Errorf(\"Match failed for (%s, %s). 
Expected %t, got %t\",\n\t\t\t\ttable.x, table.y, table.expected, result)\n\t\t}\n\t}\n}", "func validateEqualArgs(expected, actual interface{}) error {\n\tif expected == nil && actual == nil {\n\t\treturn nil\n\t}\n\n\t// NOTE: in iam policy expression, we guarantee the value will not be Function!\n\t// if isFunction(expected) || isFunction(actual) {\n\t// \treturn errors.New(\"cannot take func type as argument\")\n\t// }\n\treturn nil\n}", "func Equal(value string) Matcher {\n\treturn func(cmp string) bool {\n\t\treturn cmp == value\n\t}\n}", "func expectSame(t *T, expected, actual interface{}) bool {\n\tif !reflect.DeepEqual(expected, actual) {\n\t\texpectf(t, expected, actual)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func TestCompareFloats(t *testing.T) {\n\ttests := []testCase{\n\t\t{\n\t\t\tname: \"floats are equal (1)\",\n\t\t\tv1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(18)},\n\t\t},\n\t\t{\n\t\t\tname: \"floats are equal (2)\",\n\t\t\tv1: NewLiteralFloat(3549.9), v2: NewLiteralFloat(3549.9),\n\t\t\tout: &T, op: sqlparser.EqualOp,\n\t\t},\n\t\t{\n\t\t\tname: \"floats are not equal (1)\",\n\t\t\tv1: NewLiteralFloat(7858.016), v2: NewLiteralFloat(8943298.56),\n\t\t\tout: &F, op: sqlparser.EqualOp,\n\t\t},\n\t\t{\n\t\t\tname: \"floats are not equal (2)\",\n\t\t\tv1: NewLiteralFloat(351049.65), v2: NewLiteralFloat(62508.99),\n\t\t\tout: &T, op: sqlparser.NotEqualOp,\n\t\t},\n\t\t{\n\t\t\tname: \"floats are not equal (3)\",\n\t\t\tv1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID),\n\t\t\tout: &F, op: sqlparser.EqualOp,\n\t\t\trow: []sqltypes.Value{sqltypes.NewFloat64(16516.84), sqltypes.NewFloat64(219541.01)},\n\t\t},\n\t\t{\n\t\t\tname: \"float is less than float\",\n\t\t\tv1: NewLiteralFloat(3549.9), 
v2: NewLiteralFloat(8072),\n\t\t\tout: &T, op: sqlparser.LessThanOp,\n\t\t},\n\t\t{\n\t\t\tname: \"float is not less than float\",\n\t\t\tv1: NewLiteralFloat(3549.9), v2: NewLiteralFloat(21.564),\n\t\t\tout: &F, op: sqlparser.LessThanOp,\n\t\t},\n\t\t{\n\t\t\tname: \"float is less-equal to float (1)\",\n\t\t\tv1: NewLiteralFloat(3549.9), v2: NewLiteralFloat(9863),\n\t\t\tout: &T, op: sqlparser.LessEqualOp,\n\t\t},\n\t\t{\n\t\t\tname: \"float is less-equal to float (2)\",\n\t\t\tv1: NewLiteralFloat(3549.9), v2: NewLiteralFloat(3549.9),\n\t\t\tout: &T, op: sqlparser.LessEqualOp,\n\t\t},\n\t\t{\n\t\t\tname: \"float is greater than float\",\n\t\t\tv1: NewLiteralFloat(9808.549), v2: NewLiteralFloat(9808.540),\n\t\t\tout: &T, op: sqlparser.GreaterThanOp,\n\t\t},\n\t\t{\n\t\t\tname: \"float is not greater than float\",\n\t\t\tv1: NewLiteralFloat(549.02), v2: NewLiteralFloat(21579.64),\n\t\t\tout: &F, op: sqlparser.GreaterThanOp,\n\t\t},\n\t\t{\n\t\t\tname: \"float is greater-equal to float (1)\",\n\t\t\tv1: NewLiteralFloat(987.30), v2: NewLiteralFloat(15.5),\n\t\t\tout: &T, op: sqlparser.GreaterEqualOp,\n\t\t},\n\t\t{\n\t\t\tname: \"float is greater-equal to float (2)\",\n\t\t\tv1: NewLiteralFloat(3549.9), v2: NewLiteralFloat(3549.9),\n\t\t\tout: &T, op: sqlparser.GreaterEqualOp,\n\t\t},\n\t}\n\n\tfor i, tcase := range tests {\n\t\tt.Run(fmt.Sprintf(\"%d %s\", i, tcase.name), func(t *testing.T) {\n\t\t\ttcase.run(t)\n\t\t})\n\t}\n}", "func (eps Accuracy) Equal(a, b float64) bool {\n\treturn math.Abs(a-b) < eps()\n}", "func Equal(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) bool {\n\tif err := validateEqualArgs(expected, actual); err != nil {\n\t\treturn failTest(t, 1, fmt.Sprintf(\"Equal: invalid operation `%#v` == `%#v` (%v)\", expected, actual, err), msgAndArgs...)\n\t}\n\n\tif !IsObjectEqual(expected, actual) {\n\t\treturn failTest(t, 1, fmt.Sprintf(\"Equal: expected `%#v`, actual `%#v`\", expected, actual), msgAndArgs...)\n\t}\n\n\treturn 
true\n}", "func Equal(values ...interface{}) (failureMessage string) {\n\tif values[0] != values[1] {\n\t\tfailureMessage = fmt.Sprintf(\"Expected `%v` to equal `%v`\", values[0], values[1])\n\t}\n\treturn\n}", "func compareEquality(expected, actual interface{}) bool {\n\n\tif expected == nil || actual == nil {\n\t\treturn expected == actual\n\t}\n\n\tif reflect.DeepEqual(expected, actual) {\n\t\treturn true\n\t}\n\n\texpectedValue := reflect.ValueOf(expected)\n\tactualValue := reflect.ValueOf(actual)\n\n\tif expectedValue == actualValue {\n\t\treturn true\n\t}\n\n\t// Attempt comparison after type conversion\n\tif actualValue.Type().ConvertibleTo(expectedValue.Type()) && expectedValue == actualValue.Convert(expectedValue.Type()) {\n\t\treturn true\n\t}\n\n\t// Last ditch effort\n\tif fmt.Sprintf(\"%#v\", expected) == fmt.Sprintf(\"%#v\", actual) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func equal(key, value string, params Parameter) bool {\n switch value {\n case \"nil\", \"empty\":\n return equalNilAndEmpty(key, value, params)\n default:\n return equalValue(key, value, params)\n }\n}", "func (value *Value) Equal(other *Value) bool {\n\treturn reflect.DeepEqual(value, other)\n}", "func Equal(t TestingT, expected, actual interface{}, extras ...interface{}) bool {\n\tif !DeepEqual(expected, actual) {\n\t\treturn Errorf(t, \"Expect to be equal\", []labeledOutput{\n\t\t\t{\n\t\t\t\tlabel: labelMessages,\n\t\t\t\tcontent: formatExtras(extras...),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"Diff\",\n\t\t\t\tcontent: diff(expected, actual),\n\t\t\t},\n\t\t})\n\t}\n\n\treturn true\n}", "func Equal(left Value, right Value) bool {\n\t// TODO: Stop-gap for now, this will need to be much more sophisticated.\n\treturn CoerceString(left) == CoerceString(right)\n}", "func RequireFilterEqual(t *testing.T, v1, v2 interface{}, ignoreTypes []interface{}) {\n\tt.Helper()\n\n\tdiff := cmp.Diff(v1, v2, cmpopts.IgnoreTypes(ignoreTypes...),\n\t\tcmp.Exporter(func(reflect.Type) bool { 
return true }))\n\tif diff != \"\" {\n\t\tt.Errorf(\"Not equal:\\n%s\", diff)\n\t\tt.FailNow()\n\t}\n}", "func TestCheckBinaryExprFloatNeqInt(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `2.0 != 4`, env, (2.0 != 4), ConstBool)\n}", "func TestCheckBinaryExprFloatNeqFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `2.0 != 2.0`, env, (2.0 != 2.0), ConstBool)\n}", "func (c *callback) Equal(b *callback) bool {\n\treturn c.TimestampEpoch == b.TimestampEpoch &&\n\t\tc.DeviceID == b.DeviceID &&\n\t\tc.IsDuplicate == b.IsDuplicate &&\n\t\tc.SNR == b.SNR &&\n\t\tc.RSSI == b.RSSI &&\n\t\tc.AverageSNR == b.AverageSNR &&\n\t\tc.StationID == b.StationID &&\n\t\tc.Data == b.Data &&\n\t\tc.Latitude == b.Latitude &&\n\t\tc.Longitude == b.Longitude &&\n\t\tc.SequenceNumber == b.SequenceNumber\n}", "func (f Fixed8) Equal(g Fixed8) bool {\n\treturn f == g\n}", "func Equal(t *testing.T, expected, actual interface{}, message ...string) {\n\tif !compareEquality(expected, actual) {\n\t\tt.Errorf(\"%v\\nExpected \\n\\t[%#v]\\nto be\\n\\t[%#v]\\n%v \", message, actual, expected, callerInfo(2 +callStackAdjust))\n\t}\n}", "func EqualFloat64(actual, expected, delta float64, typ int) (status bool) {\n\tswitch {\n\tcase math.IsNaN(actual) || math.IsNaN(expected):\n\t\tstatus = math.IsNaN(actual) == math.IsNaN(expected)\n\t\tbreak\n\tcase math.IsInf(actual, 0) || math.IsInf(expected, 0):\n\t\tstatus = math.IsInf(actual, 0) == math.IsInf(expected, 0)\n\t\tbreak\n\tcase expected == 0:\n\t\tstatus = math.Abs(actual-expected) < math.Abs(delta)\n\t\tbreak\n\tcase expected != 0:\n\t\tif typ == 0 {\n\t\t\tstatus = math.Abs(actual-expected) < math.Abs(delta)\n\t\t} else {\n\t\t\tstatus = math.Abs(actual-expected)/math.Abs(expected) < math.Abs(delta)\n\t\t}\n\t}\n\treturn\n}", "func equal(slice1, slice2 Coef) bool {\n\tfor index := range slice1 {\n\t\tif math.Abs(slice1[index]-slice2[index]) > epsilon {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func 
FuzzyEqual(a, b float64) bool {\n\t// handle case when a and b are near zero and on opposite sides of it\n\tif a*b < 0 {\n\t\ta += 1\n\t\tb += 1\n\t}\n\n\tif a < 0 {\n\t\ta = -a\n\t}\n\tif b < 0 {\n\t\tb = -b\n\t}\n\n\t// if values are small compare around one instead of zero\n\tif a < 1 || b < 1 {\n\t\ta += 1\n\t\tb += 1\n\t}\n\n\tif a < b {\n\t\treturn b-a <= 1e-12*a\n\t}\n\treturn a-b <= 1e-12*b\n}", "func Eqtype(t1, t2 *Type) bool" ]
[ "0.6721976", "0.6518533", "0.6493485", "0.64395815", "0.63705933", "0.6230326", "0.6185897", "0.6122447", "0.6104045", "0.6037134", "0.6017878", "0.58805865", "0.5862717", "0.5856781", "0.5835001", "0.57322305", "0.5723468", "0.56437725", "0.5623578", "0.56197405", "0.56125724", "0.55883926", "0.55763066", "0.5560826", "0.5514973", "0.5511845", "0.5506597", "0.54508543", "0.5447272", "0.54467565", "0.5443066", "0.54260826", "0.5385511", "0.5330066", "0.5280173", "0.5258787", "0.52465445", "0.5245314", "0.5213764", "0.5213764", "0.5213688", "0.52109283", "0.51976967", "0.5191381", "0.5191381", "0.5191381", "0.51789004", "0.51717705", "0.51554984", "0.5140569", "0.5140097", "0.5134268", "0.51159763", "0.5105096", "0.51002735", "0.50914925", "0.50891554", "0.50844735", "0.5082263", "0.50694644", "0.5059611", "0.50584245", "0.5057732", "0.5051137", "0.50316924", "0.5024214", "0.5015282", "0.49981445", "0.49978262", "0.4995382", "0.4995382", "0.4992526", "0.49860853", "0.49673736", "0.49590495", "0.495891", "0.495855", "0.495647", "0.49556005", "0.49552327", "0.49485227", "0.49435422", "0.49338332", "0.4930774", "0.49287847", "0.49280763", "0.49233812", "0.49169326", "0.49080756", "0.4907714", "0.4905981", "0.48957303", "0.4888994", "0.48881474", "0.48851836", "0.4876847", "0.4872892", "0.487105", "0.48648265", "0.48619577" ]
0.7783054
0
NewMockmonitorInterface creates a new mock instance
func NewMockmonitorInterface(ctrl *gomock.Controller) *MockmonitorInterface { mock := &MockmonitorInterface{ctrl: ctrl} mock.recorder = &MockmonitorInterfaceMockRecorder{mock} return mock }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newMonitor(ui cli.Ui, client *api.Client, length int) *monitor {\n\tmon := &monitor{\n\t\tui: &cli.PrefixedUi{\n\t\t\tInfoPrefix: \"==> \",\n\t\t\tOutputPrefix: \" \",\n\t\t\tErrorPrefix: \"==> \",\n\t\t\tUi: ui,\n\t\t},\n\t\tclient: client,\n\t\tstate: newEvalState(),\n\t\tlength: length,\n\t}\n\treturn mon\n}", "func NewMock() *Mock {\n\tc := &Mock{\n\t\tFakeIncoming: func() chan []byte {\n\t\t\treturn make(chan []byte, 2)\n\t\t},\n\t\tFakeName: func() string {\n\t\t\treturn \"TestClient\"\n\t\t},\n\t\tFakeGame: func() string {\n\t\t\treturn \"test\"\n\t\t},\n\t\tFakeClose: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeStopTimer: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeRoom: func() interfaces.Room {\n\t\t\treturn nil\n\t\t},\n\t\tFakeSetRoom: func(interfaces.Room) {\n\n\t\t},\n\t}\n\n\tc.FakeWritePump = func() {\n\t\tfor range c.Incoming() {\n\t\t\t// Do nothing\n\t\t}\n\t}\n\n\tc.FakeSetName = func(string) interfaces.Client {\n\t\treturn c\n\t}\n\treturn c\n}", "func NewMock() *MockMetrics {\n\treturn &MockMetrics{}\n}", "func NewMock(now time.Time) *Mock {\n\treturn &Mock{\n\t\tnow: now,\n\t\tmockTimers: &timerHeap{},\n\t}\n}", "func NewMock() *Mock {\n\treturn &Mock{\n\t\tData: MockData{\n\t\t\tUptime: true,\n\t\t\tFile: true,\n\t\t\tTCPResponse: true,\n\t\t\tHTTPStatus: true,\n\t\t},\n\t}\n}", "func NewMock() *Mock {\n\treturn &Mock{VolumesMock: &VolumesServiceMock{}}\n}", "func NewMockInterface(t mockConstructorTestingTNewMockInterface) *MockInterface {\n\tmock := &MockInterface{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewMock(path string, nodes uint, replicas uint, vbuckets uint, specs ...BucketSpec) (m *Mock, err error) {\n\tvar lsn *net.TCPListener\n\tchAccept := make(chan bool)\n\tm = &Mock{}\n\n\tdefer func() {\n\t\tclose(chAccept)\n\t\tif lsn != nil {\n\t\t\tif err := lsn.Close(); err != nil {\n\t\t\t\tlog.Printf(\"Failed to close listener: %v\", 
err)\n\t\t\t}\n\t\t}\n\t\texc := recover()\n\n\t\tif exc == nil {\n\t\t\t// No errors, everything is OK\n\t\t\treturn\n\t\t}\n\n\t\t// Close mock on error, destroying resources\n\t\tm.Close()\n\t\tif mExc, ok := exc.(mockError); !ok {\n\t\t\tpanic(mExc)\n\t\t} else {\n\t\t\tm = nil\n\t\t\terr = mExc\n\t\t}\n\t}()\n\n\tif lsn, err = net.ListenTCP(\"tcp\", &net.TCPAddr{Port: 0}); err != nil {\n\t\tthrowMockError(\"Couldn't set up listening socket\", err)\n\t}\n\t_, ctlPort, err := net.SplitHostPort(lsn.Addr().String())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to split host and port: %v\", err)\n\t}\n\tlog.Printf(\"Listening for control connection at %s\\n\", ctlPort)\n\n\tgo func() {\n\t\tvar err error\n\n\t\tdefer func() {\n\t\t\tchAccept <- false\n\t\t}()\n\t\tif m.conn, err = lsn.Accept(); err != nil {\n\t\t\tthrowMockError(\"Couldn't accept incoming control connection from mock\", err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\tif len(specs) == 0 {\n\t\tspecs = []BucketSpec{{Name: \"default\", Type: BCouchbase}}\n\t}\n\n\toptions := []string{\n\t\t\"-jar\", path, \"--harakiri-monitor\", \"localhost:\" + ctlPort, \"--port\", \"0\",\n\t\t\"--replicas\", strconv.Itoa(int(replicas)),\n\t\t\"--vbuckets\", strconv.Itoa(int(vbuckets)),\n\t\t\"--nodes\", strconv.Itoa(int(nodes)),\n\t\t\"--buckets\", m.buildSpecStrings(specs),\n\t}\n\n\tlog.Printf(\"Invoking java %s\", strings.Join(options, \" \"))\n\tm.cmd = exec.Command(\"java\", options...)\n\n\tm.cmd.Stdout = os.Stdout\n\tm.cmd.Stderr = os.Stderr\n\n\tif err = m.cmd.Start(); err != nil {\n\t\tm.cmd = nil\n\t\tthrowMockError(\"Couldn't start command\", err)\n\t}\n\n\tselect {\n\tcase <-chAccept:\n\t\tbreak\n\n\tcase <-time.After(mockInitTimeout):\n\t\tthrowMockError(\"Timed out waiting for initialization\", errors.New(\"timeout\"))\n\t}\n\n\tm.rw = bufio.NewReadWriter(bufio.NewReader(m.conn), bufio.NewWriter(m.conn))\n\n\t// Read the port buffer, which is delimited by a NUL byte\n\tif portBytes, err := m.rw.ReadBytes(0); err 
!= nil {\n\t\tthrowMockError(\"Couldn't get port information\", err)\n\t} else {\n\t\tportBytes = portBytes[:len(portBytes)-1]\n\t\tif entryPort, err := strconv.Atoi(string(portBytes)); err != nil {\n\t\t\tthrowMockError(\"Incorrectly formatted port from mock\", err)\n\t\t} else {\n\t\t\tm.EntryPort = uint16(entryPort)\n\t\t}\n\t}\n\n\tlog.Printf(\"Mock HTTP port at %d\\n\", m.EntryPort)\n\treturn\n}", "func NewMockMonitorListener(queueSize int) *MockMonitorListener {\n\treturn &MockMonitorListener{\n\t\tqueue: make(chan *payload.Payload, queueSize),\n\t}\n}", "func NewMock(t *testing.T) *MockT { return &MockT{t: t} }", "func NewMock() *Mock {\n\treturn &Mock{now: time.Unix(0, 0)}\n}", "func NewMockDefault() *Mock {\n\tmgr := new(Mock)\n\tvar pluginsMap = make(map[string]managerContracts.Plugin)\n\tvar cwPlugin = managerContracts.Plugin{\n\t\tHandler: cloudwatch.NewMockDefault(),\n\t}\n\tpluginsMap[CloudWatchId] = cwPlugin\n\n\tmgr.On(\"GetRegisteredPlugins\").Return(pluginsMap)\n\tmgr.On(\"Name\").Return(CloudWatchId)\n\tmgr.On(\"Execute\", mock.AnythingOfType(\"context.T\")).Return(nil)\n\tmgr.On(\"RequestStop\", mock.AnythingOfType(\"string\")).Return(nil)\n\tmgr.On(\"StopPlugin\", mock.AnythingOfType(\"string\"), mock.Anything).Return(nil)\n\tmgr.On(\"StartPlugin\", mock.AnythingOfType(\"string\"), mock.AnythingOfType(\"string\"), mock.AnythingOfType(\"string\"), mock.AnythingOfType(\"task.CancelFlag\")).Return(nil)\n\treturn mgr\n}", "func NewMockMessageMonitor(ctrl *gomock.Controller) *MockMessageMonitor {\n\tmock := &MockMessageMonitor{ctrl: ctrl}\n\tmock.recorder = &MockMessageMonitorMockRecorder{mock}\n\treturn mock\n}", "func New() (*mock, error) {\n\treturn &mock{\n\t\tConfigService: ConfigService{},\n\t\tContainerService: ContainerService{},\n\t\tDistributionService: DistributionService{},\n\t\tImageService: ImageService{},\n\t\tNetworkService: NetworkService{},\n\t\tNodeService: NodeService{},\n\t\tPluginService: PluginService{},\n\t\tSecretService: 
SecretService{},\n\t\tServiceService: ServiceService{},\n\t\tSystemService: SystemService{},\n\t\tSwarmService: SwarmService{},\n\t\tVolumeService: VolumeService{},\n\t\tVersion: Version,\n\t}, nil\n}", "func NewMonitor(\n\tcctx *Context,\n\tiface string,\n\tdialer *system.Dialer,\n\twatchC <-chan netstate.Change,\n\tverbose bool,\n) *Monitor {\n\treturn &Monitor{\n\t\tcctx: cctx,\n\t\tiface: iface,\n\t\tverbose: verbose,\n\t\tdialer: dialer,\n\t\twatchC: watchC,\n\t\treadyC: make(chan struct{}),\n\n\t\t// By default use real time.\n\t\tnow: time.Now,\n\t}\n}", "func NewMonitor(p *Probe) (*Monitor, error) {\n\tvar err error\n\tm := &Monitor{\n\t\tprobe: p,\n\t}\n\n\t// instantiate a new load controller\n\tm.loadController, err = NewLoadController(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// instantiate a new event statistics monitor\n\tm.perfBufferMonitor, err = NewPerfBufferMonitor(p)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't create the events statistics monitor: %w\", err)\n\t}\n\n\tif p.config.ActivityDumpEnabled {\n\t\tm.activityDumpManager, err = NewActivityDumpManager(p)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"couldn't create the activity dump manager: %w\", err)\n\t\t}\n\t}\n\n\tif p.config.RuntimeMonitor {\n\t\tm.runtimeMonitor = NewRuntimeMonitor(p.statsdClient)\n\t}\n\n\tm.discarderMonitor, err = NewDiscarderMonitor(p)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't create the discarder monitor: %w\", err)\n\t}\n\n\treturn m, nil\n}", "func NewMockInterfaceProvider(managedInterfacesRegexp string, autoRefresh bool) (nt.InterfaceProvider,\n\tchan time.Time, error) {\n\tch := make(chan time.Time)\n\tip, err := nt.NewChanInterfaceProvider(ch, &MockInterfaceLister{}, managedInterfacesRegexp,\n\t\tautoRefresh)\n\treturn ip, ch, err\n}", "func newMockSubscriber() mockSubscriber {\n\treturn mockSubscriber{}\n}", "func newMockMatrix() io.ReadWriteCloser {\n\treturn &mockMatrix{\n\t\tstate: stateCmd,\n\t\tcount: 
0,\n\t}\n}", "func New() *Mock {\n\treturn &Mock{\n\t\tm: mockMap{},\n\t\toldTransport: http.DefaultTransport,\n\t}\n}", "func New() *MockLibvirt {\n\tserv, conn := net.Pipe()\n\n\tm := &MockLibvirt{\n\t\tConn: conn,\n\t\tTest: serv,\n\t}\n\n\tgo m.handle(serv)\n\n\treturn m\n}", "func NewMock(response string) *Operator {\n\treturn &Operator{cli: client.NewMock(response)}\n}", "func New(cfg *Config) (*Monitor, error) {\n\tc, err := client.NewClient(cfg.Host, \"1.24\", nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tm := &Monitor{\n\t\tevents: cfg.Events,\n\t\tclient: c,\n\t\tlog: logrus.WithField(\"context\", \"docker\"),\n\t\tstop: cancel,\n\t\tstopCh: make(chan bool),\n\t}\n\tgo m.run(ctx)\n\treturn m, nil\n}", "func NewMock() Cache {\n\treturn &mock{}\n}", "func NewGatewayMock(t minimock.Tester) *GatewayMock {\n\tm := &GatewayMock{t: t}\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.AutherMock = mGatewayMockAuther{mock: m}\n\n\tm.BeforeRunMock = mGatewayMockBeforeRun{mock: m}\n\tm.BeforeRunMock.callArgs = []*GatewayMockBeforeRunParams{}\n\n\tm.BootstrapperMock = mGatewayMockBootstrapper{mock: m}\n\n\tm.EphemeralModeMock = mGatewayMockEphemeralMode{mock: m}\n\tm.EphemeralModeMock.callArgs = []*GatewayMockEphemeralModeParams{}\n\n\tm.GetStateMock = mGatewayMockGetState{mock: m}\n\n\tm.NewGatewayMock = mGatewayMockNewGateway{mock: m}\n\tm.NewGatewayMock.callArgs = []*GatewayMockNewGatewayParams{}\n\n\tm.OnConsensusFinishedMock = mGatewayMockOnConsensusFinished{mock: m}\n\tm.OnConsensusFinishedMock.callArgs = []*GatewayMockOnConsensusFinishedParams{}\n\n\tm.OnPulseFromConsensusMock = mGatewayMockOnPulseFromConsensus{mock: m}\n\tm.OnPulseFromConsensusMock.callArgs = []*GatewayMockOnPulseFromConsensusParams{}\n\n\tm.OnPulseFromPulsarMock = mGatewayMockOnPulseFromPulsar{mock: m}\n\tm.OnPulseFromPulsarMock.callArgs = 
[]*GatewayMockOnPulseFromPulsarParams{}\n\n\tm.RunMock = mGatewayMockRun{mock: m}\n\tm.RunMock.callArgs = []*GatewayMockRunParams{}\n\n\tm.UpdateStateMock = mGatewayMockUpdateState{mock: m}\n\tm.UpdateStateMock.callArgs = []*GatewayMockUpdateStateParams{}\n\n\treturn m\n}", "func NewMonitor(api model.API) {\n\tjob := interval.NewJob(requestGET(api), int(api.IntervalTime))\n\tapiJobMap[api.ID] = job\n}", "func NewMockprometheusInterface(ctrl *gomock.Controller) *MockprometheusInterface {\n\tmock := &MockprometheusInterface{ctrl: ctrl}\n\tmock.recorder = &MockprometheusInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMonitor(slackURL string) monitor {\n\treturn monitor{make(map[string]checkResult), slackURL}\n}", "func NewMockInterface(ctrl *gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockInterface(ctrl *gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockInterface(ctrl *gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockInterface(ctrl *gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockInterface(ctrl *gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockInterface(ctrl *gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockInterface(ctrl *gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockInterface(ctrl 
*gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockInterface(ctrl *gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockInterface(ctrl *gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockInterface(ctrl *gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockInterface(ctrl *gomock.Controller) *MockInterface {\n\tmock := &MockInterface{ctrl: ctrl}\n\tmock.recorder = &MockInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockObject(uid, name, ns string, res api.Resource) api.Object {\n\treturn NewObject(uuid.NewFromString(uid), name, ns, res)\n}", "func (m *MockWatcherConstructor) New(arg0 Machine, arg1 string, arg2 []string, arg3, arg4, arg5 string, arg6 time.Duration, arg7 map[string]interface{}) (interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"New\", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)\n\tret0, _ := ret[0].(interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func CreateNewSimpleMonitor(target string) *SimpleMonitor {\n\treturn &SimpleMonitor{\n\t\t//Resource: &Resource{ID: \"\"},\n\t\tName: target,\n\t\tProvider: &SimpleMonitorProvider{\n\t\t\tClass: \"simplemon\",\n\t\t},\n\t\tStatus: &SimpleMonitorStatus{\n\t\t\tTarget: target,\n\t\t},\n\n\t\tSettings: &SimpleMonitorSettings{\n\t\t\tSimpleMonitor: &SimpleMonitorSetting{\n\t\t\t\tHealthCheck: &SimpleMonitorHealthCheck{},\n\t\t\t\tEnabled: \"True\",\n\t\t\t\tNotifyEmail: &SimpleMonitorNotify{\n\t\t\t\t\tEnabled: \"False\",\n\t\t\t\t},\n\t\t\t\tNotifySlack: &SimpleMonitorNotify{\n\t\t\t\t\tEnabled: 
\"False\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tTagsType: &TagsType{},\n\t}\n\n}", "func NewMock(path string, logger log.FieldLogger) Listener {\n\treturn &mock{\n\t\tpath: path,\n\t\tlogger: logger,\n\t}\n}", "func New(reap, dryRun bool, namespace, hostIP string) *Monitor {\n\tm := &Monitor{\n\t\tc: make(chan error),\n\t\tdryRun: dryRun,\n\t\thostIP: hostIP,\n\t\tnamespace: namespace,\n\t\treap: reap,\n\t}\n\treturn m\n}", "func NewMonitor(c httpcache.Cache) *Monitor {\n\treturn &Monitor{c: c}\n}", "func NewMockSupport() *MockSupport {\n\treturn &MockSupport{\n\t\tPublisher: NewBlockPublisher(),\n\t}\n}", "func NewMock() Component {\n\treturn &MockComponent{\n\t\tlifecycleComponent: lifecycleComponent{\n\t\t\tterminateChannel: make(chan struct{}),\n\t\t},\n\t}\n}", "func New(opts Options) *Monitor {\n\tif opts.PID == 0 {\n\t\topts.PID = int32(os.Getpid())\n\t}\n\n\tif opts.RefreshInterval <= 0 {\n\t\topts.RefreshInterval = 2 * opts.RefreshInterval\n\t}\n\n\tif opts.ViewRefreshInterval <= 0 {\n\t\topts.ViewRefreshInterval = opts.RefreshInterval\n\t}\n\n\tviewRefreshIntervalBytes := []byte(fmt.Sprintf(\"%d\", opts.ViewRefreshInterval.Milliseconds()))\n\tviewBody := bytes.Replace(defaultViewBody, viewRefreshIntervalTmplVar, viewRefreshIntervalBytes, 1)\n\tviewAnimationIntervalBytes := []byte(fmt.Sprintf(\"%d\", opts.ViewAnimationInterval.Milliseconds()))\n\tviewBody = bytes.Replace(viewBody, viewAnimationIntervalTmplVar, viewAnimationIntervalBytes, 2)\n\tviewTitleBytes := []byte(opts.ViewTitle)\n\tviewBody = bytes.Replace(viewBody, viewTitleTmplVar, viewTitleBytes, 2)\n\tproc, err := process.NewProcess(opts.PID)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tsh := startNewStatsHolder(proc, opts.RefreshInterval)\n\tm := &Monitor{\n\t\topts: opts,\n\t\tHolder: sh,\n\t\tviewBody: viewBody,\n\t}\n\n\treturn m\n}", "func (m *MockmonitorInterface) EXPECT() *MockmonitorInterfaceMockRecorder {\n\treturn m.recorder\n}", "func NewMonitor(port int) *Monitor {\n\tmon := 
&Monitor{\n\t\tconnectionCountChannels: map[int]chan int{},\n\t\tconnectionCounts: map[int]int{},\n\t\taddChannel: make(chan int, 10),\n\t\tcountMutex: sync.Mutex{},\n\t\tchannelMutex: sync.Mutex{},\n\t}\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tmon.countMutex.Lock()\n\t\tbytes, err := json.MarshalIndent(mon.connectionCounts, \"\", \" \")\n\t\tmon.countMutex.Unlock()\n\t\tif err != nil {\n\t\t\tw.WriteHeader(500)\n\t\t\tw.Write([]byte(fmt.Sprintf(\"ERROR OCCURED: %v\", err)))\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(bytes)\n\t})\n\tgo func() {\n\t\tif err := http.ListenAndServe(fmt.Sprintf(\":%v\", port), nil); err != nil {\n\t\t\tfmt.Printf(\"Failed to Start Monitor Service: %v\", err)\n\t\t\tos.Exit(5)\n\t\t}\n\t}()\n\tgo mon.addSubRoutine()\n\treturn mon\n}", "func NewMockWatcher(ctrl *gomock.Controller) *MockWatcher {\n\tmock := &MockWatcher{ctrl: ctrl}\n\tmock.recorder = &MockWatcherMockRecorder{mock}\n\treturn mock\n}", "func NewMockWatcher(ctrl *gomock.Controller) *MockWatcher {\n\tmock := &MockWatcher{ctrl: ctrl}\n\tmock.recorder = &MockWatcherMockRecorder{mock}\n\treturn mock\n}", "func NewMockWatcher(ctrl *gomock.Controller) *MockWatcher {\n\tmock := &MockWatcher{ctrl: ctrl}\n\tmock.recorder = &MockWatcherMockRecorder{mock}\n\treturn mock\n}", "func NewMonitor() *Monitor {\n\treturn &Monitor{\n\t\tValidators: make([]string, 0),\n\t\treceiveChannel: make(chan *connection.Packet, maxChannelSize),\n\t\taccAlgorithm: accountability.NewAccountability(),\n\t}\n}", "func NewMock(serverHost string) (*MockClient, error) {\n\treturn &MockClient{}, nil\n}", "func NewMonitor(c context.Context) *Monitor {\n\treturn &Monitor{StatPB: make(chan pb.Stat, 1),\n\t\tticker: time.NewTicker(time.Duration(config.CfgWorker.LoadReport.LoadReportInterval) * time.Second),\n\t\tadapterIdx: -1,\n\t\tCtx: c,\n\t\tStat: stat.NewStat()}\n}", "func NewInspector(t *testing.T) *InspectorMock 
{\n\treturn &InspectorMock{\n\t\tNew(t, \"inspector\"),\n\t}\n}", "func NewMonitoringcClient(t testing.TB) *MonitoringcClient {\n\tmock := &MonitoringcClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newMock(deps mockDependencies, t testing.TB) (Component, error) {\n\tbackupConfig := config.NewConfig(\"\", \"\", strings.NewReplacer())\n\tbackupConfig.CopyConfig(config.Datadog)\n\n\tconfig.Datadog.CopyConfig(config.NewConfig(\"mock\", \"XXXX\", strings.NewReplacer()))\n\n\tconfig.SetFeatures(t, deps.Params.Features...)\n\n\t// call InitConfig to set defaults.\n\tconfig.InitConfig(config.Datadog)\n\tc := &cfg{\n\t\tConfig: config.Datadog,\n\t}\n\n\tif !deps.Params.SetupConfig {\n\n\t\tif deps.Params.ConfFilePath != \"\" {\n\t\t\tconfig.Datadog.SetConfigType(\"yaml\")\n\t\t\terr := config.Datadog.ReadConfig(strings.NewReader(deps.Params.ConfFilePath))\n\t\t\tif err != nil {\n\t\t\t\t// The YAML was invalid, fail initialization of the mock config.\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\twarnings, _ := setupConfig(deps)\n\t\tc.warnings = warnings\n\t}\n\n\t// Overrides are explicit and will take precedence over any other\n\t// setting\n\tfor k, v := range deps.Params.Overrides {\n\t\tconfig.Datadog.Set(k, v)\n\t}\n\n\t// swap the existing config back at the end of the test.\n\tt.Cleanup(func() { config.Datadog.CopyConfig(backupConfig) })\n\n\treturn c, nil\n}", "func InitMock() error {\n\tmwc := newMockClient(networksMock())\n\tc, err := client.NewClient(\"http://127.0.0.1:2375\", \"v1.21\", mwc, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdockerClient = c\n\n\treturn nil\n}", "func NewOutboundMock(t minimock.Tester) *OutboundMock {\n\tm := &OutboundMock{t: t}\n\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.AsByteStringMock = mOutboundMockAsByteString{mock: m}\n\tm.CanAcceptMock = mOutboundMockCanAccept{mock: 
m}\n\tm.GetEndpointTypeMock = mOutboundMockGetEndpointType{mock: m}\n\tm.GetIPAddressMock = mOutboundMockGetIPAddress{mock: m}\n\tm.GetNameAddressMock = mOutboundMockGetNameAddress{mock: m}\n\tm.GetRelayIDMock = mOutboundMockGetRelayID{mock: m}\n\n\treturn m\n}", "func (m *GatewayMock) MinimockNewGatewayInspect() {\n\tfor _, e := range m.NewGatewayMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to GatewayMock.NewGateway with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.NewGatewayMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterNewGatewayCounter) < 1 {\n\t\tif m.NewGatewayMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to GatewayMock.NewGateway\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to GatewayMock.NewGateway with params: %#v\", *m.NewGatewayMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcNewGateway != nil && mm_atomic.LoadUint64(&m.afterNewGatewayCounter) < 1 {\n\t\tm.t.Error(\"Expected call to GatewayMock.NewGateway\")\n\t}\n}", "func NewMockUi() *MockUi {\n\tm := new(MockUi)\n\tm.once.Do(m.init)\n\treturn m\n}", "func NewRingMock(t minimock.Tester) *RingMock {\n\tm := &RingMock{t: t}\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.DecryptMock = mRingMockDecrypt{mock: m}\n\tm.DecryptMock.callArgs = []*RingMockDecryptParams{}\n\n\tm.EncryptMock = mRingMockEncrypt{mock: m}\n\tm.EncryptMock.callArgs = []*RingMockEncryptParams{}\n\n\treturn m\n}", "func newFakeReconciler(initObjects ...runtime.Object) *ReconcileMachineRemediation {\n\tfakeClient := fake.NewFakeClient(initObjects...)\n\tremediator := &FakeRemedatior{}\n\treturn &ReconcileMachineRemediation{\n\t\tclient: fakeClient,\n\t\tremediator: remediator,\n\t\tnamespace: 
consts.NamespaceOpenshiftMachineAPI,\n\t}\n}", "func NewStateSwitcherMock(t minimock.Tester) *StateSwitcherMock {\n\tm := &StateSwitcherMock{t: t}\n\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.GetStateMock = mStateSwitcherMockGetState{mock: m}\n\tm.SetPulsarMock = mStateSwitcherMockSetPulsar{mock: m}\n\tm.SwitchToStateMock = mStateSwitcherMockSwitchToState{mock: m}\n\tm.setStateMock = mStateSwitcherMocksetState{mock: m}\n\n\treturn m\n}", "func NewSystemMock(lastUpdateDate string) *SystemMock {\n\tconst projectName = \"mock-project - 1\"\n\treturn NewSystemMockWithProjectName(lastUpdateDate, projectName)\n}", "func NewMonitor(inputChan chan *TripsOfSec) *Monitor {\n\tm := &Monitor{\n\t\tInputChan: inputChan,\n\t\tprocessingStats: map[int64]*ProcessingStat{},\n\t\tResultChan: make(chan Stat, 1024),\n\t}\n\tgo m.consume()\n\treturn m\n}", "func NewMock() Client {\n\treturn &mockClient{}\n}", "func newMockNetworks() (*MockNetwork, *MockNetwork) {\n\tc := mockCon.NewConn()\n\treturn &MockNetwork{c.Client}, &MockNetwork{c.Server}\n}", "func (mmNewGateway *mGatewayMockNewGateway) Inspect(f func(ctx context.Context, n1 insolar.NetworkState)) *mGatewayMockNewGateway {\n\tif mmNewGateway.mock.inspectFuncNewGateway != nil {\n\t\tmmNewGateway.mock.t.Fatalf(\"Inspect function is already set for GatewayMock.NewGateway\")\n\t}\n\n\tmmNewGateway.mock.inspectFuncNewGateway = f\n\n\treturn mmNewGateway\n}", "func New(cfg *Config,\n\tapiManager apimanager.Provider,\n\tlogger logger.Logger, registerer prometheus.Registerer) (Provider, error) {\n\tservice := &MockServer{\n\t\tcfg: cfg,\n\t\tregisterer: registerer,\n\t\tapiManager: apiManager,\n\t\tLogger: logger.NewLogger(\"httpMockServer\"),\n\t}\n\treturn service, nil\n}", "func newRunner(output string, err error) *MockRunner {\n\tm := &MockRunner{}\n\tm.On(\"Run\", mock.Anything).Return([]byte(output), err)\n\treturn m\n}", "func NewMockRemote(ctrl *gomock.Controller) 
*MockRemote {\n\tmock := &MockRemote{ctrl: ctrl}\n\tmock.recorder = &MockRemoteMockRecorder{mock}\n\treturn mock\n}", "func NewMockMock(ctrl *gomock.Controller) *MockMock {\n\tmock := &MockMock{ctrl: ctrl}\n\tmock.recorder = &MockMockMockRecorder{mock}\n\treturn mock\n}", "func TestUnitCreateAzureMonitor(t *testing.T) {\n\tt.Parallel()\n\tcreateAzureMonitorResponse := newMockResponse(t, testCreateAzureMonitor, http.StatusCreated)\n\tlinkedAccountIDAsInt, _ := strconv.Atoi(linkedAccountID)\n\tcreateAzureMonitorInput := CloudIntegrationsInput{\n\t\tAzure: CloudAzureIntegrationsInput{\n\t\t\tAzureMonitor: []CloudAzureMonitorIntegrationInput{{\n\t\t\t\tLinkedAccountId: linkedAccountIDAsInt,\n\t\t\t\tEnabled: true,\n\t\t\t\tExcludeTags: []string{\"env:staging\", \"env:testing\"},\n\t\t\t\tIncludeTags: []string{\"env:production\"},\n\t\t\t\tMetricsPollingInterval: 1200,\n\t\t\t\tResourceTypes: []string{\"microsoft.datashare/accounts\"},\n\t\t\t\tResourceGroups: []string{\"resource_groups\"},\n\t\t\t}},\n\t\t},\n\t}\n\n\tNRAccountIDInt, _ := strconv.Atoi(nrAccountID)\n\tactual, err := createAzureMonitorResponse.CloudConfigureIntegration(NRAccountIDInt, createAzureMonitorInput)\n\n\tresponseJSON, _ := json.Marshal(actual.Integrations[0])\n\tresponseJSONAsString := string(responseJSON)\n\tobjActual, objExpected, objError := unmarshalAzureCloudIntegrationJSON(responseJSONAsString, testCreateAzureMonitorIntegration)\n\n\tassert.NoError(t, err)\n\tassert.NoError(t, objError)\n\tassert.NotNil(t, actual)\n\tassert.Equal(t, objActual, objExpected)\n\n}", "func NewMockReplacer(\n\tctx context.Context,\n\tregion string,\n\tprofile string) *Replacer {\n\n\tasgroup := newAsg(region, profile)\n\tdeploy := fsm.NewDeploy(\"start\")\n\tasgroup.Ec2Api = &mockEC2iface{}\n\tasgroup.AsgAPI = &mockASGiface{}\n\tasgroup.EcsAPI = &mockECSiface{}\n\treturn &Replacer{\n\t\tctx: ctx,\n\t\tasg: asgroup,\n\t\tdeploy: deploy,\n\t}\n}", "func NewMock() MockClient {\n\treturn 
NewMockWithLogger(&noopLogger{})\n}", "func CreateMock(method interface{}, url interface{}, headers interface{}, body interface{}) *go_mock_yourself_http.Mock {\n\tmockRequest := new(go_mock_yourself_http.Request)\n\n\tif method != nil {\n\t\tmockRequest.SetMethod(method)\n\t}\n\n\tif url != nil {\n\t\tmockRequest.SetUrl(url)\n\t}\n\n\tif body != nil {\n\t\tmockRequest.SetBody(body)\n\t}\n\n\tif headers != nil {\n\t\tmockRequest.SetHeaders(headers)\n\t}\n\n\tmockResponse := new(go_mock_yourself_http.Response)\n\tmockResponse.SetStatusCode(222)\n\tmockResponse.SetBody(\"i'm a cute loving mock, almost as cute as mumi, bichi and rasti\")\n\n\tmock, _ := go_mock_yourself_http.NewMock(\"my lovely testing mock\", mockRequest, mockResponse)\n\treturn mock\n}", "func NewMock(opts ...ClientOpt) (*client, error) {\n\t// create new Docker runtime client\n\tc, err := New(opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create Docker client from the mock client\n\t//\n\t// https://pkg.go.dev/github.com/go-vela/mock/docker#New\n\t_docker, err := mock.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// set the Docker client in the runtime client\n\tc.Docker = _docker\n\n\treturn c, nil\n}", "func NewMonitor(config *MonitorConfig) *Monitor {\n\tmonitor := &Monitor{\n\t\tstatuses: config.Statuses,\n\t\tusername: config.Username,\n\t\tpassword: config.Password,\n\t\tupdate: make(chan StatusUpdate),\n\t}\n\n\tconfig.Router.Handle(\"/status\", monitor.basicAuth(monitor.getStatusHandler(), true)).Methods(http.MethodGet)\n\tconfig.Router.Handle(\"/login\", monitor.basicAuth(login(), false)).Methods(http.MethodGet)\n\t// This is not very REST friendly but will make updating less complicated\n\tconfig.Router.Handle(\"/update/{id}/{status}\", monitor.basicAuth(monitor.updateStatusHandler(), true)).Methods(http.MethodGet)\n\n\tmonitor.display = NewDisplay(config.Name)\n\tconfig.Router.Handle(\"/live\", monitor.basicAuth(monitor.display.LiveStatus(), 
true))\n\tmonitor.display.RouteStatic(config.Router)\n\n\tgo monitor.updateListener()\n\n\treturn monitor\n}", "func NewMockRecorder() *MockRecorder {\n\treturn &MockRecorder{}\n}", "func TestMonitorPrivate(t *testing.T) {\n\thelper := newHelper(t)\n\tdefer helper.Close()\n\n\t// Configure as private probe\n\thelper.cfg.Blackbox.ScraperName = \"\"\n\thelper.cfg.Blackbox.ScraperSendUUID = true\n\n\thelper.addMonitorOnAPI(t)\n\thelper.initSynchronizer(t)\n\thelper.AddTime(time.Minute)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tidAgentMain, _ := helper.state.BleemeoCredentials()\n\tif idAgentMain == \"\" {\n\t\tt.Fatal(\"idAgentMain == '', want something\")\n\t}\n\n\tinitialMetrics := []metricPayload{\n\t\t// Metric from other probe are NOT present in API, because glouton private probe aren't allow to view them.\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"9149d491-3a6e-4f46-abf9-c1ea9b9f7227\",\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_success\",\n\t\t},\n\t}\n\n\tpushedPoints := []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"probe_success\"},\n\t\t\tlabels.Label{Name: types.LabelScraperUUID, Value: idAgentMain},\n\t\t\tlabels.Label{Name: types.LabelInstance, Value: newMonitor.URL},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: newMonitor.AgentID},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgentUUID, Value: newMonitor.AgentID},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"probe_duration\"},\n\t\t\tlabels.Label{Name: types.LabelScraperUUID, Value: idAgentMain},\n\t\t\tlabels.Label{Name: types.LabelInstance, 
Value: newMonitor.URL},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: newMonitor.AgentID},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgentUUID, Value: newMonitor.AgentID},\n\t\t),\n\t}\n\n\thelper.SetAPIMetrics(initialMetrics...)\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\twant := []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_success\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_duration\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_duration\",\n\t\t},\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n\n\thelper.SetTimeToNextFullSync()\n\thelper.AddTime(60 * time.Minute)\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n\n\thelper.SetTimeToNextFullSync()\n\thelper.AddTime(60 * time.Minute)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\twant = []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: 
idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tDeactivatedAt: helper.Now(),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_success\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_duration\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tDeactivatedAt: helper.Now(),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_duration\",\n\t\t},\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n}", "func NewMockLocker(ctrl *gomock.Controller) *MockLocker {\n\tmock := &MockLocker{ctrl: ctrl}\n\tmock.recorder = &MockLockerMockRecorder{mock}\n\treturn mock\n}", "func NewMockLocker(ctrl *gomock.Controller) *MockLocker {\n\tmock := &MockLocker{ctrl: ctrl}\n\tmock.recorder = &MockLockerMockRecorder{mock}\n\treturn mock\n}", "func RunMockCephMonitor(t *testing.T, name string, etcdPort string) {\n\tvar err error\n\tcli, stdout, stdoutPipe := dockercli.NewClient()\n\tcephImage := \"deis/store-monitor:\" + utils.BuildTag()\n\tipaddr := utils.HostAddress()\n\tfmt.Printf(\"--- Running deis/mock-ceph-monitor at %s\\n\", ipaddr)\n\tdone2 := make(chan bool, 1)\n\tgo func() {\n\t\tdone2 <- true\n\t\t_ = cli.CmdRm(\"-f\", name)\n\t\terr = dockercli.RunContainer(cli,\n\t\t\t\"--name\", name,\n\t\t\t\"--rm\",\n\t\t\t\"-e\", \"HOST=\"+ipaddr,\n\t\t\t\"-e\", \"ETCD_PORT=\"+etcdPort,\n\t\t\t\"-e\", 
\"NUM_STORES=1\",\n\t\t\t\"--net=host\",\n\t\t\tcephImage)\n\t}()\n\tdockercli.PrintToStdout(t, stdout, stdoutPipe, \"monmap e1: 1 mons at\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func SetMonitor(monitor Monitor) {\n\tmonitorImpl = monitor\n}", "func registerMock(name string, priority CollectorPriority) *MockCollector {\n\tc := &MockCollector{}\n\tfactory := func() Collector { return c }\n\tregisterCollector(name, factory, priority)\n\treturn c\n}", "func (ml *MockMonitorListener) Close() {\n}", "func New(monitor monitor.Monitor) lua.Module {\n\tl := &logger{monitor}\n\tm := &lua.NativeModule{\n\t\tName: \"log\",\n\t\tVersion: \"1.0\",\n\t}\n\n\tm.Register(\"debug\", l.Debug)\n\tm.Register(\"info\", l.Info)\n\tm.Register(\"warning\", l.Warning)\n\tm.Register(\"error\", l.Error)\n\treturn m\n}", "func NewMonitorsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *MonitorsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Endpoint) == 0 {\n\t\tcp.Endpoint = arm.AzurePublicCloud\n\t}\n\tclient := &MonitorsClient{\n\t\tsubscriptionID: subscriptionID,\n\t\thost: string(cp.Endpoint),\n\t\tpl: armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, &cp),\n\t}\n\treturn client\n}", "func New(client plugins.Client, cryptor txcrypto.Cryptor) (*AppchainMonitor, error) {\n\tmeta, err := client.GetOutMeta()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get out interchainCounter from broker contract :%w\", err)\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &AppchainMonitor{\n\t\tclient: client,\n\t\tinterchainCounter: meta,\n\t\tcryptor: cryptor,\n\t\trecvCh: make(chan *pb.IBTP, 1024),\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t}, nil\n}", "func New(pythonPkg string) *MonitorCore {\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn &MonitorCore{\n\t\tlogger: log.StandardLogger(),\n\t\tctx: 
ctx,\n\t\tcancel: cancel,\n\t\tpythonPkg: pythonPkg,\n\t\tconfigCond: sync.Cond{L: &sync.Mutex{}},\n\t}\n\n}", "func NewMonitor(conf *config.Config, neomq, ethmq gomq.Consumer) (*Monitor, error) {\n\n\ttokenswapdb, err := createEngine(conf, \"tokenswapdb\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create tokenswap db engine error %s\", err)\n\t}\n\n\tethdb, err := createEngine(conf, \"ethdb\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create eth db engine error %s\", err)\n\t}\n\n\tneodb, err := createEngine(conf, \"neodb\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create neo db engine error %s\", err)\n\t}\n\n\tethKey, err := readETHKeyStore(conf, \"eth.keystore\", conf.GetString(\"eth.keystorepassword\", \"\"))\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create neo db engine error %s\", err)\n\t}\n\n\tneoKey, err := readNEOKeyStore(conf, \"neo.keystore\", conf.GetString(\"neo.keystorepassword\", \"\"))\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create neo db engine error %s\", err)\n\t}\n\n\tneo2ethtax, err := strconv.ParseFloat(conf.GetString(\"tokenswap.neo2ethtax\", \"0.001\"), 64)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ParseFloat neo2ethtax error %s\", err)\n\t}\n\n\teth2neotax, err := strconv.ParseFloat(conf.GetString(\"tokenswap.eth2neotax\", \"0.001\"), 64)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ParseFloat eth2neotax error %s\", err)\n\t}\n\n\treturn &Monitor{\n\t\tLogger: slf4go.Get(\"tokenswap-server\"),\n\t\tneomq: neomq,\n\t\tethmq: ethmq,\n\t\ttokenswapdb: tokenswapdb,\n\t\tethdb: ethdb,\n\t\tneodb: neodb,\n\t\ttncOfETH: conf.GetString(\"eth.tnc\", \"\"),\n\t\ttncOfNEO: conf.GetString(\"neo.tnc\", \"\"),\n\t\tETHKeyAddress: strings.ToLower(ethKey.Address),\n\t\tNEOKeyAddress: neoKey.Address,\n\t\tethClient: ethrpc.NewClient(conf.GetString(\"eth.node\", \"\")),\n\t\tneoClient: neorpc.NewClient(conf.GetString(\"neo.node\", \"\")),\n\t\tneo2ethtax: neo2ethtax,\n\t\teth2neotax: 
eth2neotax,\n\t\tconfig: conf,\n\t\tethConfirmCount: conf.GetInt64(\"tokenswap.ethConfirmCount\", 12),\n\t\tethGetBlockInterval: conf.GetInt64(\"tokenswap.ethGetBlockInterval\", 20),\n\t\tneoConfirmCount: conf.GetInt64(\"tokenswap.neoConfirmCount\", 12),\n\t\tneoGetBlockInterval: conf.GetInt64(\"tokenswap.neoGetBlockInterval\", 10),\n\t}, nil\n}", "func (n *MockLogNotifier) RegisterNewListener(l listener.MonitorListener) {\n\tn.monitorAgent.RegisterNewListener(l)\n}", "func NewMockBufferInterface(ctrl *gomock.Controller) *MockBufferInterface {\n\tmock := &MockBufferInterface{ctrl: ctrl}\n\tmock.recorder = &MockBufferInterfaceMockRecorder{mock}\n\treturn mock\n}", "func NewMockWriteBufferJsonBased(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockWriteBufferJsonBased {\n\tmock := &MockWriteBufferJsonBased{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewIndexLockerMock(t minimock.Tester) *IndexLockerMock {\n\tm := &IndexLockerMock{t: t}\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.LockMock = mIndexLockerMockLock{mock: m}\n\tm.LockMock.callArgs = []*IndexLockerMockLockParams{}\n\n\tm.UnlockMock = mIndexLockerMockUnlock{mock: m}\n\tm.UnlockMock.callArgs = []*IndexLockerMockUnlockParams{}\n\n\treturn m\n}", "func newMockKvCapabilityVerifier(t mockConstructorTestingTnewMockKvCapabilityVerifier) *mockKvCapabilityVerifier {\n\tmock := &mockKvCapabilityVerifier{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}" ]
[ "0.6452872", "0.63626", "0.6360995", "0.63354677", "0.6195903", "0.6159239", "0.6151844", "0.6125414", "0.6114272", "0.6012288", "0.5978922", "0.5863247", "0.5803334", "0.5787612", "0.5735182", "0.57199854", "0.56633896", "0.56621176", "0.56521916", "0.5648153", "0.5636129", "0.5582028", "0.5579101", "0.55659217", "0.55479014", "0.55024195", "0.5457285", "0.5454454", "0.5452904", "0.5452904", "0.5452904", "0.5452904", "0.5452904", "0.5452904", "0.5452904", "0.5452904", "0.5452904", "0.5452904", "0.5452904", "0.5452904", "0.544796", "0.5443503", "0.5421116", "0.54034865", "0.539286", "0.53862286", "0.53647465", "0.5358579", "0.53433985", "0.5336614", "0.53304327", "0.5315155", "0.5315155", "0.5315155", "0.53052044", "0.5299185", "0.52709997", "0.5262692", "0.5252868", "0.5243705", "0.52430075", "0.5242566", "0.5233982", "0.5176478", "0.5161674", "0.51521206", "0.51414186", "0.5140895", "0.51267433", "0.51141274", "0.5110596", "0.5103208", "0.51026624", "0.510219", "0.5086576", "0.50816184", "0.50729775", "0.5069637", "0.5067443", "0.5065288", "0.5063805", "0.5063575", "0.5051342", "0.50480926", "0.50379777", "0.50379777", "0.5036895", "0.5030464", "0.5029256", "0.50215906", "0.5019315", "0.5019232", "0.50185025", "0.50089735", "0.50052005", "0.4997981", "0.49963978", "0.49961782", "0.4992193", "0.4990288" ]
0.76742446
0
EXPECT returns an object that allows the caller to indicate expected use
func (m *MockmonitorInterface) EXPECT() *MockmonitorInterfaceMockRecorder { return m.recorder }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mmGetObject *mClientMockGetObject) Expect(ctx context.Context, head insolar.Reference) *mClientMockGetObject {\n\tif mmGetObject.mock.funcGetObject != nil {\n\t\tmmGetObject.mock.t.Fatalf(\"ClientMock.GetObject mock is already set by Set\")\n\t}\n\n\tif mmGetObject.defaultExpectation == nil {\n\t\tmmGetObject.defaultExpectation = &ClientMockGetObjectExpectation{}\n\t}\n\n\tmmGetObject.defaultExpectation.params = &ClientMockGetObjectParams{ctx, head}\n\tfor _, e := range mmGetObject.expectations {\n\t\tif minimock.Equal(e.params, mmGetObject.defaultExpectation.params) {\n\t\t\tmmGetObject.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetObject.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetObject\n}", "func (r Requester) Assert(actual, expected interface{}) Requester {\n\t//r.actualResponse = actual\n\t//r.expectedResponse = expected\n\treturn r\n}", "func (r *Request) Expect(t *testing.T) *Response {\n\tr.apiTest.t = t\n\treturn r.apiTest.response\n}", "func (m *MockNotary) Notarize(arg0 string) (map[string]interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Notarize\", arg0)\n\tret0, _ := ret[0].(map[string]interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (tc TestCases) expect() {\n\tfmt.Println(cnt)\n\tcnt++\n\tif !reflect.DeepEqual(tc.resp, tc.respExp) {\n\t\ttc.t.Error(fmt.Sprintf(\"\\nRequested: \", tc.req, \"\\nExpected: \", tc.respExp, \"\\nFound: \", tc.resp))\n\t}\n}", "func (r *Request) Expect(t TestingT) *Response {\n\tr.apiTest.t = t\n\treturn r.apiTest.response\n}", "func Expect(t cbtest.T, actual interface{}, matcher matcher.Matcher, labelAndArgs ...interface{}) {\n\tt.Helper()\n\tres := ExpectE(t, actual, matcher, labelAndArgs...)\n\tif !res {\n\t\tt.FailNow()\n\t}\n}", "func (m *MockisObject_Obj) EXPECT() *MockisObject_ObjMockRecorder {\n\treturn m.recorder\n}", "func Expect(t *testing.T, v, m interface{}) {\n\tvt, vok := v.(Equaler)\n\tmt, mok := m.(Equaler)\n\n\tvar 
state bool\n\tif vok && mok {\n\t\tstate = vt.Equal(mt)\n\t} else {\n\t\tstate = reflect.DeepEqual(v, m)\n\t}\n\n\tif state {\n\t\tflux.FatalFailed(t, \"Value %+v and %+v are not a match\", v, m)\n\t\treturn\n\t}\n\tflux.LogPassed(t, \"Value %+v and %+v are a match\", v, m)\n}", "func (mmState *mClientMockState) Expect() *mClientMockState {\n\tif mmState.mock.funcState != nil {\n\t\tmmState.mock.t.Fatalf(\"ClientMock.State mock is already set by Set\")\n\t}\n\n\tif mmState.defaultExpectation == nil {\n\t\tmmState.defaultExpectation = &ClientMockStateExpectation{}\n\t}\n\n\treturn mmState\n}", "func (mmProvide *mContainerMockProvide) Expect(constructor interface{}) *mContainerMockProvide {\n\tif mmProvide.mock.funcProvide != nil {\n\t\tmmProvide.mock.t.Fatalf(\"ContainerMock.Provide mock is already set by Set\")\n\t}\n\n\tif mmProvide.defaultExpectation == nil {\n\t\tmmProvide.defaultExpectation = &ContainerMockProvideExpectation{}\n\t}\n\n\tmmProvide.defaultExpectation.params = &ContainerMockProvideParams{constructor}\n\tfor _, e := range mmProvide.expectations {\n\t\tif minimock.Equal(e.params, mmProvide.defaultExpectation.params) {\n\t\t\tmmProvide.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmProvide.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmProvide\n}", "func Mock() Env {\n\treturn mock.New()\n}", "func (mmGetCode *mClientMockGetCode) Expect(ctx context.Context, ref insolar.Reference) *mClientMockGetCode {\n\tif mmGetCode.mock.funcGetCode != nil {\n\t\tmmGetCode.mock.t.Fatalf(\"ClientMock.GetCode mock is already set by Set\")\n\t}\n\n\tif mmGetCode.defaultExpectation == nil {\n\t\tmmGetCode.defaultExpectation = &ClientMockGetCodeExpectation{}\n\t}\n\n\tmmGetCode.defaultExpectation.params = &ClientMockGetCodeParams{ctx, ref}\n\tfor _, e := range mmGetCode.expectations {\n\t\tif minimock.Equal(e.params, mmGetCode.defaultExpectation.params) {\n\t\t\tmmGetCode.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", 
*mmGetCode.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetCode\n}", "func expect(t *testing.T, method, url string, testieOptions ...func(*http.Request)) *testie {\n\treq, err := http.NewRequest(method, url, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, opt := range testieOptions {\n\t\topt(req)\n\t}\n\n\treturn testReq(t, req)\n}", "func (_m *MockOStream) EXPECT() *MockOStreamMockRecorder {\n\treturn _m.recorder\n}", "func (mmGetUser *mStorageMockGetUser) Expect(ctx context.Context, userID int64) *mStorageMockGetUser {\n\tif mmGetUser.mock.funcGetUser != nil {\n\t\tmmGetUser.mock.t.Fatalf(\"StorageMock.GetUser mock is already set by Set\")\n\t}\n\n\tif mmGetUser.defaultExpectation == nil {\n\t\tmmGetUser.defaultExpectation = &StorageMockGetUserExpectation{}\n\t}\n\n\tmmGetUser.defaultExpectation.params = &StorageMockGetUserParams{ctx, userID}\n\tfor _, e := range mmGetUser.expectations {\n\t\tif minimock.Equal(e.params, mmGetUser.defaultExpectation.params) {\n\t\t\tmmGetUser.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetUser.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetUser\n}", "func (mmGetObject *mClientMockGetObject) Return(o1 ObjectDescriptor, err error) *ClientMock {\n\tif mmGetObject.mock.funcGetObject != nil {\n\t\tmmGetObject.mock.t.Fatalf(\"ClientMock.GetObject mock is already set by Set\")\n\t}\n\n\tif mmGetObject.defaultExpectation == nil {\n\t\tmmGetObject.defaultExpectation = &ClientMockGetObjectExpectation{mock: mmGetObject.mock}\n\t}\n\tmmGetObject.defaultExpectation.results = &ClientMockGetObjectResults{o1, err}\n\treturn mmGetObject.mock\n}", "func (mmGather *mGathererMockGather) Expect() *mGathererMockGather {\n\tif mmGather.mock.funcGather != nil {\n\t\tmmGather.mock.t.Fatalf(\"GathererMock.Gather mock is already set by Set\")\n\t}\n\n\tif mmGather.defaultExpectation == nil {\n\t\tmmGather.defaultExpectation = &GathererMockGatherExpectation{}\n\t}\n\n\treturn mmGather\n}", "func (m 
*MockParser) EXPECT() *MockParserMockRecorder {\n\treturn m.recorder\n}", "func (m *MockParser) EXPECT() *MockParserMockRecorder {\n\treturn m.recorder\n}", "func (mmWriteTo *mDigestHolderMockWriteTo) Expect(w io.Writer) *mDigestHolderMockWriteTo {\n\tif mmWriteTo.mock.funcWriteTo != nil {\n\t\tmmWriteTo.mock.t.Fatalf(\"DigestHolderMock.WriteTo mock is already set by Set\")\n\t}\n\n\tif mmWriteTo.defaultExpectation == nil {\n\t\tmmWriteTo.defaultExpectation = &DigestHolderMockWriteToExpectation{}\n\t}\n\n\tmmWriteTo.defaultExpectation.params = &DigestHolderMockWriteToParams{w}\n\tfor _, e := range mmWriteTo.expectations {\n\t\tif minimock.Equal(e.params, mmWriteTo.defaultExpectation.params) {\n\t\t\tmmWriteTo.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmWriteTo.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmWriteTo\n}", "func (rb *RequestBuilder) EXPECT() *ResponseAsserter {\n\treq := httptest.NewRequest(rb.method, rb.path, rb.body)\n\tfor k, v := range rb.hdr {\n\t\treq.Header[k] = v\n\t}\n\n\trec := httptest.NewRecorder()\n\trb.cas.h.ServeHTTP(rec, req)\n\n\treturn &ResponseAsserter{\n\t\trec: rec,\n\t\treq: req,\n\t\tb: rb,\n\t\tfail: rb.fail.\n\t\t\tCopy().\n\t\t\tWithRequest(req).\n\t\t\tWithResponse(rec),\n\t}\n}", "func (mmGetState *mGatewayMockGetState) Expect() *mGatewayMockGetState {\n\tif mmGetState.mock.funcGetState != nil {\n\t\tmmGetState.mock.t.Fatalf(\"GatewayMock.GetState mock is already set by Set\")\n\t}\n\n\tif mmGetState.defaultExpectation == nil {\n\t\tmmGetState.defaultExpectation = &GatewayMockGetStateExpectation{}\n\t}\n\n\treturn mmGetState\n}", "func (m *mParcelMockGetSign) Expect() *mParcelMockGetSign {\n\tm.mock.GetSignFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetSignExpectation{}\n\t}\n\n\treturn m\n}", "func (mmCreateTag *mTagCreatorMockCreateTag) Expect(t1 semantic.Tag) *mTagCreatorMockCreateTag {\n\tif mmCreateTag.mock.funcCreateTag != 
nil {\n\t\tmmCreateTag.mock.t.Fatalf(\"TagCreatorMock.CreateTag mock is already set by Set\")\n\t}\n\n\tif mmCreateTag.defaultExpectation == nil {\n\t\tmmCreateTag.defaultExpectation = &TagCreatorMockCreateTagExpectation{}\n\t}\n\n\tmmCreateTag.defaultExpectation.params = &TagCreatorMockCreateTagParams{t1}\n\tfor _, e := range mmCreateTag.expectations {\n\t\tif minimock.Equal(e.params, mmCreateTag.defaultExpectation.params) {\n\t\t\tmmCreateTag.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmCreateTag.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmCreateTag\n}", "func (m *MockActorUsecase) EXPECT() *MockActorUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockGetCaller) Expect() *mParcelMockGetCaller {\n\tm.mock.GetCallerFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetCallerExpectation{}\n\t}\n\n\treturn m\n}", "func mockAlwaysRun() bool { return true }", "func (m *MockArg) EXPECT() *MockArgMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (st *SDKTester) Test(resp interface{}) {\n\tif resp == nil || st.respWant == nil {\n\t\tst.t.Logf(\"response want/got is nil, abort\\n\")\n\t\treturn\n\t}\n\n\trespMap := st.getFieldMap(resp)\n\tfor i, v := range st.respWant {\n\t\tif reflect.DeepEqual(v, 
respMap[i]) {\n\t\t\tcontinue\n\t\t}\n\t\tswitch x := respMap[i].(type) {\n\t\tcase Stringer:\n\t\t\tif !assert.Equal(st.t, v, x.String()) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\tcase map[string]interface{}:\n\t\t\tif value, ok := x[\"Value\"]; ok {\n\t\t\t\tif !assert.Equal(st.t, v, value) {\n\t\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t\t}\n\t\t\t}\n\t\tcase Inter:\n\t\t\tif !assert.Equal(st.t, v, x.Int()) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\tdefault:\n\t\t\tif !assert.Equal(st.t, v, respMap[i]) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *MockCreator) EXPECT() *MockCreatorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockCreator) EXPECT() *MockCreatorMockRecorder {\n\treturn m.recorder\n}", "func TestCallFunc_arguments(t *testing.T) {\n\n}", "func (m *mParcelMockGetSender) Expect() *mParcelMockGetSender {\n\tm.mock.GetSenderFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetSenderExpectation{}\n\t}\n\n\treturn m\n}", "func TestGetNone4A(t *testing.T) {\n}", "func expectEqual(value, expected interface{}) {\n\tif value != expected {\n\t\tfmt.Printf(\"Fehler: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t} else {\n\t\tfmt.Printf(\"OK: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t}\n}", "func (mmHasPendings *mClientMockHasPendings) Expect(ctx context.Context, object insolar.Reference) *mClientMockHasPendings {\n\tif mmHasPendings.mock.funcHasPendings != nil {\n\t\tmmHasPendings.mock.t.Fatalf(\"ClientMock.HasPendings mock is already set by Set\")\n\t}\n\n\tif mmHasPendings.defaultExpectation == nil {\n\t\tmmHasPendings.defaultExpectation = &ClientMockHasPendingsExpectation{}\n\t}\n\n\tmmHasPendings.defaultExpectation.params = &ClientMockHasPendingsParams{ctx, object}\n\tfor _, e := range 
mmHasPendings.expectations {\n\t\tif minimock.Equal(e.params, mmHasPendings.defaultExpectation.params) {\n\t\t\tmmHasPendings.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmHasPendings.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmHasPendings\n}", "func (mmGetPacketSignature *mPacketParserMockGetPacketSignature) Expect() *mPacketParserMockGetPacketSignature {\n\tif mmGetPacketSignature.mock.funcGetPacketSignature != nil {\n\t\tmmGetPacketSignature.mock.t.Fatalf(\"PacketParserMock.GetPacketSignature mock is already set by Set\")\n\t}\n\n\tif mmGetPacketSignature.defaultExpectation == nil {\n\t\tmmGetPacketSignature.defaultExpectation = &PacketParserMockGetPacketSignatureExpectation{}\n\t}\n\n\treturn mmGetPacketSignature\n}", "func Run(t testing.TB, cloud cloud.Client, src string, opts ...RunOption) {\n\n\tif cloud == nil {\n\t\tcloud = mockcloud.Client(nil)\n\t}\n\n\tvm := otto.New()\n\n\tpkg, err := godotto.Apply(context.Background(), vm, cloud)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvm.Set(\"cloud\", pkg)\n\tvm.Set(\"equals\", func(call otto.FunctionCall) otto.Value {\n\t\tvm := call.Otto\n\t\tgot, err := call.Argument(0).Export()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\twant, err := call.Argument(1).Export()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\tok, cause := deepEqual(got, want)\n\t\tif ok {\n\t\t\treturn otto.UndefinedValue()\n\t\t}\n\t\tmsg := \"assertion failed!\\n\" + cause\n\n\t\tif len(call.ArgumentList) > 2 {\n\t\t\tformat, err := call.ArgumentList[2].ToString()\n\t\t\tif err != nil {\n\t\t\t\tottoutil.Throw(vm, err.Error())\n\t\t\t}\n\t\t\tmsg += \"\\n\" + format\n\t\t}\n\t\tottoutil.Throw(vm, msg)\n\t\treturn otto.UndefinedValue()\n\t})\n\tvm.Set(\"assert\", func(call otto.FunctionCall) otto.Value {\n\t\tvm := call.Otto\n\t\tv, err := call.Argument(0).ToBoolean()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\tif v {\n\t\t\treturn 
otto.UndefinedValue()\n\t\t}\n\t\tmsg := \"assertion failed!\"\n\t\tif len(call.ArgumentList) > 1 {\n\t\t\tformat, err := call.ArgumentList[1].ToString()\n\t\t\tif err != nil {\n\t\t\t\tottoutil.Throw(vm, err.Error())\n\t\t\t}\n\t\t\tmsg += \"\\n\" + format\n\t\t}\n\t\tottoutil.Throw(vm, msg)\n\t\treturn otto.UndefinedValue()\n\t})\n\tscript, err := vm.Compile(\"\", src)\n\tif err != nil {\n\t\tt.Fatalf(\"invalid code: %v\", err)\n\t}\n\n\tfor _, opt := range opts {\n\t\tif err := opt(vm); err != nil {\n\t\t\tt.Fatalf(\"can't apply option: %v\", err)\n\t\t}\n\t}\n\n\tif _, err := vm.Run(script); err != nil {\n\t\tif oe, ok := err.(*otto.Error); ok {\n\t\t\tt.Fatal(oe.String())\n\t\t} else {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func TestSetGoodArgs(t *testing.T) {\n\tfmt.Println(\"Entering the test method for SetGoodArgs\")\n\tprovcc := new(SimpleAsset)\n\tstub := shim.NewMockStub(\"ANY_PARAM\", provcc)\n\n\t// Testing the init. It always return true. No parameters in init. \n\t\n\tcheckInit(t, stub, [][]byte{[]byte(\"init\")})\n\n\tres := stub.MockInvoke(\"1\", [][]byte{[]byte(\"set\"), []byte(\"S52fkpF2rCEArSuwqyDA9tVjawUdrkGzbNQLaa7xJfA=\"),\n\t[]byte(\"agentInfo.atype\"),[]byte(\"1.2.3.4\"),\n\t[]byte(\"agentInfo.id\"),[]byte(\"agentidentifier\"),\n\t[]byte(\"agentinfo.name\"),[]byte(\"7.8.9\"),\n\t[]byte(\"agentinfo.idp\"),[]byte(\"urn:tiani-spirit:sts\"),\n\t[]byte(\"locationInfo.id\"),[]byte(\"urn:oid:1.2.3\"),\n\t[]byte(\"locationInfo.name\"),[]byte(\"General Hospital\"),\n\t[]byte(\"locationInfo.locality\"),[]byte(\"Nashville, TN\"),\n\t[]byte(\"locationInfo.docid\"),[]byte(\"1.2.3\"),\n\t[]byte(\"action\"),[]byte(\"ex:CREATE\"),\n\t[]byte(\"date\"),[]byte(\"2018-11-10T12:15:55.028Z\")})\n\n\tif res.Status != shim.OK {\n\t\tfmt.Println(\"Invoke failed\", string(res.Message))\n\t\tt.FailNow()\n\t}\n\t\n}", "func (mmRegisterResult *mClientMockRegisterResult) Expect(ctx context.Context, request insolar.Reference, result RequestResult) 
*mClientMockRegisterResult {\n\tif mmRegisterResult.mock.funcRegisterResult != nil {\n\t\tmmRegisterResult.mock.t.Fatalf(\"ClientMock.RegisterResult mock is already set by Set\")\n\t}\n\n\tif mmRegisterResult.defaultExpectation == nil {\n\t\tmmRegisterResult.defaultExpectation = &ClientMockRegisterResultExpectation{}\n\t}\n\n\tmmRegisterResult.defaultExpectation.params = &ClientMockRegisterResultParams{ctx, request, result}\n\tfor _, e := range mmRegisterResult.expectations {\n\t\tif minimock.Equal(e.params, mmRegisterResult.defaultExpectation.params) {\n\t\t\tmmRegisterResult.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmRegisterResult.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmRegisterResult\n}", "func Mock() Cluster { return mockCluster{} }", "func (m *MockS3API) EXPECT() *MockS3APIMockRecorder {\n\treturn m.recorder\n}", "func mockedGranter(kubeutil *kube.Kube, app *v1.RadixRegistration, namespace string, serviceAccount *corev1.ServiceAccount) error {\n\treturn nil\n}", "func (mmGetPendings *mClientMockGetPendings) Expect(ctx context.Context, objectRef insolar.Reference) *mClientMockGetPendings {\n\tif mmGetPendings.mock.funcGetPendings != nil {\n\t\tmmGetPendings.mock.t.Fatalf(\"ClientMock.GetPendings mock is already set by Set\")\n\t}\n\n\tif mmGetPendings.defaultExpectation == nil {\n\t\tmmGetPendings.defaultExpectation = &ClientMockGetPendingsExpectation{}\n\t}\n\n\tmmGetPendings.defaultExpectation.params = &ClientMockGetPendingsParams{ctx, objectRef}\n\tfor _, e := range mmGetPendings.expectations {\n\t\tif minimock.Equal(e.params, mmGetPendings.defaultExpectation.params) {\n\t\t\tmmGetPendings.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetPendings.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetPendings\n}", "func (m *MockOrg) EXPECT() *MockOrgMockRecorder {\n\treturn m.recorder\n}", "func (mmGetUserLocation *mStorageMockGetUserLocation) Expect(ctx context.Context, userID int64) 
*mStorageMockGetUserLocation {\n\tif mmGetUserLocation.mock.funcGetUserLocation != nil {\n\t\tmmGetUserLocation.mock.t.Fatalf(\"StorageMock.GetUserLocation mock is already set by Set\")\n\t}\n\n\tif mmGetUserLocation.defaultExpectation == nil {\n\t\tmmGetUserLocation.defaultExpectation = &StorageMockGetUserLocationExpectation{}\n\t}\n\n\tmmGetUserLocation.defaultExpectation.params = &StorageMockGetUserLocationParams{ctx, userID}\n\tfor _, e := range mmGetUserLocation.expectations {\n\t\tif minimock.Equal(e.params, mmGetUserLocation.defaultExpectation.params) {\n\t\t\tmmGetUserLocation.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetUserLocation.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetUserLocation\n}", "func (mmCreate *mPaymentRepositoryMockCreate) Expect(ctx context.Context, from int64, to int64, amount int64) *mPaymentRepositoryMockCreate {\n\tif mmCreate.mock.funcCreate != nil {\n\t\tmmCreate.mock.t.Fatalf(\"PaymentRepositoryMock.Create mock is already set by Set\")\n\t}\n\n\tif mmCreate.defaultExpectation == nil {\n\t\tmmCreate.defaultExpectation = &PaymentRepositoryMockCreateExpectation{}\n\t}\n\n\tmmCreate.defaultExpectation.params = &PaymentRepositoryMockCreateParams{ctx, from, to, amount}\n\tfor _, e := range mmCreate.expectations {\n\t\tif minimock.Equal(e.params, mmCreate.defaultExpectation.params) {\n\t\t\tmmCreate.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmCreate.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmCreate\n}", "func (mmAuther *mGatewayMockAuther) Expect() *mGatewayMockAuther {\n\tif mmAuther.mock.funcAuther != nil {\n\t\tmmAuther.mock.t.Fatalf(\"GatewayMock.Auther mock is already set by Set\")\n\t}\n\n\tif mmAuther.defaultExpectation == nil {\n\t\tmmAuther.defaultExpectation = &GatewayMockAutherExpectation{}\n\t}\n\n\treturn mmAuther\n}", "func TestObjectsMeetReq(t *testing.T) {\n\tvar kr verifiable.StorageReader\n\tvar kw verifiable.StorageWriter\n\n\tvar m 
verifiable.MutatorService\n\n\tvar o verifiable.AuthorizationOracle\n\n\tkr = &memory.TransientStorage{}\n\tkw = &memory.TransientStorage{}\n\n\tkr = &bolt.Storage{}\n\tkw = &bolt.Storage{}\n\n\tkr = &badger.Storage{}\n\tkw = &badger.Storage{}\n\n\tm = &instant.Mutator{}\n\tm = (&batch.Mutator{}).MustCreate()\n\n\to = policy.Open\n\to = &policy.Static{}\n\n\tlog.Println(kr, kw, m, o) // \"use\" these so that go compiler will be quiet\n}", "func (mmInvoke *mContainerMockInvoke) Expect(function interface{}) *mContainerMockInvoke {\n\tif mmInvoke.mock.funcInvoke != nil {\n\t\tmmInvoke.mock.t.Fatalf(\"ContainerMock.Invoke mock is already set by Set\")\n\t}\n\n\tif mmInvoke.defaultExpectation == nil {\n\t\tmmInvoke.defaultExpectation = &ContainerMockInvokeExpectation{}\n\t}\n\n\tmmInvoke.defaultExpectation.params = &ContainerMockInvokeParams{function}\n\tfor _, e := range mmInvoke.expectations {\n\t\tif minimock.Equal(e.params, mmInvoke.defaultExpectation.params) {\n\t\t\tmmInvoke.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmInvoke.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmInvoke\n}", "func (mmGetPosition *mStoreMockGetPosition) Expect(account string, contractID string) *mStoreMockGetPosition {\n\tif mmGetPosition.mock.funcGetPosition != nil {\n\t\tmmGetPosition.mock.t.Fatalf(\"StoreMock.GetPosition mock is already set by Set\")\n\t}\n\n\tif mmGetPosition.defaultExpectation == nil {\n\t\tmmGetPosition.defaultExpectation = &StoreMockGetPositionExpectation{}\n\t}\n\n\tmmGetPosition.defaultExpectation.params = &StoreMockGetPositionParams{account, contractID}\n\tfor _, e := range mmGetPosition.expectations {\n\t\tif minimock.Equal(e.params, mmGetPosition.defaultExpectation.params) {\n\t\t\tmmGetPosition.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetPosition.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetPosition\n}", "func (mmGetAbandonedRequest *mClientMockGetAbandonedRequest) Expect(ctx context.Context, 
objectRef insolar.Reference, reqRef insolar.Reference) *mClientMockGetAbandonedRequest {\n\tif mmGetAbandonedRequest.mock.funcGetAbandonedRequest != nil {\n\t\tmmGetAbandonedRequest.mock.t.Fatalf(\"ClientMock.GetAbandonedRequest mock is already set by Set\")\n\t}\n\n\tif mmGetAbandonedRequest.defaultExpectation == nil {\n\t\tmmGetAbandonedRequest.defaultExpectation = &ClientMockGetAbandonedRequestExpectation{}\n\t}\n\n\tmmGetAbandonedRequest.defaultExpectation.params = &ClientMockGetAbandonedRequestParams{ctx, objectRef, reqRef}\n\tfor _, e := range mmGetAbandonedRequest.expectations {\n\t\tif minimock.Equal(e.params, mmGetAbandonedRequest.defaultExpectation.params) {\n\t\t\tmmGetAbandonedRequest.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetAbandonedRequest.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetAbandonedRequest\n}", "func (mmSend *mSenderMockSend) Expect(ctx context.Context, email Email) *mSenderMockSend {\n\tif mmSend.mock.funcSend != nil {\n\t\tmmSend.mock.t.Fatalf(\"SenderMock.Send mock is already set by Set\")\n\t}\n\n\tif mmSend.defaultExpectation == nil {\n\t\tmmSend.defaultExpectation = &SenderMockSendExpectation{}\n\t}\n\n\tmmSend.defaultExpectation.params = &SenderMockSendParams{ctx, email}\n\tfor _, e := range mmSend.expectations {\n\t\tif minimock.Equal(e.params, mmSend.defaultExpectation.params) {\n\t\t\tmmSend.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmSend.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmSend\n}", "func callAndVerify(msg string, client pb.GreeterClient, shouldFail bool) error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\t_, err := client.SayHello(ctx, &pb.HelloRequest{Name: msg})\n\tif want, got := shouldFail == true, err != nil; got != want {\n\t\treturn fmt.Errorf(\"want and got mismatch, want shouldFail=%v, got fail=%v, rpc error: %v\", want, got, err)\n\t}\n\treturn nil\n}", "func (m *Mockrequester) EXPECT() 
*MockrequesterMockRecorder {\n\treturn m.recorder\n}", "func expectEqual(actual interface{}, extra interface{}, explain ...interface{}) {\n\tgomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)\n}", "func (m *MockstackDescriber) EXPECT() *MockstackDescriberMockRecorder {\n\treturn m.recorder\n}", "func (req *outgoingRequest) Assert(t *testing.T, fixture *fixture) {\n\tassert.Equal(t, req.path, fixture.calledPath, \"called path not as expected\")\n\tassert.Equal(t, req.method, fixture.calledMethod, \"called path not as expected\")\n\tassert.Equal(t, req.body, fixture.requestBody, \"call body no as expected\")\n}", "func (mmVerify *mDelegationTokenFactoryMockVerify) Expect(parcel mm_insolar.Parcel) *mDelegationTokenFactoryMockVerify {\n\tif mmVerify.mock.funcVerify != nil {\n\t\tmmVerify.mock.t.Fatalf(\"DelegationTokenFactoryMock.Verify mock is already set by Set\")\n\t}\n\n\tif mmVerify.defaultExpectation == nil {\n\t\tmmVerify.defaultExpectation = &DelegationTokenFactoryMockVerifyExpectation{}\n\t}\n\n\tmmVerify.defaultExpectation.params = &DelegationTokenFactoryMockVerifyParams{parcel}\n\tfor _, e := range mmVerify.expectations {\n\t\tif minimock.Equal(e.params, mmVerify.defaultExpectation.params) {\n\t\t\tmmVerify.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmVerify.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmVerify\n}", "func (mmRead *mDigestHolderMockRead) Expect(p []byte) *mDigestHolderMockRead {\n\tif mmRead.mock.funcRead != nil {\n\t\tmmRead.mock.t.Fatalf(\"DigestHolderMock.Read mock is already set by Set\")\n\t}\n\n\tif mmRead.defaultExpectation == nil {\n\t\tmmRead.defaultExpectation = &DigestHolderMockReadExpectation{}\n\t}\n\n\tmmRead.defaultExpectation.params = &DigestHolderMockReadParams{p}\n\tfor _, e := range mmRead.expectations {\n\t\tif minimock.Equal(e.params, mmRead.defaultExpectation.params) {\n\t\t\tmmRead.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", 
*mmRead.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmRead\n}", "func (mmSend *mClientMockSend) Expect(ctx context.Context, n *Notification) *mClientMockSend {\n\tif mmSend.mock.funcSend != nil {\n\t\tmmSend.mock.t.Fatalf(\"ClientMock.Send mock is already set by Set\")\n\t}\n\n\tif mmSend.defaultExpectation == nil {\n\t\tmmSend.defaultExpectation = &ClientMockSendExpectation{}\n\t}\n\n\tmmSend.defaultExpectation.params = &ClientMockSendParams{ctx, n}\n\tfor _, e := range mmSend.expectations {\n\t\tif minimock.Equal(e.params, mmSend.defaultExpectation.params) {\n\t\t\tmmSend.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmSend.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmSend\n}", "func (mmAsByteString *mDigestHolderMockAsByteString) Expect() *mDigestHolderMockAsByteString {\n\tif mmAsByteString.mock.funcAsByteString != nil {\n\t\tmmAsByteString.mock.t.Fatalf(\"DigestHolderMock.AsByteString mock is already set by Set\")\n\t}\n\n\tif mmAsByteString.defaultExpectation == nil {\n\t\tmmAsByteString.defaultExpectation = &DigestHolderMockAsByteStringExpectation{}\n\t}\n\n\treturn mmAsByteString\n}", "func Expect(msg string) error {\n\tif msg != \"\" {\n\t\treturn errors.New(msg)\n\t} else {\n\t\treturn nil\n\t}\n}", "func (mmEncrypt *mRingMockEncrypt) Expect(t1 secrets.Text) *mRingMockEncrypt {\n\tif mmEncrypt.mock.funcEncrypt != nil {\n\t\tmmEncrypt.mock.t.Fatalf(\"RingMock.Encrypt mock is already set by Set\")\n\t}\n\n\tif mmEncrypt.defaultExpectation == nil {\n\t\tmmEncrypt.defaultExpectation = &RingMockEncryptExpectation{}\n\t}\n\n\tmmEncrypt.defaultExpectation.params = &RingMockEncryptParams{t1}\n\tfor _, e := range mmEncrypt.expectations {\n\t\tif minimock.Equal(e.params, mmEncrypt.defaultExpectation.params) {\n\t\t\tmmEncrypt.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmEncrypt.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmEncrypt\n}", "func (mmBootstrapper *mGatewayMockBootstrapper) Expect() 
*mGatewayMockBootstrapper {\n\tif mmBootstrapper.mock.funcBootstrapper != nil {\n\t\tmmBootstrapper.mock.t.Fatalf(\"GatewayMock.Bootstrapper mock is already set by Set\")\n\t}\n\n\tif mmBootstrapper.defaultExpectation == nil {\n\t\tmmBootstrapper.defaultExpectation = &GatewayMockBootstrapperExpectation{}\n\t}\n\n\treturn mmBootstrapper\n}", "func (m *MockNotary) EXPECT() *MockNotaryMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockSetSender) Expect(p insolar.Reference) *mParcelMockSetSender {\n\tm.mock.SetSenderFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockSetSenderExpectation{}\n\t}\n\tm.mainExpectation.input = &ParcelMockSetSenderInput{p}\n\treturn m\n}", "func (mmGetPacketType *mPacketParserMockGetPacketType) Expect() *mPacketParserMockGetPacketType {\n\tif mmGetPacketType.mock.funcGetPacketType != nil {\n\t\tmmGetPacketType.mock.t.Fatalf(\"PacketParserMock.GetPacketType mock is already set by Set\")\n\t}\n\n\tif mmGetPacketType.defaultExpectation == nil {\n\t\tmmGetPacketType.defaultExpectation = &PacketParserMockGetPacketTypeExpectation{}\n\t}\n\n\treturn mmGetPacketType\n}", "func (mmParsePacketBody *mPacketParserMockParsePacketBody) Expect() *mPacketParserMockParsePacketBody {\n\tif mmParsePacketBody.mock.funcParsePacketBody != nil {\n\t\tmmParsePacketBody.mock.t.Fatalf(\"PacketParserMock.ParsePacketBody mock is already set by Set\")\n\t}\n\n\tif mmParsePacketBody.defaultExpectation == nil {\n\t\tmmParsePacketBody.defaultExpectation = &PacketParserMockParsePacketBodyExpectation{}\n\t}\n\n\treturn mmParsePacketBody\n}", "func (mmAsBytes *mDigestHolderMockAsBytes) Expect() *mDigestHolderMockAsBytes {\n\tif mmAsBytes.mock.funcAsBytes != nil {\n\t\tmmAsBytes.mock.t.Fatalf(\"DigestHolderMock.AsBytes mock is already set by Set\")\n\t}\n\n\tif mmAsBytes.defaultExpectation == nil {\n\t\tmmAsBytes.defaultExpectation = &DigestHolderMockAsBytesExpectation{}\n\t}\n\n\treturn mmAsBytes\n}", 
"func (m *MockArticleLogic) EXPECT() *MockArticleLogicMockRecorder {\n\treturn m.recorder\n}", "func (mmKey *mIteratorMockKey) Expect() *mIteratorMockKey {\n\tif mmKey.mock.funcKey != nil {\n\t\tmmKey.mock.t.Fatalf(\"IteratorMock.Key mock is already set by Set\")\n\t}\n\n\tif mmKey.defaultExpectation == nil {\n\t\tmmKey.defaultExpectation = &IteratorMockKeyExpectation{}\n\t}\n\n\treturn mmKey\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *mOutboundMockCanAccept) Expect(p Inbound) *mOutboundMockCanAccept {\n\tm.mock.CanAcceptFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &OutboundMockCanAcceptExpectation{}\n\t}\n\tm.mainExpectation.input = &OutboundMockCanAcceptInput{p}\n\treturn m\n}", "func (m *MockLoaderFactory) EXPECT() *MockLoaderFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockPKG) EXPECT() *MockPKGMockRecorder {\n\treturn m.recorder\n}", "func (m *MockbucketDescriber) EXPECT() *MockbucketDescriberMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockType) Expect() *mParcelMockType {\n\tm.mock.TypeFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockTypeExpectation{}\n\t}\n\n\treturn m\n}", "func (m *MockKeystore) EXPECT() *MockKeystoreMockRecorder {\n\treturn m.recorder\n}", "func (m *MockKeystore) EXPECT() *MockKeystoreMockRecorder {\n\treturn m.recorder\n}", "func (mmExchange *mMDNSClientMockExchange) Expect(msg *mdns.Msg, address string) *mMDNSClientMockExchange {\n\tif mmExchange.mock.funcExchange != nil {\n\t\tmmExchange.mock.t.Fatalf(\"MDNSClientMock.Exchange mock is already set by Set\")\n\t}\n\n\tif mmExchange.defaultExpectation == nil {\n\t\tmmExchange.defaultExpectation = 
&MDNSClientMockExchangeExpectation{}\n\t}\n\n\tmmExchange.defaultExpectation.params = &MDNSClientMockExchangeParams{msg, address}\n\tfor _, e := range mmExchange.expectations {\n\t\tif minimock.Equal(e.params, mmExchange.defaultExpectation.params) {\n\t\t\tmmExchange.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmExchange.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmExchange\n}", "func (m *MockStream) EXPECT() *MockStreamMockRecorder {\n\treturn m.recorder\n}", "func (c Chkr) Expect(v validator, args ...interface{}) {\n\tif c.runTest(v, args...) {\n\t\tc.Fail()\n\t}\n}", "func (mmClone *mStorageMockClone) Expect(ctx context.Context, from insolar.PulseNumber, to insolar.PulseNumber, keepActual bool) *mStorageMockClone {\n\tif mmClone.mock.funcClone != nil {\n\t\tmmClone.mock.t.Fatalf(\"StorageMock.Clone mock is already set by Set\")\n\t}\n\n\tif mmClone.defaultExpectation == nil {\n\t\tmmClone.defaultExpectation = &StorageMockCloneExpectation{}\n\t}\n\n\tmmClone.defaultExpectation.params = &StorageMockCloneParams{ctx, from, to, keepActual}\n\tfor _, e := range mmClone.expectations {\n\t\tif minimock.Equal(e.params, mmClone.defaultExpectation.params) {\n\t\t\tmmClone.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmClone.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmClone\n}", "func (m *MockCodeGenerator) EXPECT() *MockCodeGeneratorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockNodeAttestor) EXPECT() *MockNodeAttestorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockNodeAttestor) EXPECT() *MockNodeAttestorMockRecorder {\n\treturn m.recorder\n}", "func (_m *MockIStream) EXPECT() *MockIStreamMockRecorder {\n\treturn _m.recorder\n}", "func (m *mOutboundMockGetEndpointType) Expect() *mOutboundMockGetEndpointType {\n\tm.mock.GetEndpointTypeFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &OutboundMockGetEndpointTypeExpectation{}\n\t}\n\n\treturn m\n}", 
"func (m *MockAZInfoProvider) EXPECT() *MockAZInfoProviderMockRecorder {\n\treturn m.recorder\n}" ]
[ "0.58157563", "0.5714918", "0.5672776", "0.5639812", "0.56273276", "0.5573085", "0.5567367", "0.5529613", "0.55066866", "0.5486919", "0.54729885", "0.54647803", "0.5460882", "0.54414886", "0.5440682", "0.5405729", "0.54035264", "0.53890616", "0.53831995", "0.53831995", "0.5369224", "0.53682834", "0.5358863", "0.5340405", "0.5338385", "0.5327707", "0.53230935", "0.53132576", "0.5307127", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.53035146", "0.5295391", "0.5295391", "0.5291368", "0.52822006", "0.52821374", "0.52767164", "0.5273333", "0.5273239", "0.5265769", "0.52593946", "0.52572596", "0.5256972", "0.52545565", "0.5249454", "0.52421427", "0.52410823", "0.5238541", "0.52360845", "0.5235068", "0.5227199", "0.5227038", "0.52227145", "0.52144563", "0.5212412", "0.52120364", "0.5211835", "0.5211705", "0.5208191", "0.5194654", "0.5190334", "0.51877177", "0.5187148", "0.5185659", "0.51827794", "0.51817787", "0.5175451", "0.51730126", "0.5169131", "0.5167294", "0.5162394", "0.51599216", "0.51597583", "0.5159494", "0.51442164", "0.51442164", "0.51442164", "0.5143891", "0.51437116", "0.51395434", "0.51341194", "0.5133995", "0.51337904", "0.51337904", "0.51298875", "0.5129523", "0.5128482", "0.5123544", "0.51224196", "0.51162475", "0.51162475", "0.51148367", "0.51146877", "0.51091874" ]
0.0
-1
WatchProemetheus mocks base method
func (m *MockmonitorInterface) WatchProemetheus() (chan []envoyTraffic, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WatchProemetheus") ret0, _ := ret[0].(chan []envoyTraffic) ret1, _ := ret[1].(error) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockAPI) HostMonitoring() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"HostMonitoring\")\n}", "func runTestcase(t *testing.T) {\n\n\tgaugeStatHandler = new(GaugeStats)\n\n\tinitGaugeStats(METRICS_CONFIG_FILE, gaugeStatHandler)\n\n\t// read raw-metrics from mock data gen, create observer and channel prometeus metric ingestion and processing\n\trawMetrics := getRawMetrics()\n\tnsWatcher := new(NamespaceWatcher)\n\tlObserver := &Observer{}\n\tch := make(chan prometheus.Metric, 1000)\n\tpass2Metrics := requestInfoNamespaces(rawMetrics)\n\n\tnsWatcher.passTwoKeys(rawMetrics)\n\n\tnsInfoKeys := createNamespacePassTwoExpectedOutputs(rawMetrics)\n\n\t// outputs := nsWatcher.passTwoKeys(pass2Metrics)\n\t// assert.Equal(t, outputs, expectedOutputs)\n\n\terr := nsWatcher.refresh(lObserver, nsInfoKeys, rawMetrics, ch)\n\n\tif err == nil {\n\t\t// map of string ==> map[\"namespace/metric-name\"][\"<VALUE>\"]\n\t\t// map of string ==> map[\"namespace/metric-name\"][\"<Label>\"]\n\t\t// both used to assert the return values from actual code against calculated values\n\t\tlOutputValues := map[string]string{}\n\t\tlOutputLabels := map[string]string{}\n\n\t\t// reads data from the Prom channel and creates a map of strings so we can assert in the below loop\n\t\tdomore := 1\n\n\t\tfor domore == 1 {\n\t\t\tselect {\n\n\t\t\tcase nsMetric := <-ch:\n\t\t\t\tdescription := nsMetric.Desc().String()\n\t\t\t\tvar protobuffer dto.Metric\n\t\t\t\terr := nsMetric.Write(&protobuffer)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\" unable to get metric \", description, \" data into protobuf \", err)\n\t\t\t\t}\n\n\t\t\t\tmetricValue := \"\"\n\t\t\t\tmetricLabel := fmt.Sprintf(\"%s\", protobuffer.Label)\n\t\t\t\tif protobuffer.Gauge != nil {\n\t\t\t\t\tmetricValue = fmt.Sprintf(\"%.0f\", *protobuffer.Gauge.Value)\n\t\t\t\t} else if protobuffer.Counter != nil {\n\t\t\t\t\tmetricValue = fmt.Sprintf(\"%.0f\", *protobuffer.Counter.Value)\n\t\t\t\t}\n\n\t\t\t\t// Desc{fqName: 
\"aerospike_namespac_memory_free_pct\", help: \"memory free pct\", constLabels: {}, variableLabels: [cluster_name service ns]}\n\t\t\t\tmetricNameFromDesc := extractMetricNameFromDesc(description)\n\t\t\t\tnamespaceFromLabel := extractNamespaceFromLabel(metricLabel)\n\n\t\t\t\t// key will be like namespace/<metric_name>, this we use this check during assertion\n\t\t\t\tkeyName := makeKeyname(namespaceFromLabel, metricNameFromDesc, true)\n\t\t\t\tlOutputValues[keyName] = metricValue\n\t\t\t\tlOutputLabels[keyName] = metricLabel\n\n\t\t\tcase <-time.After(1 * time.Second):\n\t\t\t\tdomore = 0\n\n\t\t\t} // end select\n\t\t}\n\n\t\t// loop each namespace and compare the label and value\n\t\tarrNames := strings.Split(pass2Metrics[\"namespaces\"], \";\")\n\n\t\tfor nsIndex := range arrNames {\n\t\t\ttnsForNamespace := arrNames[nsIndex]\n\t\t\tlExpectedMetricNamedValues, lExpectedMetricLabels := createNamespaceWatcherExpectedOutputs(tnsForNamespace, true)\n\n\t\t\tfor key := range lOutputValues {\n\t\t\t\texpectedValues := lExpectedMetricNamedValues[key]\n\t\t\t\texpectedLabels := lExpectedMetricLabels[key]\n\t\t\t\toutputMetricValues := lOutputValues[key]\n\t\t\t\toutpuMetricLabels := lOutputLabels[key]\n\n\t\t\t\t// assert - only if the value belongs to the namespace we read expected values and processing\n\t\t\t\tif strings.HasPrefix(key, tnsForNamespace) {\n\t\t\t\t\tassert.Contains(t, expectedValues, outputMetricValues)\n\t\t\t\t\tassert.Contains(t, expectedLabels, outpuMetricLabels)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\tfmt.Println(\" Failed Refreshing, error: \", err)\n\t}\n}", "func (m *MockWatcher) Watch(repo, reference string, startHeight, endHeight uint64) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Watch\", repo, reference, startHeight, endHeight)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *CloudWatchLogsServiceMock) CreateNewServiceIfUnHealthy() {\n\n}", "func (m *MockDeviceStore) Watch(arg0 chan<- *device.ListResponse) 
error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Watch\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func MockGetMetricStatistics(t *testing.T, mockMatcher *sdk.MockCloudWatchAPI, metric float64, statistic string, numberOfMetrics int) {\n\tlog.Logger.Warningf(\"Mocking AWS iface: GetMetricStatistics\")\n\tif numberOfMetrics == 0 {\n\t\tnumberOfMetrics = 1\n\t}\n\n\tresult := &cloudwatch.GetMetricStatisticsOutput{}\n\tresult.Label = aws.String(\"fake\")\n\tresult.Datapoints = make([]*cloudwatch.Datapoint, numberOfMetrics)\n\tfor i := 0; i < numberOfMetrics; i++ {\n\t\td := &cloudwatch.Datapoint{}\n\n\t\tswitch statistic {\n\t\tcase cloudwatch.StatisticMaximum:\n\t\t\td.Maximum = aws.Float64(metric)\n\t\tcase cloudwatch.StatisticMinimum:\n\t\t\td.Minimum = aws.Float64(metric)\n\t\tcase cloudwatch.StatisticSum:\n\t\t\td.Sum = aws.Float64(metric)\n\t\tcase cloudwatch.StatisticSampleCount:\n\t\t\td.SampleCount = aws.Float64(metric)\n\t\tcase cloudwatch.StatisticAverage:\n\t\t\td.Average = aws.Float64(metric)\n\t\tdefault:\n\t\t\tt.Fatalf(\"Wrong metric statistic: %s\", statistic)\n\t\t}\n\t\td.Timestamp = aws.Time(time.Now().UTC())\n\t\td.Unit = aws.String(cloudwatch.StandardUnitPercent)\n\t\tresult.Datapoints[i] = d\n\t}\n\n\t// Mock as expected with our result\n\tmockMatcher.EXPECT().GetMetricStatistics(gomock.Any()).Do(func(input interface{}) {\n\t\tgotInput := input.(*cloudwatch.GetMetricStatisticsInput)\n\t\t// Check API received parameters are fine\n\t\tif aws.StringValue(gotInput.Namespace) == \"\" {\n\t\t\tt.Fatalf(\"Expected namespace, got nothing\")\n\t\t}\n\n\t\tif aws.StringValue(gotInput.MetricName) == \"\" {\n\t\t\tt.Fatalf(\"Expected metric name, got nothing\")\n\t\t}\n\n\t\tif aws.StringValue(gotInput.Unit) == \"\" {\n\t\t\tt.Fatalf(\"Expected unit, got nothing\")\n\t\t}\n\n\t\tif len(gotInput.Statistics) != 1 {\n\t\t\tt.Fatalf(\"Wrong statistics name\")\n\t\t}\n\n\t}).AnyTimes().Return(result, nil)\n\n}", "func (m 
*MockMetricsProvider) ObserveGithubRequestDuration(method, handler, statusCode string, elapsed float64) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"ObserveGithubRequestDuration\", method, handler, statusCode, elapsed)\n}", "func (m *MockServicer) CalculateHashAndDuration(startTime time.Time, fiveSecTimer *time.Timer, password string) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"CalculateHashAndDuration\", startTime, fiveSecTimer, password)\n}", "func (m *MockWatcher) Watch(ctx context.Context) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Watch\", ctx)\n}", "func Test_GloutonStart(t *testing.T) {\n\tconst (\n\t\tresultName = \"my_rule_metric\"\n\t\tsourceMetric = \"cpu_used\"\n\t)\n\n\tstore := store.New(time.Hour, time.Hour)\n\tctx := context.Background()\n\tt0 := time.Now().Truncate(time.Second)\n\truleManager := newManager(ctx, store, defaultLinuxRecordingRules, t0)\n\tresPoints := []types.MetricPoint{}\n\n\tstore.PushPoints(context.Background(), []types.MetricPoint{\n\t\t{\n\t\t\tPoint: types.Point{\n\t\t\t\tTime: t0,\n\t\t\t\tValue: 700,\n\t\t\t},\n\t\t\tLabels: map[string]string{\n\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPoint: types.Point{\n\t\t\t\tTime: t0.Add(2 * time.Minute),\n\t\t\t\tValue: 800,\n\t\t\t},\n\t\t\tLabels: map[string]string{\n\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPoint: types.Point{\n\t\t\t\tTime: t0.Add(3 * time.Minute),\n\t\t\t\tValue: 800,\n\t\t\t},\n\t\t\tLabels: map[string]string{\n\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPoint: types.Point{\n\t\t\t\tTime: t0.Add(4 * time.Minute),\n\t\t\t\tValue: 800,\n\t\t\t},\n\t\t\tLabels: map[string]string{\n\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t},\n\t\t},\n\t})\n\n\tmetricList := []PromQLRule{\n\t\t{\n\t\t\tAlertingRuleID: 
\"509701d5-3cb0-449b-a858-0290f4dc3cff\",\n\t\t\tName: resultName,\n\t\t\tWarningQuery: fmt.Sprintf(\"%s > 50\", sourceMetric),\n\t\t\tWarningDelay: 5 * time.Minute,\n\t\t\tCriticalQuery: fmt.Sprintf(\"%s > 500\", sourceMetric),\n\t\t\tCriticalDelay: 5 * time.Minute,\n\t\t\tResolution: 10 * time.Second,\n\t\t\tInstanceID: agentID,\n\t\t},\n\t}\n\n\tstore.AddNotifiee(func(mp []types.MetricPoint) {\n\t\tresPoints = append(resPoints, mp...)\n\t})\n\n\terr := ruleManager.RebuildPromQLRules(metricList)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tfor i := 0; i < 6; i++ {\n\t\truleManager.now = func() time.Time { return t0.Add(time.Duration(i) * time.Minute) }\n\n\t\tif err := ruleManager.Collect(ctx, store.Appender(ctx)); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\n\t// Manager should not create ok points for the next 5 minutes after start,\n\t// as we do not provide a way for prometheus to know previous values before start.\n\t// This test should be changed in the future if we implement a persistent store,\n\t// as critical and warning points would be allowed.\n\tif len(resPoints) != 0 {\n\t\tt.Errorf(\"Unexpected number of points generated: expected 0, got %d:\\n%v\", len(resPoints), resPoints)\n\t}\n\n\truleManager.now = func() time.Time { return t0.Add(time.Duration(7) * time.Minute) }\n\n\terr = ruleManager.Collect(ctx, store.Appender(ctx))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif len(resPoints) == 0 {\n\t\tt.Errorf(\"Unexpected number of points generated: expected >0, got 0:\\n%v\", resPoints)\n\t}\n}", "func (m *MockRefSync) Watch(repo, reference string, startHeight, endHeight uint64) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Watch\", repo, reference, startHeight, endHeight)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockHealthCheck) RegisterStats() {\n\tm.ctrl.Call(m, \"RegisterStats\")\n}", "func (m *MockWatcherConstructor) New(arg0 Machine, arg1 string, arg2 []string, arg3, arg4, arg5 string, arg6 time.Duration, 
arg7 map[string]interface{}) (interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"New\", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)\n\tret0, _ := ret[0].(interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func mockedGranter(kubeutil *kube.Kube, app *v1.RadixRegistration, namespace string, serviceAccount *corev1.ServiceAccount) error {\n\treturn nil\n}", "func TestPrometheusMetrics(t *testing.T) {\n\tbytesProcessedMetric.WithLabelValues(\"x\")\n\tcacheHitMetric.WithLabelValues(\"x\")\n\tuploadedBytesMetric.WithLabelValues(\"x\")\n\tqueryTotalMetric.WithLabelValues(\"x\")\n\tqueryProcessedMetric.WithLabelValues(\"x\")\n\tinFlightUploadsHistogram.WithLabelValues(\"x\")\n\tuploadQueueSizeHistogram.WithLabelValues(\"x\")\n\n\tpromtest.LintMetrics(t)\n}", "func Test_NoUnknownOnStart(t *testing.T) {\n\tconst (\n\t\tresultName = \"copy_of_node_cpu_seconds_global\"\n\t\tsourceMetric = \"node_cpu_seconds_global\"\n\t)\n\n\tvar (\n\t\tresPoints []types.MetricPoint\n\t\tl sync.Mutex\n\t)\n\n\tstore := store.New(time.Hour, time.Hour)\n\n\treg, err := registry.New(registry.Option{\n\t\tPushPoint: pushFunction(func(ctx context.Context, points []types.MetricPoint) {\n\t\t\tl.Lock()\n\t\t\tdefer l.Unlock()\n\n\t\t\tresPoints = append(resPoints, points...)\n\t\t}),\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctx := context.Background()\n\tt0 := time.Now().Truncate(time.Second)\n\n\t// we always boot the manager with 10 seconds resolution\n\truleManager := newManager(ctx, store, defaultLinuxRecordingRules, t0)\n\n\tpromqlRules := []PromQLRule{\n\t\t{\n\t\t\tAlertingRuleID: \"509701d5-3cb0-449b-a858-0290f4dc3cff\",\n\t\t\tName: resultName,\n\t\t\tWarningQuery: fmt.Sprintf(\"%s > 0\", sourceMetric),\n\t\t\tWarningDelay: 5 * time.Minute,\n\t\t\tCriticalQuery: fmt.Sprintf(\"%s > 100\", sourceMetric),\n\t\t\tCriticalDelay: 5 * time.Minute,\n\t\t\tResolution: 10 * time.Second,\n\t\t\tInstanceID: agentID,\n\t\t},\n\t}\n\n\terr = 
ruleManager.RebuildPromQLRules(promqlRules)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tid, err := reg.RegisterAppenderCallback(\n\t\tregistry.RegistrationOption{\n\t\t\tNoLabelsAlteration: true,\n\t\t\tDisablePeriodicGather: true,\n\t\t},\n\t\tregistry.AppenderRegistrationOption{},\n\t\truleManager,\n\t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor currentTime := t0; currentTime.Before(t0.Add(9 * time.Minute)); currentTime = currentTime.Add(10 * time.Second) {\n\t\tif currentTime.After(t0.Add(1 * time.Minute)) {\n\t\t\t// Took one full minute before first points.\n\t\t\tstore.PushPoints(context.Background(), []types.MetricPoint{\n\t\t\t\t{\n\t\t\t\t\tPoint: types.Point{\n\t\t\t\t\t\tTime: currentTime,\n\t\t\t\t\t\tValue: 30,\n\t\t\t\t\t},\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\truleManager.now = func() time.Time { return currentTime }\n\n\t\treg.InternalRunScrape(ctx, currentTime, id)\n\t}\n\n\tvar hadResult bool\n\n\tfor _, p := range resPoints {\n\t\tif p.Labels[types.LabelName] != resultName {\n\t\t\tt.Errorf(\"unexpected point with labels: %v\", p.Labels)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif p.Annotations.Status.CurrentStatus == types.StatusWarning {\n\t\t\thadResult = true\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif p.Annotations.Status.CurrentStatus == types.StatusUnknown {\n\t\t\tt.Errorf(\"point status = %v want %v\", p.Annotations.Status.CurrentStatus, types.StatusWarning)\n\t\t}\n\t}\n\n\tif !hadResult {\n\t\tt.Errorf(\"rule never returned any points\")\n\t}\n}", "func TestMonitorPrivate(t *testing.T) {\n\thelper := newHelper(t)\n\tdefer helper.Close()\n\n\t// Configure as private probe\n\thelper.cfg.Blackbox.ScraperName = \"\"\n\thelper.cfg.Blackbox.ScraperSendUUID = true\n\n\thelper.addMonitorOnAPI(t)\n\thelper.initSynchronizer(t)\n\thelper.AddTime(time.Minute)\n\n\tif err := helper.runOnceWithResult(t).Check(); err 
!= nil {\n\t\tt.Error(err)\n\t}\n\n\tidAgentMain, _ := helper.state.BleemeoCredentials()\n\tif idAgentMain == \"\" {\n\t\tt.Fatal(\"idAgentMain == '', want something\")\n\t}\n\n\tinitialMetrics := []metricPayload{\n\t\t// Metric from other probe are NOT present in API, because glouton private probe aren't allow to view them.\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"9149d491-3a6e-4f46-abf9-c1ea9b9f7227\",\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_success\",\n\t\t},\n\t}\n\n\tpushedPoints := []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"probe_success\"},\n\t\t\tlabels.Label{Name: types.LabelScraperUUID, Value: idAgentMain},\n\t\t\tlabels.Label{Name: types.LabelInstance, Value: newMonitor.URL},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: newMonitor.AgentID},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgentUUID, Value: newMonitor.AgentID},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"probe_duration\"},\n\t\t\tlabels.Label{Name: types.LabelScraperUUID, Value: idAgentMain},\n\t\t\tlabels.Label{Name: types.LabelInstance, Value: newMonitor.URL},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: newMonitor.AgentID},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgentUUID, Value: newMonitor.AgentID},\n\t\t),\n\t}\n\n\thelper.SetAPIMetrics(initialMetrics...)\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\twant := []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: 
\"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_success\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_duration\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_duration\",\n\t\t},\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n\n\thelper.SetTimeToNextFullSync()\n\thelper.AddTime(60 * time.Minute)\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n\n\thelper.SetTimeToNextFullSync()\n\thelper.AddTime(60 * time.Minute)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\twant = []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tDeactivatedAt: helper.Now(),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: 
\"probe_success\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_duration\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tDeactivatedAt: helper.Now(),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_duration\",\n\t\t},\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n}", "func (m *MockFlushTimesManager) Watch() (watch.Watch, error) {\n\tret := m.ctrl.Call(m, \"Watch\")\n\tret0, _ := ret[0].(watch.Watch)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockPromMetrics) IncUpdate() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"IncUpdate\")\n}", "func (m *MockWatcher) Start() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Start\")\n}", "func Test_NoStatusChangeOnStart(t *testing.T) {\n\tconst (\n\t\tresultName = \"copy_of_node_cpu_seconds_global\"\n\t\tsourceMetric = \"node_cpu_seconds_global\"\n\t)\n\n\tfor _, resolutionSecond := range []int{10, 30, 60} {\n\t\tt.Run(fmt.Sprintf(\"resolution=%d\", resolutionSecond), func(t *testing.T) {\n\t\t\tvar (\n\t\t\t\tresPoints []types.MetricPoint\n\t\t\t\tl sync.Mutex\n\t\t\t)\n\n\t\t\tstore := store.New(time.Hour, time.Hour)\n\t\t\treg, err := registry.New(registry.Option{\n\t\t\t\tPushPoint: pushFunction(func(ctx context.Context, points []types.MetricPoint) {\n\t\t\t\t\tl.Lock()\n\t\t\t\t\tdefer l.Unlock()\n\n\t\t\t\t\tresPoints = append(resPoints, points...)\n\t\t\t\t}),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tctx := context.Background()\n\t\t\tt0 := time.Now().Truncate(time.Second)\n\n\t\t\truleManager := newManager(ctx, store, defaultLinuxRecordingRules, t0)\n\n\t\t\tpromqlRules := []PromQLRule{\n\t\t\t\t{\n\t\t\t\t\tAlertingRuleID: \"509701d5-3cb0-449b-a858-0290f4dc3cff\",\n\t\t\t\t\tName: 
resultName,\n\t\t\t\t\tWarningQuery: fmt.Sprintf(\"%s > 0\", sourceMetric),\n\t\t\t\t\tWarningDelay: 5 * time.Minute,\n\t\t\t\t\tCriticalQuery: fmt.Sprintf(\"%s > 100\", sourceMetric),\n\t\t\t\t\tCriticalDelay: 5 * time.Minute,\n\t\t\t\t\tResolution: time.Duration(resolutionSecond) * time.Second,\n\t\t\t\t\tInstanceID: agentID,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr = ruleManager.RebuildPromQLRules(promqlRules)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\n\t\t\tid, err := reg.RegisterAppenderCallback(\n\t\t\t\tregistry.RegistrationOption{\n\t\t\t\t\tNoLabelsAlteration: true,\n\t\t\t\t\tDisablePeriodicGather: true,\n\t\t\t\t},\n\t\t\t\tregistry.AppenderRegistrationOption{},\n\t\t\t\truleManager,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tfor currentTime := t0; currentTime.Before(t0.Add(7 * time.Minute)); currentTime = currentTime.Add(time.Second * time.Duration(resolutionSecond)) {\n\t\t\t\tif !currentTime.Equal(t0) {\n\t\t\t\t\t// cpu_used need two gather to be calculated, skip first point.\n\t\t\t\t\tstore.PushPoints(context.Background(), []types.MetricPoint{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPoint: types.Point{\n\t\t\t\t\t\t\t\tTime: currentTime,\n\t\t\t\t\t\t\t\tValue: 30,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\t\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\tif currentTime.Sub(t0) > 6*time.Minute {\n\t\t\t\t\tlogger.V(0).Printf(\"Number of points: %d\", len(resPoints))\n\t\t\t\t}\n\n\t\t\t\truleManager.now = func() time.Time { return currentTime }\n\t\t\t\treg.InternalRunScrape(ctx, currentTime, id)\n\t\t\t}\n\n\t\t\tvar hadResult bool\n\n\t\t\t// Manager should not create ok points since the metric is always in critical.\n\t\t\t// This test might be changed in the future if we implement a persistent store,\n\t\t\t// as it would allow to known the exact hold state of the Prometheus 
rule.\n\t\t\tfor _, p := range resPoints {\n\t\t\t\tif p.Labels[types.LabelName] != resultName {\n\t\t\t\t\tt.Errorf(\"unexpected point with labels: %v\", p.Labels)\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif p.Annotations.Status.CurrentStatus == types.StatusWarning {\n\t\t\t\t\thadResult = true\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tt.Errorf(\"point status = %v want %v\", p.Annotations.Status.CurrentStatus, types.StatusWarning)\n\t\t\t}\n\n\t\t\tif !hadResult {\n\t\t\t\tt.Errorf(\"rule never returned any points\")\n\t\t\t}\n\t\t})\n\t}\n}", "func (m *MockMetrics) AddToGauge(arg0 string, arg1 float64, arg2 ...string) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"AddToGauge\", varargs...)\n}", "func TestPollTimer_Ticker(t *testing.T) {\n}", "func (m *MockSession) AnnounceWorkerStopped() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AnnounceWorkerStopped\")\n}", "func (m *MockKeystore) WatchPrefix(prefix string, onUpdate func(*keystoreregistry.KeyValueVersion, *keystoreregistry.KeyValueVersion)) {\n\tm.ctrl.Call(m, \"WatchPrefix\", prefix, onUpdate)\n}", "func (p *netPolPromVals) testPrometheusMetrics(t *testing.T) {\n\tnumPolicies, err := metrics.GetNumPolicies()\n\tpromutil.NotifyIfErrors(t, err)\n\tif numPolicies != p.expectedNumPolicies {\n\t\trequire.FailNowf(t, \"\", \"Number of policies didn't register correctly in Prometheus. Expected %d. 
Got %d.\", p.expectedNumPolicies, numPolicies)\n\t}\n\n\taddExecCount, err := metrics.GetControllerPolicyExecCount(metrics.CreateOp, false)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, p.expectedAddExecCount, addExecCount, \"Count for add execution time didn't register correctly in Prometheus\")\n\n\taddErrorExecCount, err := metrics.GetControllerPolicyExecCount(metrics.CreateOp, true)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, 0, addErrorExecCount, \"Count for add error execution time should be 0\")\n\n\tupdateExecCount, err := metrics.GetControllerPolicyExecCount(metrics.UpdateOp, false)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, p.expectedUpdateExecCount, updateExecCount, \"Count for update execution time didn't register correctly in Prometheus\")\n\n\tupdateErrorExecCount, err := metrics.GetControllerPolicyExecCount(metrics.UpdateOp, true)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, 0, updateErrorExecCount, \"Count for update error execution time should be 0\")\n\n\tdeleteExecCount, err := metrics.GetControllerPolicyExecCount(metrics.DeleteOp, false)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, p.expectedDeleteExecCount, deleteExecCount, \"Count for delete execution time didn't register correctly in Prometheus\")\n\n\tdeleteErrorExecCount, err := metrics.GetControllerPolicyExecCount(metrics.DeleteOp, true)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, 0, deleteErrorExecCount, \"Count for delete error execution time should be 0\")\n}", "func (m *MockProvider) OnServiceUpdate(arg0, arg1 *v1.Service) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceUpdate\", arg0, arg1)\n}", "func (m *MockPodInterface) Watch(arg0 v10.ListOptions) (watch.Interface, error) {\n\tret := m.ctrl.Call(m, \"Watch\", arg0)\n\tret0, _ := ret[0].(watch.Interface)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockConnectionTracer) UpdatedMetrics(arg0 *utils.RTTStats, arg1, arg2 protocol.ByteCount, arg3 int) 
{\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"UpdatedMetrics\", arg0, arg1, arg2, arg3)\n}", "func (m *MockVirtualServiceClient) Watch(arg0 string, arg1 clients.WatchOpts) (<-chan v1.VirtualServiceList, <-chan error, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Watch\", arg0, arg1)\n\tret0, _ := ret[0].(<-chan v1.VirtualServiceList)\n\tret1, _ := ret[1].(<-chan error)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (m *MockHealthCheck) RegisterStats() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RegisterStats\")\n}", "func (m *MockMetrics) MeasureSince(arg0 []string, arg1 time.Time) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"MeasureSince\", arg0, arg1)\n}", "func (m *MockMetrics) CountStart() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"CountStart\")\n}", "func init() {\n\tpromRegistry := prometheus.NewRegistry()\n\tpromRegistry.MustRegister(uptime, reqCount, passCount, blockCount, reqDuration)\n\tgo recordUptime()\n\tpromHandler = promhttp.InstrumentMetricHandler(promRegistry, promhttp.HandlerFor(promRegistry, promhttp.HandlerOpts{}))\n}", "func (m *MockTopoServer) Watch(arg0 *topo.WatchRequest, arg1 topo.Topo_WatchServer) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Watch\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockCerebroker) Resample() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Resample\")\n}", "func (m *MockHandler) Process(arg0 *models.Event) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Reduce\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockMetricsProvider) IncreaseGithubCacheMisses(method, handler string) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"IncreaseGithubCacheMisses\", method, handler)\n}", "func (m *MockIBlade) BladeWSWatchConn(ctx context.Context) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"BladeWSWatchConn\", ctx)\n}", "func (m *MockCache) Watch(ch chan<- stream.Event, replay bool) (stream.Context, error) {\n\tm.ctrl.T.Helper()\n\tret := 
m.ctrl.Call(m, \"Watch\", ch, replay)\n\tret0, _ := ret[0].(stream.Context)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClientInterface) Watch(ctx context.Context, pipeline interface{}, opts ...*options.ChangeStreamOptions) (*mongo.ChangeStream, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, pipeline}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Watch\", varargs...)\n\tret0, _ := ret[0].(*mongo.ChangeStream)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockHealthCheck) WaitForInitialStatsUpdates() {\n\tm.ctrl.Call(m, \"WaitForInitialStatsUpdates\")\n}", "func (m *MockTopoClient) Watch(ctx context.Context, in *topo.WatchRequest, opts ...grpc.CallOption) (topo.Topo_WatchClient, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Watch\", varargs...)\n\tret0, _ := ret[0].(topo.Topo_WatchClient)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockChoriaProvider) PrometheusTextFileDir() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"PrometheusTextFileDir\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func TestWatcher(t *testing.T) {\n\twatcher := ImokWatch(500*time.Millisecond, 5)\n\twatcher.Watch(make(chan bool))\n}", "func (m *MockServiceBoard) WebSocketNotification(arg0 models.UserInput, arg1 echo.Context) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WebSocketNotification\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockMetricsProvider) IncreaseWebhookRequest(name string) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"IncreaseWebhookRequest\", name)\n}", "func BaseTest(t *testing.T,\n\tgetCollector getCollectorFn,\n\tcheckResult func(t *testing.T, samples []stats.SampleContainer, expectedOutput, output string),\n) {\n\tt.Helper()\n\ttestNamespace := \"testing.things.\" // to be 
dynamic\n\n\taddr, err := net.ResolveUDPAddr(\"udp\", \"localhost:0\")\n\trequire.NoError(t, err)\n\tlistener, err := net.ListenUDP(\"udp\", addr) // we want to listen on a random port\n\trequire.NoError(t, err)\n\tch := make(chan string, 20)\n\tend := make(chan struct{})\n\tdefer close(end)\n\n\tgo func() {\n\t\tdefer close(ch)\n\t\tvar buf [4096]byte\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-end:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tn, _, err := listener.ReadFromUDP(buf[:])\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tch <- string(buf[:n])\n\t\t\t}\n\t\t}\n\t}()\n\n\tpushInterval := types.NullDurationFrom(time.Millisecond * 10)\n\tcollector, err := getCollector(\n\t\ttestutils.NewLogger(t),\n\t\tnull.StringFrom(listener.LocalAddr().String()),\n\t\tnull.StringFrom(testNamespace),\n\t\tnull.IntFrom(5),\n\t\tpushInterval,\n\t)\n\trequire.NoError(t, err)\n\trequire.NoError(t, collector.Init())\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tgo collector.Run(ctx)\n\tnewSample := func(m *stats.Metric, value float64, tags map[string]string) stats.Sample {\n\t\treturn stats.Sample{\n\t\t\tTime: time.Now(),\n\t\t\tMetric: m, Value: value, Tags: stats.IntoSampleTags(&tags),\n\t\t}\n\t}\n\n\tmyCounter := stats.New(\"my_counter\", stats.Counter)\n\tmyGauge := stats.New(\"my_gauge\", stats.Gauge)\n\tmyTrend := stats.New(\"my_trend\", stats.Trend)\n\tmyRate := stats.New(\"my_rate\", stats.Rate)\n\tmyCheck := stats.New(\"my_check\", stats.Rate)\n\ttestMatrix := []struct {\n\t\tinput []stats.SampleContainer\n\t\toutput string\n\t}{\n\t\t{\n\t\t\tinput: []stats.SampleContainer{\n\t\t\t\tnewSample(myCounter, 12, map[string]string{\n\t\t\t\t\t\"tag1\": \"value1\",\n\t\t\t\t\t\"tag3\": \"value3\",\n\t\t\t\t}),\n\t\t\t},\n\t\t\toutput: \"testing.things.my_counter:12|c\",\n\t\t},\n\t\t{\n\t\t\tinput: []stats.SampleContainer{\n\t\t\t\tnewSample(myGauge, 13, map[string]string{\n\t\t\t\t\t\"tag1\": \"value1\",\n\t\t\t\t\t\"tag3\": 
\"value3\",\n\t\t\t\t}),\n\t\t\t},\n\t\t\toutput: \"testing.things.my_gauge:13.000000|g\",\n\t\t},\n\t\t{\n\t\t\tinput: []stats.SampleContainer{\n\t\t\t\tnewSample(myTrend, 14, map[string]string{\n\t\t\t\t\t\"tag1\": \"value1\",\n\t\t\t\t\t\"tag3\": \"value3\",\n\t\t\t\t}),\n\t\t\t},\n\t\t\toutput: \"testing.things.my_trend:14.000000|ms\",\n\t\t},\n\t\t{\n\t\t\tinput: []stats.SampleContainer{\n\t\t\t\tnewSample(myRate, 15, map[string]string{\n\t\t\t\t\t\"tag1\": \"value1\",\n\t\t\t\t\t\"tag3\": \"value3\",\n\t\t\t\t}),\n\t\t\t},\n\t\t\toutput: \"testing.things.my_rate:15|c\",\n\t\t},\n\t\t{\n\t\t\tinput: []stats.SampleContainer{\n\t\t\t\tnewSample(myCheck, 16, map[string]string{\n\t\t\t\t\t\"tag1\": \"value1\",\n\t\t\t\t\t\"tag3\": \"value3\",\n\t\t\t\t\t\"check\": \"max<100\",\n\t\t\t\t}),\n\t\t\t\tnewSample(myCheck, 0, map[string]string{\n\t\t\t\t\t\"tag1\": \"value1\",\n\t\t\t\t\t\"tag3\": \"value3\",\n\t\t\t\t\t\"check\": \"max>100\",\n\t\t\t\t}),\n\t\t\t},\n\t\t\toutput: \"testing.things.check.max<100.pass:1|c\\ntesting.things.check.max>100.fail:1|c\",\n\t\t},\n\t}\n\tfor _, test := range testMatrix {\n\t\tcollector.Collect(test.input)\n\t\ttime.Sleep((time.Duration)(pushInterval.Duration))\n\t\toutput := <-ch\n\t\tcheckResult(t, test.input, test.output, output)\n\t}\n}", "func setupMetrics() Metrics {\n\tm := Metrics{}\n\tm.LastBackupDuration = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"clickhouse_backup\",\n\t\tName: \"last_backup_duration\",\n\t\tHelp: \"Backup duration in nanoseconds.\",\n\t})\n\tm.LastBackupSuccess = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"clickhouse_backup\",\n\t\tName: \"last_backup_success\",\n\t\tHelp: \"Last backup success boolean: 0=failed, 1=success, 2=unknown.\",\n\t})\n\tm.LastBackupStart = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"clickhouse_backup\",\n\t\tName: \"last_backup_start\",\n\t\tHelp: \"Last backup start timestamp.\",\n\t})\n\tm.LastBackupEnd = 
prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"clickhouse_backup\",\n\t\tName: \"last_backup_end\",\n\t\tHelp: \"Last backup end timestamp.\",\n\t})\n\tm.SuccessfulBackups = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: \"clickhouse_backup\",\n\t\tName: \"successful_backups\",\n\t\tHelp: \"Number of Successful Backups.\",\n\t})\n\tm.FailedBackups = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: \"clickhouse_backup\",\n\t\tName: \"failed_backups\",\n\t\tHelp: \"Number of Failed Backups.\",\n\t})\n\tprometheus.MustRegister(\n\t\tm.LastBackupDuration,\n\t\tm.LastBackupStart,\n\t\tm.LastBackupEnd,\n\t\tm.LastBackupSuccess,\n\t\tm.SuccessfulBackups,\n\t\tm.FailedBackups,\n\t)\n\tm.LastBackupSuccess.Set(2) // 0=failed, 1=success, 2=unknown\n\treturn m\n}", "func (m *MockJobClient) Watch(arg0 string, arg1 v10.ListOptions) (watch.Interface, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Watch\", arg0, arg1)\n\tret0, _ := ret[0].(watch.Interface)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestPrometheusScaler(t *testing.T) {\n\t// Create kubernetes resources\n\tkc := GetKubernetesClient(t)\n\tprometheus.Install(t, kc, prometheusServerName, testNamespace)\n\n\t// Create kubernetes resources for testing\n\tdata, templates := getTemplateData()\n\tKubectlApplyMultipleWithTemplate(t, data, templates)\n\tassert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, monitoredAppName, testNamespace, 1, 60, 3),\n\t\t\"replica count should be %d after 3 minutes\", minReplicaCount)\n\tassert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),\n\t\t\"replica count should be %d after 3 minutes\", minReplicaCount)\n\n\ttestActivation(t, kc, data)\n\ttestScaleOut(t, kc, data)\n\ttestScaleIn(t, kc)\n\n\t// cleanup\n\tKubectlDeleteMultipleWithTemplate(t, data, templates)\n\tprometheus.Uninstall(t, prometheusServerName, testNamespace)\n}", "func StartMockups() 
{\n\tenabledMocks = true\n}", "func mockAlwaysRun() bool { return true }", "func NewMockDefault() *Mock {\n\tmgr := new(Mock)\n\tvar pluginsMap = make(map[string]managerContracts.Plugin)\n\tvar cwPlugin = managerContracts.Plugin{\n\t\tHandler: cloudwatch.NewMockDefault(),\n\t}\n\tpluginsMap[CloudWatchId] = cwPlugin\n\n\tmgr.On(\"GetRegisteredPlugins\").Return(pluginsMap)\n\tmgr.On(\"Name\").Return(CloudWatchId)\n\tmgr.On(\"Execute\", mock.AnythingOfType(\"context.T\")).Return(nil)\n\tmgr.On(\"RequestStop\", mock.AnythingOfType(\"string\")).Return(nil)\n\tmgr.On(\"StopPlugin\", mock.AnythingOfType(\"string\"), mock.Anything).Return(nil)\n\tmgr.On(\"StartPlugin\", mock.AnythingOfType(\"string\"), mock.AnythingOfType(\"string\"), mock.AnythingOfType(\"string\"), mock.AnythingOfType(\"task.CancelFlag\")).Return(nil)\n\treturn mgr\n}", "func (m *MockMetrics) ObserveUpdateDurationByType(kind string, duration time.Duration) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"ObserveUpdateDurationByType\", kind, duration)\n}", "func TestMetricSimpleSync(t *testing.T) {\n\thelper := newHelper(t)\n\tdefer helper.Close()\n\n\thelper.preregisterAgent(t)\n\thelper.initSynchronizer(t)\n\thelper.AddTime(time.Minute)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// list metrics and register agent_status\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 2)\n\n\tidAgentMain, _ := helper.state.BleemeoCredentials()\n\n\tmetrics := helper.MetricsFromAPI()\n\twant := []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"1\",\n\t\t\t\tAgentID: helper.s.agentID,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t}\n\n\tif diff := cmp.Diff(want, metrics); diff != \"\" {\n\t\tt.Errorf(\"metrics mismatch (-want +got):\\n%s\", diff)\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: 
types.LabelName, Value: \"cpu_system\"},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: idAgentMain},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// We do 2 request: list metrics, list inactive metrics\n\t// and register new metric\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 3)\n\n\tmetrics = helper.MetricsFromAPI()\n\twant = []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"1\",\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"2\",\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: \"cpu_system\",\n\t\t},\n\t}\n\n\tif diff := cmp.Diff(want, metrics); diff != \"\" {\n\t\tt.Errorf(\"metrics mismatch (-want +got):\\n%s\", diff)\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\n\t// Register 1000 metrics\n\tfor n := 0; n < 1000; n++ {\n\t\thelper.pushPoints(t, []labels.Labels{\n\t\t\tlabels.New(\n\t\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric\"},\n\t\t\t\tlabels.Label{Name: types.LabelItem, Value: strconv.FormatInt(int64(n), 10)},\n\t\t\t\tlabels.Label{Name: types.LabelMetaBleemeoItem, Value: strconv.FormatInt(int64(n), 10)},\n\t\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: idAgentMain},\n\t\t\t),\n\t\t})\n\t}\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// We do 1003 request: 3 for listing and 1000 registration\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 1003)\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 1002 {\n\t\tt.Errorf(\"len(metrics) = %v, want %v\", len(metrics), 1002)\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\thelper.store.DropAllMetrics()\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil 
{\n\t\tt.Error(err)\n\t}\n\n\t// We do 1001 request: 1001 to mark inactive all metrics but agent_status\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 1001)\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 1002 {\n\t\tt.Errorf(\"len(metrics) = %v, want %v\", len(metrics), 1002)\n\t}\n\n\tfor _, m := range metrics {\n\t\tif m.DeactivatedAt.IsZero() && m.Name != agentStatusName {\n\t\t\tt.Errorf(\"%v should be deactivated\", m)\n\n\t\t\tbreak\n\t\t} else if !m.DeactivatedAt.IsZero() && m.Name == agentStatusName {\n\t\t\tt.Errorf(\"%v should not be deactivated\", m)\n\t\t}\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\n\t// re-activate one metric + register one\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"cpu_system\"},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: idAgentMain},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"disk_used\"},\n\t\t\tlabels.Label{Name: types.LabelItem, Value: \"/home\"},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoItem, Value: \"/home\"},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: idAgentMain},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// We do 3 request: 1 to re-enable metric,\n\t// 1 search for metric before registration, 1 to register metric\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 3)\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 1003 {\n\t\tt.Errorf(\"len(metrics) = %v, want %v\", len(metrics), 1002)\n\t}\n\n\tfor _, m := range metrics {\n\t\tif m.Name == agentStatusName || m.Name == \"cpu_system\" || m.Name == \"disk_used\" {\n\t\t\tif !m.DeactivatedAt.IsZero() {\n\t\t\t\tt.Errorf(\"%v should be active\", m)\n\t\t\t}\n\t\t} else if m.DeactivatedAt.IsZero() {\n\t\t\tt.Errorf(\"%v should be deactivated\", m)\n\n\t\t\tbreak\n\t\t}\n\n\t\tif m.Name == 
\"disk_used\" {\n\t\t\tif m.Item != \"/home\" {\n\t\t\t\tt.Errorf(\"%v miss item=/home\", m)\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *MockWatcher) Run(arg0 context.Context, arg1 *sync.WaitGroup) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Run\", arg0, arg1)\n}", "func TestKubernetesMetrics(t *testing.T) {\n\thelper := newHelper(t)\n\tdefer helper.Close()\n\n\t// Glouton is configured as Kubernetes cluster\n\thelper.facts.SetFact(facts.FactKubernetesCluster, testK8SClusterName)\n\n\thelper.initSynchronizer(t)\n\thelper.AddTime(time.Minute)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tidAgentMain, _ := helper.state.BleemeoCredentials()\n\tif idAgentMain == \"\" {\n\t\tt.Fatal(\"idAgentMain == '', want something\")\n\t}\n\n\thelper.assertFactsInAPI(t, []bleemeoTypes.AgentFact{\n\t\t{\n\t\t\tID: idAny,\n\t\t\tAgentID: idAgentMain,\n\t\t\tKey: \"fqdn\",\n\t\t\tValue: testAgentFQDN,\n\t\t},\n\t\t{\n\t\t\tID: idAny,\n\t\t\tAgentID: idAgentMain,\n\t\t\tKey: facts.FactKubernetesCluster,\n\t\t\tValue: testK8SClusterName,\n\t\t},\n\t})\n\n\t// API does a lead-election and notify us that we are leader\n\thelper.AddTime(time.Second)\n\n\tfor _, agent := range helper.AgentsFromAPI() {\n\t\tif agent.ID == idAgentMain {\n\t\t\tagent.IsClusterLeader = true\n\t\t\thelper.api.resources[mockAPIResourceAgent].AddStore(agent)\n\n\t\t\t// Currently on leader status change, API sent \"config-changed\" message\n\t\t\thelper.s.NotifyConfigUpdate(true)\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// API create a global Kubernetes agent\n\thelper.api.resources[mockAPIResourceAgent].AddStore(testK8SAgent)\n\n\thelper.AddTime(10 * time.Second)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodAgent); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif !helper.cache.Agent().IsClusterLeader {\n\t\tt.Fatal(\"agent isn't ClusterLeader\")\n\t}\n\n\tpushedPoints := []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: 
\"kubernetes_kubelet_status\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"cpu_used\"},\n\t\t),\n\t\t// Note: we have both meta-label & normal label because we need to simulare Registry.applyRelabel()\n\t\t// (we need both annotation & getDefaultRelabelConfig())\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"kubernetes_cpu_limits\"},\n\t\t\tlabels.Label{Name: types.LabelOwnerKind, Value: \"daemonset\"},\n\t\t\tlabels.Label{Name: types.LabelOwnerName, Value: \"glouton\"},\n\t\t\tlabels.Label{Name: types.LabelNamespace, Value: \"default\"},\n\t\t\tlabels.Label{Name: types.LabelMetaKubernetesCluster, Value: testK8SClusterName},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgent, Value: testK8SClusterName},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgentUUID, Value: testK8SAgent.ID},\n\t\t\tlabels.Label{Name: types.LabelInstance, Value: testK8SClusterName},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: testK8SAgent.ID},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"kubernetes_cpu_requests\"},\n\t\t\tlabels.Label{Name: types.LabelOwnerKind, Value: \"daemonset\"},\n\t\t\tlabels.Label{Name: types.LabelOwnerName, Value: \"glouton\"},\n\t\t\tlabels.Label{Name: types.LabelNamespace, Value: \"default\"},\n\t\t\tlabels.Label{Name: types.LabelMetaKubernetesCluster, Value: testK8SClusterName},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgent, Value: testK8SClusterName},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgentUUID, Value: testK8SAgent.ID},\n\t\t\tlabels.Label{Name: types.LabelInstance, Value: testK8SClusterName},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: testK8SAgent.ID},\n\t\t),\n\t}\n\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\twant := []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: 
idAny,\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: \"cpu_used\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: \"kubernetes_kubelet_status\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: testK8SAgent.ID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"kubernetes_cpu_limits\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",namespace=\\\"default\\\",owner_kind=\\\"daemonset\\\",owner_name=\\\"glouton\\\"\",\n\t\t\t\t\ttestK8SClusterName,\n\t\t\t\t\ttestK8SAgent.ID,\n\t\t\t\t),\n\t\t\t},\n\t\t\tName: \"kubernetes_cpu_limits\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: testK8SAgent.ID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"kubernetes_cpu_requests\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",namespace=\\\"default\\\",owner_kind=\\\"daemonset\\\",owner_name=\\\"glouton\\\"\",\n\t\t\t\t\ttestK8SClusterName,\n\t\t\t\t\ttestK8SAgent.ID,\n\t\t\t\t),\n\t\t\t},\n\t\t\tName: \"kubernetes_cpu_requests\",\n\t\t},\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n\n\thelper.SetTimeToNextFullSync()\n\thelper.AddTime(60 * time.Minute)\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n\n\thelper.SetTimeToNextFullSync()\n\thelper.AddTime(60 * time.Minute)\n\n\t// One metrics become inactive (kubernetes_cpu_requests)\n\tpushedPoints = pushedPoints[:len(pushedPoints)-1]\n\twant[len(want)-1].DeactivatedAt = helper.Now()\n\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil 
{\n\t\tt.Error(err)\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n\n\t// I'm no longer leader\n\thelper.AddTime(time.Second)\n\n\tfor _, agent := range helper.AgentsFromAPI() {\n\t\tif agent.ID == idAgentMain {\n\t\t\tagent.IsClusterLeader = false\n\t\t\thelper.api.resources[mockAPIResourceAgent].AddStore(agent)\n\n\t\t\t// Currently on leader status change, API sent \"config-changed\" message\n\t\t\thelper.s.NotifyConfigUpdate(true)\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Therefor Glouton stop emitting all kubernetes global metric, but it won't change which metrics is active or not\n\tpushedPoints = []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"kubernetes_kubelet_status\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"cpu_used\"},\n\t\t),\n\t}\n\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n\n\thelper.SetTimeToNextFullSync()\n\thelper.AddTime(60 * time.Minute)\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n}", "func (m *MockKeystore) WatchKey(ctxt context.Context, key string, onUpdate func(*keystoreregistry.KeyValueVersion, *keystoreregistry.KeyValueVersion)) {\n\tm.ctrl.Call(m, \"WatchKey\", ctxt, key, onUpdate)\n}", "func (m *MockHooks) OnUpdate(existing, new proto.Message) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnUpdate\", existing, new)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func Setup(k8sClient *k8s.Client, prometheusURL *string, watchInterval time.Duration) *Client {\n\tclient := &Client{\n\t\tmetrics: setupMetrics(),\n\t\tservices: []k8s.FunctionStatus{},\n\t\twatchInterval: watchInterval,\n\t\tk8sClient: k8sClient,\n\t}\n\n\tif prometheusURL != nil {\n\t\tclient.promrc = resty.New().\n\t\t\tSetHostURL(*prometheusURL + 
\"/api/v1\").\n\t\t\tSetLogger(ioutil.Discard).\n\t\t\tSetRetryCount(3).\n\t\t\tSetTimeout(10 * time.Second)\n\t}\n\n\tprometheus.MustRegister(client)\n\treturn client\n}", "func (_m *PrometheusAlertClient) ReloadPrometheus() error {\n\tret := _m.Called()\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockPromMetrics) IncCreate() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"IncCreate\")\n}", "func (m *MockStreamFlowController) MaybeQueueWindowUpdate() {\n\tm.ctrl.Call(m, \"MaybeQueueWindowUpdate\")\n}", "func (m *MockProvider) OnServiceSynced() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceSynced\")\n}", "func (m *MockWatcher) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockArgusdClient) CreateWatch(arg0 context.Context, arg1 *golang.ArgusdConfig, arg2 ...grpc.CallOption) (*golang.ArgusdHandle, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"CreateWatch\", varargs...)\n\tret0, _ := ret[0].(*golang.ArgusdHandle)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockmonitorInterface) UpdatePloy(arg0 string, arg1 int) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"UpdatePloy\", arg0, arg1)\n}", "func (m *MockClient) Gauge(arg0 string, arg1 float64, arg2 []string, arg3 float64) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Gauge\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockmonitorInterface) NoticeProxyService(name, endpoint string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NoticeProxyService\", name, endpoint)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockMetricsProvider) IncreaseGithubCacheHits(method, handler string) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"IncreaseGithubCacheHits\", 
method, handler)\n}", "func (m *MockMetrics) MultiCreateSuccessResponseCounter() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"MultiCreateSuccessResponseCounter\")\n}", "func (m *MockMetricsProvider) ObserveHTTPRequestDuration(method, handler, statusCode string, elapsed float64) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"ObserveHTTPRequestDuration\", method, handler, statusCode, elapsed)\n}", "func MockMetrics() []optic.Metric {\n\tmetricsEvents := make([]optic.Metric, 0)\n\tmetricsEvents = append(metricsEvents, TestMetric(1.0))\n\treturn metricsEvents\n}", "func (m *MockMetricsProvider) ObserveCronTaskDuration(name string, elapsed float64) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"ObserveCronTaskDuration\", name, elapsed)\n}", "func (m *MockMetrics) UpdateSuccessResponseCounter() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"UpdateSuccessResponseCounter\")\n}", "func (m *MockHandler) AddMetricsEvent(ctx context.Context, clusterID strfmt.UUID, hostID *strfmt.UUID, severity, msg string, eventTime time.Time, props ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, clusterID, hostID, severity, msg, eventTime}\n\tfor _, a := range props {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"AddMetricsEvent\", varargs...)\n}", "func TestMetricTooMany(t *testing.T) { //nolint:maintidx\n\thelper := newHelper(t)\n\tdefer helper.Close()\n\n\thelper.preregisterAgent(t)\n\thelper.initSynchronizer(t)\n\n\tmetricResource, _ := helper.api.resources[mockAPIResourceMetric].(*genericResource)\n\tdefaultPatchHook := metricResource.PatchHook\n\n\t// API always reject more than 3 active metrics\n\tmetricResource.PatchHook = func(r *http.Request, body []byte, valuePtr interface{}, oldValue interface{}) error {\n\t\tif defaultPatchHook != nil {\n\t\t\terr := defaultPatchHook(r, body, valuePtr, oldValue)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tmetric, _ := valuePtr.(*metricPayload)\n\n\t\tif metric.DeactivatedAt.IsZero() {\n\t\t\tmetrics := 
helper.MetricsFromAPI()\n\t\t\tcountActive := 0\n\n\t\t\tfor _, m := range metrics {\n\t\t\t\tif m.DeactivatedAt.IsZero() && m.ID != metric.ID {\n\t\t\t\t\tcountActive++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif countActive >= 3 {\n\t\t\t\treturn clientError{\n\t\t\t\t\tbody: `{\"label\":[\"Too many non standard metrics\"]}`,\n\t\t\t\t\tstatusCode: http.StatusBadRequest,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tmetricResource.CreateHook = func(r *http.Request, body []byte, valuePtr interface{}) error {\n\t\treturn metricResource.PatchHook(r, body, valuePtr, nil)\n\t}\n\n\thelper.AddTime(time.Minute)\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// list active metrics, register agent_status + 2x metrics to register\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 6)\n\n\tmetrics := helper.MetricsFromAPI()\n\tif len(metrics) != 3 { // 2 + agent_status\n\t\tt.Errorf(\"len(metrics) = %d, want 3\", len(metrics))\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric4\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif helper.api.ClientErrorCount == 0 {\n\t\tt.Errorf(\"We should have some client error, had %d\", helper.api.ClientErrorCount)\n\t}\n\n\t// list active metrics + 2x metrics to register\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 5)\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 3 { // 2 + agent_status\n\t\tt.Errorf(\"len(metrics) = 
%d, want 3\", len(metrics))\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodNotRun(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\thelper.SetTimeToNextFullSync()\n\t// drop all because normally store drop inactive metrics and\n\t// metric1 don't emitted for 70 minutes\n\thelper.store.DropAllMetrics()\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric4\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// We need two sync: one to deactivate the metric, one to regsiter another one\n\thelper.AddTime(15 * time.Second)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 4 { // metric1 is now disabled, another get registered\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\tfor _, m := range metrics {\n\t\tif !m.DeactivatedAt.IsZero() && m.Name != \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is deactivated, want active\", m.Name)\n\t\t}\n\n\t\tif m.DeactivatedAt.IsZero() && m.Name == \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is active, want deactivated\", m.Name)\n\t\t}\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif helper.api.ClientErrorCount == 0 {\n\t\tt.Errorf(\"We should have some client error, had %d\", helper.api.ClientErrorCount)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\tif 
len(metrics) != 4 { // metric1 is still disabled and no newly registered\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\tfor _, m := range metrics {\n\t\tif !m.DeactivatedAt.IsZero() && m.Name != \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is deactivated, want active\", m.Name)\n\t\t}\n\n\t\tif m.DeactivatedAt.IsZero() && m.Name == \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is active, want deactivated\", m.Name)\n\t\t}\n\t}\n\n\t// We do not retry to register them\n\thelper.AddTime(5 * time.Minute)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodNotRun(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Excepted ONE per full-sync\n\thelper.SetTimeToNextFullSync()\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric4\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif helper.api.ClientErrorCount != 1 {\n\t\tt.Errorf(\"had %d client error, want 1\", helper.api.ClientErrorCount)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 4 {\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\tfor _, m := range metrics {\n\t\tif !m.DeactivatedAt.IsZero() && m.Name != \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is deactivated, want active\", m.Name)\n\t\t}\n\n\t\tif m.DeactivatedAt.IsZero() && m.Name == \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is active, want deactivated\", m.Name)\n\t\t}\n\t}\n\n\t// list active metrics + check existence of the metric we want to reg +\n\t// retry to register\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 3)\n}", "func (m *MockNotifier) Notify(arg0 context.Context, arg1 uint64, arg2 tasks.Event) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, 
\"Notify\", arg0, arg1, arg2)\n}", "func (m *MockServicer) CalculateStats() *service.Stats {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CalculateStats\")\n\tret0, _ := ret[0].(*service.Stats)\n\treturn ret0\n}", "func (m *MockInternalScheduler) Start() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Start\")\n}", "func (s *statsReporterMock) Shutdown() {\n}", "func (s) TestWatchCallAnotherWatch(t *testing.T) {\n\tapiClientCh, cleanup := overrideNewAPIClient()\n\tdefer cleanup()\n\n\tclient, err := New(clientOpts(testXDSServer, false))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tdefer client.Close()\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tc, err := apiClientCh.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"timeout when waiting for API client to be created: %v\", err)\n\t}\n\tapiClient := c.(*testAPIClient)\n\n\tclusterUpdateCh := testutils.NewChannel()\n\tfirstTime := true\n\tclient.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {\n\t\tclusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})\n\t\t// Calls another watch inline, to ensure there's deadlock.\n\t\tclient.WatchCluster(\"another-random-name\", func(ClusterUpdate, error) {})\n\n\t\tif _, err := apiClient.addWatches[ClusterResource].Receive(ctx); firstTime && err != nil {\n\t\t\tt.Fatalf(\"want new watch to start, got error %v\", err)\n\t\t}\n\t\tfirstTime = false\n\t})\n\tif _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {\n\t\tt.Fatalf(\"want new watch to start, got error %v\", err)\n\t}\n\n\twantUpdate := ClusterUpdate{ServiceName: testEDSName}\n\tclient.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate})\n\tif err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twantUpdate2 := ClusterUpdate{ServiceName: testEDSName + \"2\"}\n\tclient.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate2})\n\tif 
err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func registerMock(name string, priority CollectorPriority) *MockCollector {\n\tc := &MockCollector{}\n\tfactory := func() Collector { return c }\n\tregisterCollector(name, factory, priority)\n\treturn c\n}", "func (m *MockProvider) OnEndpointsUpdate(arg0, arg1 *v1.Endpoints) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnEndpointsUpdate\", arg0, arg1)\n}", "func (m *MockProvider) OnServiceAdd(arg0 *v1.Service) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnServiceAdd\", arg0)\n}", "func newMockSubscriber() mockSubscriber {\n\treturn mockSubscriber{}\n}", "func (m *MockAnonymous) PublishWebSocketEvent(arg0 string, arg1 map[string]interface{}, arg2 *model.WebsocketBroadcast) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"PublishWebSocketEvent\", arg0, arg1, arg2)\n}", "func setupMetrics() *Metrics {\n\t// Requests duration\n\tduration := prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\tName: \"http_request_duration\",\n\t\tHelp: \"Duration of the http requests processed.\",\n\t},\n\t\t[]string{\"status\", \"method\", \"path\"},\n\t)\n\tprometheus.MustRegister(duration)\n\n\treturn &Metrics{\n\t\tduration: duration,\n\t}\n}", "func (m *MockSession) Healthy() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Healthy\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *MockSender) AddMetricsEvent(ctx context.Context, clusterID strfmt.UUID, hostID *strfmt.UUID, severity, msg string, eventTime time.Time, props ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, clusterID, hostID, severity, msg, eventTime}\n\tfor _, a := range props {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"AddMetricsEvent\", varargs...)\n}", "func (m *MockCustomerService) Health(arg0 context.Context) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Health\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockProc) 
OnSvcConfigUpdate(arg0 *service.Config) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"OnSvcConfigUpdate\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockConnectionTracer) UpdatedPTOCount(arg0 uint32) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"UpdatedPTOCount\", arg0)\n}", "func NewMockHealthReporter() *MockHealthReporter {\n\treturn &MockHealthReporter{\n\t\tnotify: make(chan Update),\n\t}\n}", "func (n *mockAgent) memHotplugByProbe(addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error {\n\treturn nil\n}", "func (m *MockTopo_WatchServer) Send(arg0 *topo.WatchResponse) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Send\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockRPCServer) registerServices() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"registerServices\")\n}", "func (m *MockArgusdClient) DestroyWatch(arg0 context.Context, arg1 *golang.ArgusdConfig, arg2 ...grpc.CallOption) (*golang.Empty, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DestroyWatch\", varargs...)\n\tret0, _ := ret[0].(*golang.Empty)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockEventManager) Start(arg0 <-chan struct{}) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Start\", arg0)\n}" ]
[ "0.6021289", "0.5938068", "0.5859339", "0.58569264", "0.58546066", "0.5847917", "0.5827378", "0.5765352", "0.574002", "0.5726965", "0.5704174", "0.5674335", "0.56560504", "0.5651777", "0.56483954", "0.5647268", "0.5639421", "0.5606013", "0.5597646", "0.5596387", "0.5585159", "0.55841786", "0.5570474", "0.55576605", "0.55568254", "0.5553063", "0.55346715", "0.5532795", "0.55295295", "0.5525736", "0.5520755", "0.5516658", "0.54965526", "0.5475502", "0.54651713", "0.5451811", "0.54515254", "0.54421616", "0.54284936", "0.54260576", "0.5420781", "0.5418398", "0.54112303", "0.5409291", "0.5404438", "0.54037786", "0.53934693", "0.5385292", "0.53752756", "0.5356074", "0.53320235", "0.53288627", "0.5324715", "0.53065073", "0.53036606", "0.530198", "0.52866185", "0.5286591", "0.5284068", "0.52760655", "0.5267538", "0.52524614", "0.52353543", "0.5230333", "0.52186835", "0.5216447", "0.52155817", "0.52134454", "0.520373", "0.52020586", "0.51967543", "0.5189361", "0.51878166", "0.5186832", "0.5186388", "0.51841146", "0.518033", "0.5178361", "0.5164424", "0.5160793", "0.51601774", "0.51598966", "0.51595026", "0.5152378", "0.5144525", "0.5143259", "0.5142505", "0.5134608", "0.51247203", "0.51246", "0.5121979", "0.51207227", "0.5116876", "0.51160944", "0.5114833", "0.5109809", "0.5107099", "0.5107015", "0.5105943", "0.5101069" ]
0.7292405
0
WatchProemetheus indicates an expected call of WatchProemetheus
func (mr *MockmonitorInterfaceMockRecorder) WatchProemetheus() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchProemetheus", reflect.TypeOf((*MockmonitorInterface)(nil).WatchProemetheus)) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockmonitorInterface) WatchProemetheus() (chan []envoyTraffic, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WatchProemetheus\")\n\tret0, _ := ret[0].(chan []envoyTraffic)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func Test_NoStatusChangeOnStart(t *testing.T) {\n\tconst (\n\t\tresultName = \"copy_of_node_cpu_seconds_global\"\n\t\tsourceMetric = \"node_cpu_seconds_global\"\n\t)\n\n\tfor _, resolutionSecond := range []int{10, 30, 60} {\n\t\tt.Run(fmt.Sprintf(\"resolution=%d\", resolutionSecond), func(t *testing.T) {\n\t\t\tvar (\n\t\t\t\tresPoints []types.MetricPoint\n\t\t\t\tl sync.Mutex\n\t\t\t)\n\n\t\t\tstore := store.New(time.Hour, time.Hour)\n\t\t\treg, err := registry.New(registry.Option{\n\t\t\t\tPushPoint: pushFunction(func(ctx context.Context, points []types.MetricPoint) {\n\t\t\t\t\tl.Lock()\n\t\t\t\t\tdefer l.Unlock()\n\n\t\t\t\t\tresPoints = append(resPoints, points...)\n\t\t\t\t}),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tctx := context.Background()\n\t\t\tt0 := time.Now().Truncate(time.Second)\n\n\t\t\truleManager := newManager(ctx, store, defaultLinuxRecordingRules, t0)\n\n\t\t\tpromqlRules := []PromQLRule{\n\t\t\t\t{\n\t\t\t\t\tAlertingRuleID: \"509701d5-3cb0-449b-a858-0290f4dc3cff\",\n\t\t\t\t\tName: resultName,\n\t\t\t\t\tWarningQuery: fmt.Sprintf(\"%s > 0\", sourceMetric),\n\t\t\t\t\tWarningDelay: 5 * time.Minute,\n\t\t\t\t\tCriticalQuery: fmt.Sprintf(\"%s > 100\", sourceMetric),\n\t\t\t\t\tCriticalDelay: 5 * time.Minute,\n\t\t\t\t\tResolution: time.Duration(resolutionSecond) * time.Second,\n\t\t\t\t\tInstanceID: agentID,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr = ruleManager.RebuildPromQLRules(promqlRules)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\n\t\t\tid, err := reg.RegisterAppenderCallback(\n\t\t\t\tregistry.RegistrationOption{\n\t\t\t\t\tNoLabelsAlteration: true,\n\t\t\t\t\tDisablePeriodicGather: 
true,\n\t\t\t\t},\n\t\t\t\tregistry.AppenderRegistrationOption{},\n\t\t\t\truleManager,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tfor currentTime := t0; currentTime.Before(t0.Add(7 * time.Minute)); currentTime = currentTime.Add(time.Second * time.Duration(resolutionSecond)) {\n\t\t\t\tif !currentTime.Equal(t0) {\n\t\t\t\t\t// cpu_used need two gather to be calculated, skip first point.\n\t\t\t\t\tstore.PushPoints(context.Background(), []types.MetricPoint{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPoint: types.Point{\n\t\t\t\t\t\t\t\tTime: currentTime,\n\t\t\t\t\t\t\t\tValue: 30,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\t\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\tif currentTime.Sub(t0) > 6*time.Minute {\n\t\t\t\t\tlogger.V(0).Printf(\"Number of points: %d\", len(resPoints))\n\t\t\t\t}\n\n\t\t\t\truleManager.now = func() time.Time { return currentTime }\n\t\t\t\treg.InternalRunScrape(ctx, currentTime, id)\n\t\t\t}\n\n\t\t\tvar hadResult bool\n\n\t\t\t// Manager should not create ok points since the metric is always in critical.\n\t\t\t// This test might be changed in the future if we implement a persistent store,\n\t\t\t// as it would allow to known the exact hold state of the Prometheus rule.\n\t\t\tfor _, p := range resPoints {\n\t\t\t\tif p.Labels[types.LabelName] != resultName {\n\t\t\t\t\tt.Errorf(\"unexpected point with labels: %v\", p.Labels)\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif p.Annotations.Status.CurrentStatus == types.StatusWarning {\n\t\t\t\t\thadResult = true\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tt.Errorf(\"point status = %v want %v\", p.Annotations.Status.CurrentStatus, types.StatusWarning)\n\t\t\t}\n\n\t\t\tif !hadResult {\n\t\t\t\tt.Errorf(\"rule never returned any points\")\n\t\t\t}\n\t\t})\n\t}\n}", "func TestPrometheusMetrics(t *testing.T) 
{\n\tbytesProcessedMetric.WithLabelValues(\"x\")\n\tcacheHitMetric.WithLabelValues(\"x\")\n\tuploadedBytesMetric.WithLabelValues(\"x\")\n\tqueryTotalMetric.WithLabelValues(\"x\")\n\tqueryProcessedMetric.WithLabelValues(\"x\")\n\tinFlightUploadsHistogram.WithLabelValues(\"x\")\n\tuploadQueueSizeHistogram.WithLabelValues(\"x\")\n\n\tpromtest.LintMetrics(t)\n}", "func ShouldGetPrometheusConfig(t *testing.T, pod corev1.Pod) {\n\tg := NewGomegaWithT(t)\n\tr, err := PrometheusApiRequest(pod, \"/api/v1/status/config\")\n\tg.Expect(err).NotTo(HaveOccurred())\n\tg.Expect(r.Status).To(Equal(\"success\"))\n}", "func Test_NoUnknownOnStart(t *testing.T) {\n\tconst (\n\t\tresultName = \"copy_of_node_cpu_seconds_global\"\n\t\tsourceMetric = \"node_cpu_seconds_global\"\n\t)\n\n\tvar (\n\t\tresPoints []types.MetricPoint\n\t\tl sync.Mutex\n\t)\n\n\tstore := store.New(time.Hour, time.Hour)\n\n\treg, err := registry.New(registry.Option{\n\t\tPushPoint: pushFunction(func(ctx context.Context, points []types.MetricPoint) {\n\t\t\tl.Lock()\n\t\t\tdefer l.Unlock()\n\n\t\t\tresPoints = append(resPoints, points...)\n\t\t}),\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctx := context.Background()\n\tt0 := time.Now().Truncate(time.Second)\n\n\t// we always boot the manager with 10 seconds resolution\n\truleManager := newManager(ctx, store, defaultLinuxRecordingRules, t0)\n\n\tpromqlRules := []PromQLRule{\n\t\t{\n\t\t\tAlertingRuleID: \"509701d5-3cb0-449b-a858-0290f4dc3cff\",\n\t\t\tName: resultName,\n\t\t\tWarningQuery: fmt.Sprintf(\"%s > 0\", sourceMetric),\n\t\t\tWarningDelay: 5 * time.Minute,\n\t\t\tCriticalQuery: fmt.Sprintf(\"%s > 100\", sourceMetric),\n\t\t\tCriticalDelay: 5 * time.Minute,\n\t\t\tResolution: 10 * time.Second,\n\t\t\tInstanceID: agentID,\n\t\t},\n\t}\n\n\terr = ruleManager.RebuildPromQLRules(promqlRules)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tid, err := reg.RegisterAppenderCallback(\n\t\tregistry.RegistrationOption{\n\t\t\tNoLabelsAlteration: 
true,\n\t\t\tDisablePeriodicGather: true,\n\t\t},\n\t\tregistry.AppenderRegistrationOption{},\n\t\truleManager,\n\t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor currentTime := t0; currentTime.Before(t0.Add(9 * time.Minute)); currentTime = currentTime.Add(10 * time.Second) {\n\t\tif currentTime.After(t0.Add(1 * time.Minute)) {\n\t\t\t// Took one full minute before first points.\n\t\t\tstore.PushPoints(context.Background(), []types.MetricPoint{\n\t\t\t\t{\n\t\t\t\t\tPoint: types.Point{\n\t\t\t\t\t\tTime: currentTime,\n\t\t\t\t\t\tValue: 30,\n\t\t\t\t\t},\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\truleManager.now = func() time.Time { return currentTime }\n\n\t\treg.InternalRunScrape(ctx, currentTime, id)\n\t}\n\n\tvar hadResult bool\n\n\tfor _, p := range resPoints {\n\t\tif p.Labels[types.LabelName] != resultName {\n\t\t\tt.Errorf(\"unexpected point with labels: %v\", p.Labels)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif p.Annotations.Status.CurrentStatus == types.StatusWarning {\n\t\t\thadResult = true\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif p.Annotations.Status.CurrentStatus == types.StatusUnknown {\n\t\t\tt.Errorf(\"point status = %v want %v\", p.Annotations.Status.CurrentStatus, types.StatusWarning)\n\t\t}\n\t}\n\n\tif !hadResult {\n\t\tt.Errorf(\"rule never returned any points\")\n\t}\n}", "func (p *netPolPromVals) testPrometheusMetrics(t *testing.T) {\n\tnumPolicies, err := metrics.GetNumPolicies()\n\tpromutil.NotifyIfErrors(t, err)\n\tif numPolicies != p.expectedNumPolicies {\n\t\trequire.FailNowf(t, \"\", \"Number of policies didn't register correctly in Prometheus. Expected %d. 
Got %d.\", p.expectedNumPolicies, numPolicies)\n\t}\n\n\taddExecCount, err := metrics.GetControllerPolicyExecCount(metrics.CreateOp, false)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, p.expectedAddExecCount, addExecCount, \"Count for add execution time didn't register correctly in Prometheus\")\n\n\taddErrorExecCount, err := metrics.GetControllerPolicyExecCount(metrics.CreateOp, true)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, 0, addErrorExecCount, \"Count for add error execution time should be 0\")\n\n\tupdateExecCount, err := metrics.GetControllerPolicyExecCount(metrics.UpdateOp, false)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, p.expectedUpdateExecCount, updateExecCount, \"Count for update execution time didn't register correctly in Prometheus\")\n\n\tupdateErrorExecCount, err := metrics.GetControllerPolicyExecCount(metrics.UpdateOp, true)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, 0, updateErrorExecCount, \"Count for update error execution time should be 0\")\n\n\tdeleteExecCount, err := metrics.GetControllerPolicyExecCount(metrics.DeleteOp, false)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, p.expectedDeleteExecCount, deleteExecCount, \"Count for delete execution time didn't register correctly in Prometheus\")\n\n\tdeleteErrorExecCount, err := metrics.GetControllerPolicyExecCount(metrics.DeleteOp, true)\n\tpromutil.NotifyIfErrors(t, err)\n\trequire.Equal(t, 0, deleteErrorExecCount, \"Count for delete error execution time should be 0\")\n}", "func Test_GloutonStart(t *testing.T) {\n\tconst (\n\t\tresultName = \"my_rule_metric\"\n\t\tsourceMetric = \"cpu_used\"\n\t)\n\n\tstore := store.New(time.Hour, time.Hour)\n\tctx := context.Background()\n\tt0 := time.Now().Truncate(time.Second)\n\truleManager := newManager(ctx, store, defaultLinuxRecordingRules, t0)\n\tresPoints := []types.MetricPoint{}\n\n\tstore.PushPoints(context.Background(), []types.MetricPoint{\n\t\t{\n\t\t\tPoint: types.Point{\n\t\t\t\tTime: 
t0,\n\t\t\t\tValue: 700,\n\t\t\t},\n\t\t\tLabels: map[string]string{\n\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPoint: types.Point{\n\t\t\t\tTime: t0.Add(2 * time.Minute),\n\t\t\t\tValue: 800,\n\t\t\t},\n\t\t\tLabels: map[string]string{\n\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPoint: types.Point{\n\t\t\t\tTime: t0.Add(3 * time.Minute),\n\t\t\t\tValue: 800,\n\t\t\t},\n\t\t\tLabels: map[string]string{\n\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPoint: types.Point{\n\t\t\t\tTime: t0.Add(4 * time.Minute),\n\t\t\t\tValue: 800,\n\t\t\t},\n\t\t\tLabels: map[string]string{\n\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t},\n\t\t},\n\t})\n\n\tmetricList := []PromQLRule{\n\t\t{\n\t\t\tAlertingRuleID: \"509701d5-3cb0-449b-a858-0290f4dc3cff\",\n\t\t\tName: resultName,\n\t\t\tWarningQuery: fmt.Sprintf(\"%s > 50\", sourceMetric),\n\t\t\tWarningDelay: 5 * time.Minute,\n\t\t\tCriticalQuery: fmt.Sprintf(\"%s > 500\", sourceMetric),\n\t\t\tCriticalDelay: 5 * time.Minute,\n\t\t\tResolution: 10 * time.Second,\n\t\t\tInstanceID: agentID,\n\t\t},\n\t}\n\n\tstore.AddNotifiee(func(mp []types.MetricPoint) {\n\t\tresPoints = append(resPoints, mp...)\n\t})\n\n\terr := ruleManager.RebuildPromQLRules(metricList)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tfor i := 0; i < 6; i++ {\n\t\truleManager.now = func() time.Time { return t0.Add(time.Duration(i) * time.Minute) }\n\n\t\tif err := ruleManager.Collect(ctx, store.Appender(ctx)); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\n\t// Manager should not create ok points for the next 5 minutes after start,\n\t// as we do not provide a way for prometheus to know previous values before start.\n\t// This test should be changed in the future if we implement a persistent store,\n\t// as 
critical and warning points would be allowed.\n\tif len(resPoints) != 0 {\n\t\tt.Errorf(\"Unexpected number of points generated: expected 0, got %d:\\n%v\", len(resPoints), resPoints)\n\t}\n\n\truleManager.now = func() time.Time { return t0.Add(time.Duration(7) * time.Minute) }\n\n\terr = ruleManager.Collect(ctx, store.Appender(ctx))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif len(resPoints) == 0 {\n\t\tt.Errorf(\"Unexpected number of points generated: expected >0, got 0:\\n%v\", resPoints)\n\t}\n}", "func (stats *Stats) RegisterPrometheus() error {\n\terr := prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"events_total\",\n\t\tHelp: \"events collected by tracee-ebpf\",\n\t}, func() float64 { return float64(stats.EventCount.Read()) }))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"netevents_total\",\n\t\tHelp: \"net events collected by tracee-ebpf\",\n\t}, func() float64 { return float64(stats.NetEvCount.Read()) }))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"lostevents_total\",\n\t\tHelp: \"events lost in the submission buffer\",\n\t}, func() float64 { return float64(stats.LostEvCount.Read()) }))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"write_lostevents_total\",\n\t\tHelp: \"events lost in the write buffer\",\n\t}, func() float64 { return float64(stats.LostWrCount.Read()) }))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"network_lostevents_total\",\n\t\tHelp: \"events lost in the network buffer\",\n\t}, 
func() float64 { return float64(stats.LostNtCount.Read()) }))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"errors_total\",\n\t\tHelp: \"errors accumulated by tracee-ebpf\",\n\t}, func() float64 { return float64(stats.ErrorCount.Read()) }))\n\n\treturn err\n}", "func validatePrometheusRule(t *testing.T, arName string) {\n\terr := framework.Poll(time.Second, 2*time.Minute, func() error {\n\t\tar, pr, err := getResources(arName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tarGroups := ar.Spec.Groups\n\t\tprGroups := pr.Spec.Groups\n\n\t\tif len(arGroups) != len(prGroups) {\n\t\t\treturn fmt.Errorf(\"Different groups count (%d != %d)\", len(arGroups), len(prGroups))\n\t\t}\n\n\t\tfor j, arGroup := range arGroups {\n\t\t\tprGroup := prGroups[j]\n\n\t\t\tif arGroup.Name != prGroup.Name {\n\t\t\t\treturn fmt.Errorf(\"Groups have different names (%s != %s)\", arGroup.Name, prGroup.Name)\n\t\t\t}\n\n\t\t\tif string(arGroup.Interval) != string(*prGroup.Interval) {\n\t\t\t\treturn fmt.Errorf(\"Different group interval (%s != %s)\", arGroup.Interval, *prGroup.Interval)\n\t\t\t}\n\n\t\t\tarRules := arGroup.Rules\n\t\t\tprRules := prGroup.Rules\n\n\t\t\tif len(arRules) != len(prRules) {\n\t\t\t\treturn fmt.Errorf(\"Different rules count (%d != %d)\", len(arRules), len(prRules))\n\t\t\t}\n\n\t\t\tfor k, rule := range arRules {\n\t\t\t\tarRule := rule.DeepCopy()\n\t\t\t\t// CMO sets this labels to all rules\n\t\t\t\tarRule.Labels[\"openshift_io_user_alert\"] = \"true\"\n\t\t\t\tprRule := prRules[k]\n\n\t\t\t\tif arRule.Alert != prRule.Alert {\n\t\t\t\t\treturn fmt.Errorf(\"Rules have different names (%s != %s)\", arRule.Alert, prRule.Alert)\n\t\t\t\t}\n\n\t\t\t\tif string(arRule.For) != string(*prRule.For) {\n\t\t\t\t\treturn fmt.Errorf(\"Rules have different for (%s != %s)\", arRule.For, *prRule.For)\n\t\t\t\t}\n\n\t\t\t\tif 
!reflect.DeepEqual(arRule.Annotations, prRule.Annotations) {\n\t\t\t\t\treturn fmt.Errorf(\"Rules have different annotations expected %+v, got %+v\", arRule.Annotations, prRule.Annotations)\n\t\t\t\t}\n\n\t\t\t\tif !reflect.DeepEqual(arRule.Labels, prRule.Labels) {\n\t\t\t\t\treturn fmt.Errorf(\"Rules have different labels expected %+v, got %+v\", arRule.Labels, prRule.Labels)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tt.Fatal(errors.Wrap(err, \"Failed to validate the generated PrometheusRule\"))\n\t}\n}", "func prometheusEvent(event *v1.Event, er *EventRouter) {\n\tvar counter prometheus.Counter\n\tvar err error\n\n\tif event.Type == \"Normal\" {\n\t\tcounter, err = kubernetesNormalEventCounterVec.GetMetricWithLabelValues(\n\t\t\tevent.InvolvedObject.Kind,\n\t\t\tevent.InvolvedObject.Name,\n\t\t\tevent.InvolvedObject.Namespace,\n\t\t\tevent.Reason,\n\t\t\tevent.Source.Host,\n\t\t\ter.Clustername,\n\t\t)\n\t} else if event.Type == \"Warning\" {\n\t\tcounter, err = kubernetesWarningEventCounterVec.GetMetricWithLabelValues(\n\t\t\tevent.InvolvedObject.Kind,\n\t\t\tevent.InvolvedObject.Name,\n\t\t\tevent.InvolvedObject.Namespace,\n\t\t\tevent.Reason,\n\t\t\tevent.Source.Host,\n\t\t\ter.Clustername,\n\t\t)\n\t}\n\n\tif err != nil {\n\t\tlogrus.Warnf(\"prometheus event error: \" + err.Error())\n\t} else {\n\t\tcounter.Add(1)\n\t}\n}", "func (c *Client) Prometheus(rw http.ResponseWriter, req *http.Request) {\n\thostnames, err := c.query(req.Context(), e2eQuery, e2eLabel, e2eFunction)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tmachines, err := c.query(req.Context(), gmxQuery, gmxLabel, gmxFunction)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr = c.UpdatePrometheus(hostnames, machines)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\trw.WriteHeader(http.StatusOK)\n}", "func (c *jobMessage) 
watchFatal() {\n\n}", "func GetExpectedPrometheus(t *testing.T, fileName string) *monitoringv1.Prometheus {\n\tobj := getKubernetesObject(t, fileName)\n\tprometheus, ok := obj.(*monitoringv1.Prometheus)\n\tassert.True(t, ok, \"Expected Prometheus object\")\n\treturn prometheus\n}", "func TestCollectorOK(t *testing.T) {\n\tc := collector.New()\n\tctx := context.Background()\n\tmpa := NewStubMeterPoints(\"a\", 0, 100*time.Millisecond)\n\tmpb := NewStubMeterPoints(\"b\", 5, 200*time.Millisecond)\n\tmpc := NewStubMeterPoints(\"c\", 10, 50*time.Millisecond)\n\terr := c.Register(mpa, mpb, mpc)\n\tif err != nil {\n\t\tt.Errorf(\"collector register error: %v\", err)\n\t}\n\tmetrics := c.Retrieve(ctx, time.Second)\n\tif a, ok := metrics.Get(\"a.count\"); !ok || a != \"1\" {\n\t\tt.Errorf(\"illegal value a: %q\", a)\n\t}\n\tmetrics = c.Retrieve(ctx, time.Second)\n\tif b, ok := metrics.Get(\"b.count\"); !ok || b != \"7\" {\n\t\tt.Errorf(\"illegal value b: %q\", b)\n\t}\n\tmetrics = c.Retrieve(ctx, time.Second)\n\tif c, ok := metrics.Get(\"c.count\"); !ok || c != \"13\" {\n\t\tt.Errorf(\"illegal value c: %q\", c)\n\t}\n}", "func TestStatsConnTopoWatch(t *testing.T) {\n\tconn := &fakeConn{}\n\tstatsConn := NewStatsConn(\"global\", conn)\n\tctx := context.Background()\n\n\tstatsConn.Watch(ctx, \"\")\n\ttimingCounts := topoStatsConnTimings.Counts()[\"Watch.global\"]\n\tif got, want := timingCounts, int64(1); got != want {\n\t\tt.Errorf(\"stats were not properly recorded: got = %d, want = %d\", got, want)\n\t}\n\n}", "func prometheusRegister() {\n\tif !prometheusRegistered {\n\t\tmetrics.Registry.MustRegister(\n\t\t\tjobsSubmittedCount,\n\t\t\tjobsCompletedCount,\n\t\t\tjobsFailedCount)\n\n\t\tprometheusRegistered = true\n\t}\n}", "func (m *ProcedureMock) MinimockProceedInspect() {\n\tfor _, e := range m.ProceedMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to ProcedureMock.Proceed with params: %#v\", 
*e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.ProceedMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterProceedCounter) < 1 {\n\t\tif m.ProceedMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to ProcedureMock.Proceed\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to ProcedureMock.Proceed with params: %#v\", *m.ProceedMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcProceed != nil && mm_atomic.LoadUint64(&m.afterProceedCounter) < 1 {\n\t\tm.t.Error(\"Expected call to ProcedureMock.Proceed\")\n\t}\n}", "func Prommetric(input string, bot *models.Bot) {\n\tif bot.Metrics {\n\t\tif input == \"init\" {\n\t\t\t// init router\n\t\t\tpromRouter = mux.NewRouter()\n\n\t\t\t// metrics health check handler\n\t\t\tpromHealthHandle := func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tif r.Method != http.MethodGet {\n\t\t\t\t\tlog.Error().Msgf(\"prometheus server: invalid method %#q\", r.Method)\n\t\t\t\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tlog.Debug().Msg(\"prometheus server: health check hit!\")\n\t\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\t\t\t_, err := w.Write([]byte(\"OK\"))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Msg(\"unable to send response\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tpromRouter.HandleFunc(\"/metrics_health\", promHealthHandle).Methods(\"GET\")\n\n\t\t\t// metrics handler\n\t\t\tprometheus.MustRegister(botResponseCollector)\n\t\t\tpromRouter.Handle(\"/metrics\", promhttp.Handler())\n\n\t\t\t// start prometheus server\n\t\t\tgo func() {\n\t\t\t\t//nolint:gosec // fix to make sure http serve is done with timeout in place\n\t\t\t\terr := http.ListenAndServe(\":8080\", promRouter)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal().Msgf(\"Prometheus handler errored: %v\", 
err)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tlog.Info().Msg(\"prometheus server: serving metrics at /metrics\")\n\t\t} else {\n\t\t\tbotResponseCollector.With(prometheus.Labels{\"rulename\": input}).Inc()\n\t\t}\n\t}\n}", "func (m *MetricsProvider) WitnessAddProof(value time.Duration) {\n}", "func (s *Service) prometheus(c context.Context, method string) (mts []*monitor.Monitor, err error) {\n\tvar (\n\t\tsign, ins string\n\t\tnames []string\n\t\tparams = url.Values{}\n\t)\n\tins, _ = s.GetInterfaces(c)\n\tmts = make([]*monitor.Monitor, 0)\n\tparams.Add(\"Action\", \"GetPromDataRange\")\n\tparams.Add(\"PublicKey\", s.c.Prometheus.Key)\n\tparams.Add(\"DataSource\", \"app\")\n\tsign = s.auth(params)\n\tparams.Add(\"Signature\", sign)\n\tdate := time.Now().Format(\"2006-01-02\")\n\tparams.Set(\"Start\", date+\" 23:00:00\")\n\tparams.Set(\"End\", date+\" 23:00:10\")\n\tparams.Set(\"Step\", \"30\")\n\tnames = s.c.Apps.Name\n\tfor _, name := range names {\n\t\tvar (\n\t\t\tcostRet = &PrometheusRes{}\n\t\t\tcountRet = &PrometheusRes{}\n\t\t)\n\t\tparams.Set(\"Query\", fmt.Sprintf(costQuery, method, name))\n\t\tif err = s.PrometheusProxy(c, params, costRet); err != nil {\n\t\t\treturn\n\t\t}\n\t\tparams.Set(\"Query\", fmt.Sprintf(countQuery, method, name))\n\t\tif err = s.PrometheusProxy(c, params, countRet); err != nil {\n\t\t\treturn\n\t\t}\n\t\tfor _, val := range costRet.Data {\n\t\t\tvar (\n\t\t\t\tcount float64\n\t\t\t\tapi = val.Metric.Method\n\t\t\t)\n\t\t\tif api == \"inner.Ping\" || len(val.Values) < 1 || len(val.Values[0]) < 1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcost, _ := strconv.ParseFloat(val.Values[0][1].(string), 64)\n\t\t\tif int64(cost) < s.c.Apps.Max && !strings.Contains(ins, api) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, v := range countRet.Data {\n\t\t\t\tif api == v.Metric.Method {\n\t\t\t\t\tcount, _ = strconv.ParseFloat(v.Values[0][1].(string), 64)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tmt := &monitor.Monitor{\n\t\t\t\tAppID: name + \"-\" + 
method,\n\t\t\t\tInterface: api,\n\t\t\t\tCount: int64(count),\n\t\t\t\tCost: int64(cost),\n\t\t\t}\n\t\t\tmts = append(mts, mt)\n\t\t}\n\t}\n\treturn\n}", "func (_m *Reporter) ReportSimple(name string, status monitoring.HealthCheckStatus) {\n\t_m.Called(name, status)\n}", "func verifyNoWatch(t *testing.T, watch Watch, fn func()) {\n\tch := make(chan struct{}, 1)\n\twatch.Wait(ch)\n\n\tfn()\n\n\tselect {\n\tcase <-ch:\n\t\tt.Fatalf(\"watch should not been notified\")\n\tdefault:\n\t}\n}", "func TestMonitorPrivate(t *testing.T) {\n\thelper := newHelper(t)\n\tdefer helper.Close()\n\n\t// Configure as private probe\n\thelper.cfg.Blackbox.ScraperName = \"\"\n\thelper.cfg.Blackbox.ScraperSendUUID = true\n\n\thelper.addMonitorOnAPI(t)\n\thelper.initSynchronizer(t)\n\thelper.AddTime(time.Minute)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tidAgentMain, _ := helper.state.BleemeoCredentials()\n\tif idAgentMain == \"\" {\n\t\tt.Fatal(\"idAgentMain == '', want something\")\n\t}\n\n\tinitialMetrics := []metricPayload{\n\t\t// Metric from other probe are NOT present in API, because glouton private probe aren't allow to view them.\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"9149d491-3a6e-4f46-abf9-c1ea9b9f7227\",\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_success\",\n\t\t},\n\t}\n\n\tpushedPoints := []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"probe_success\"},\n\t\t\tlabels.Label{Name: types.LabelScraperUUID, Value: idAgentMain},\n\t\t\tlabels.Label{Name: types.LabelInstance, Value: newMonitor.URL},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: 
newMonitor.AgentID},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgentUUID, Value: newMonitor.AgentID},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"probe_duration\"},\n\t\t\tlabels.Label{Name: types.LabelScraperUUID, Value: idAgentMain},\n\t\t\tlabels.Label{Name: types.LabelInstance, Value: newMonitor.URL},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: newMonitor.AgentID},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgentUUID, Value: newMonitor.AgentID},\n\t\t),\n\t}\n\n\thelper.SetAPIMetrics(initialMetrics...)\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\twant := []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_success\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_duration\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_duration\",\n\t\t},\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n\n\thelper.SetTimeToNextFullSync()\n\thelper.AddTime(60 * time.Minute)\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil 
{\n\t\tt.Error(err)\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n\n\thelper.SetTimeToNextFullSync()\n\thelper.AddTime(60 * time.Minute)\n\n\tif err := helper.runOnceWithResult(t).Check(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\twant = []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tDeactivatedAt: helper.Now(),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_success\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: idAny,\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_duration\\\",instance=\\\"%s\\\",instance_uuid=\\\"%s\\\",scraper_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.URL,\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t\tidAgentMain,\n\t\t\t\t),\n\t\t\t\tDeactivatedAt: helper.Now(),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_duration\",\n\t\t},\n\t}\n\n\thelper.assertMetricsInAPI(t, want)\n}", "func TestMetricTooMany(t *testing.T) { //nolint:maintidx\n\thelper := newHelper(t)\n\tdefer helper.Close()\n\n\thelper.preregisterAgent(t)\n\thelper.initSynchronizer(t)\n\n\tmetricResource, _ := helper.api.resources[mockAPIResourceMetric].(*genericResource)\n\tdefaultPatchHook := metricResource.PatchHook\n\n\t// API always reject more than 3 active metrics\n\tmetricResource.PatchHook = func(r *http.Request, body []byte, valuePtr interface{}, oldValue interface{}) error {\n\t\tif defaultPatchHook != nil {\n\t\t\terr := defaultPatchHook(r, body, valuePtr, oldValue)\n\t\t\tif err != 
nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tmetric, _ := valuePtr.(*metricPayload)\n\n\t\tif metric.DeactivatedAt.IsZero() {\n\t\t\tmetrics := helper.MetricsFromAPI()\n\t\t\tcountActive := 0\n\n\t\t\tfor _, m := range metrics {\n\t\t\t\tif m.DeactivatedAt.IsZero() && m.ID != metric.ID {\n\t\t\t\t\tcountActive++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif countActive >= 3 {\n\t\t\t\treturn clientError{\n\t\t\t\t\tbody: `{\"label\":[\"Too many non standard metrics\"]}`,\n\t\t\t\t\tstatusCode: http.StatusBadRequest,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tmetricResource.CreateHook = func(r *http.Request, body []byte, valuePtr interface{}) error {\n\t\treturn metricResource.PatchHook(r, body, valuePtr, nil)\n\t}\n\n\thelper.AddTime(time.Minute)\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// list active metrics, register agent_status + 2x metrics to register\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 6)\n\n\tmetrics := helper.MetricsFromAPI()\n\tif len(metrics) != 3 { // 2 + agent_status\n\t\tt.Errorf(\"len(metrics) = %d, want 3\", len(metrics))\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric4\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif helper.api.ClientErrorCount == 0 {\n\t\tt.Errorf(\"We should have some client error, had %d\", helper.api.ClientErrorCount)\n\t}\n\n\t// list active metrics + 2x metrics to 
register\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 5)\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 3 { // 2 + agent_status\n\t\tt.Errorf(\"len(metrics) = %d, want 3\", len(metrics))\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodNotRun(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\thelper.SetTimeToNextFullSync()\n\t// drop all because normally store drop inactive metrics and\n\t// metric1 don't emitted for 70 minutes\n\thelper.store.DropAllMetrics()\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric4\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// We need two sync: one to deactivate the metric, one to regsiter another one\n\thelper.AddTime(15 * time.Second)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 4 { // metric1 is now disabled, another get registered\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\tfor _, m := range metrics {\n\t\tif !m.DeactivatedAt.IsZero() && m.Name != \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is deactivated, want active\", m.Name)\n\t\t}\n\n\t\tif m.DeactivatedAt.IsZero() && m.Name == \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is active, want deactivated\", m.Name)\n\t\t}\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric1\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil 
{\n\t\tt.Error(err)\n\t}\n\n\tif helper.api.ClientErrorCount == 0 {\n\t\tt.Errorf(\"We should have some client error, had %d\", helper.api.ClientErrorCount)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 4 { // metric1 is still disabled and no newly registered\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\tfor _, m := range metrics {\n\t\tif !m.DeactivatedAt.IsZero() && m.Name != \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is deactivated, want active\", m.Name)\n\t\t}\n\n\t\tif m.DeactivatedAt.IsZero() && m.Name == \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is active, want deactivated\", m.Name)\n\t\t}\n\t}\n\n\t// We do not retry to register them\n\thelper.AddTime(5 * time.Minute)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodNotRun(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Excepted ONE per full-sync\n\thelper.SetTimeToNextFullSync()\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric2\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric3\"},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric4\"},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif helper.api.ClientErrorCount != 1 {\n\t\tt.Errorf(\"had %d client error, want 1\", helper.api.ClientErrorCount)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 4 {\n\t\tt.Errorf(\"len(metrics) = %d, want 4\", len(metrics))\n\t}\n\n\tfor _, m := range metrics {\n\t\tif !m.DeactivatedAt.IsZero() && m.Name != \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is deactivated, want active\", m.Name)\n\t\t}\n\n\t\tif m.DeactivatedAt.IsZero() && m.Name == \"metric1\" {\n\t\t\tt.Errorf(\"metric %s is active, want deactivated\", m.Name)\n\t\t}\n\t}\n\n\t// list active metrics + check existence of the metric we want to reg +\n\t// retry to 
register\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 3)\n}", "func init() {\n\tpromRegistry := prometheus.NewRegistry()\n\tpromRegistry.MustRegister(uptime, reqCount, passCount, blockCount, reqDuration)\n\tgo recordUptime()\n\tpromHandler = promhttp.InstrumentMetricHandler(promRegistry, promhttp.HandlerFor(promRegistry, promhttp.HandlerOpts{}))\n}", "func checkWatchSyntax(ctx *cli.Context) {\n\tif len(ctx.Args()) != 1 {\n\t\tcli.ShowCommandHelpAndExit(ctx, \"watch\", 1) // last argument is exit code\n\t}\n}", "func TestWatcher(t *testing.T) {\n\twatcher := ImokWatch(500*time.Millisecond, 5)\n\twatcher.Watch(make(chan bool))\n}", "func CheckPrometheusCounter(reg *prometheus.Registry, counter prometheus.Counter, expectedValue float64, t *testing.T) {\n\tmetricFamilies, err := reg.Gather()\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to gather prometheus metrics: %+v\", err)\n\t}\n\n\tif len(metricFamilies) < 1 ||\n\t\tlen(metricFamilies[0].Metric) < 1 {\n\t\tt.Fatalf(\"Unable to gather the metrics from prometheus.\\n\\tExpected 1 MetricFamilies; got: %d.\\n\\tExpected 1 Metric; got: %d\", len(metricFamilies), len(metricFamilies[0].Metric))\n\t}\n\n\tmetric := metricFamilies[0].Metric[0]\n\tif gotValue := metric.Counter.GetValue(); gotValue != expectedValue {\n\t\tt.Fatalf(\"Prometheus counter %+v expected count %f; got %f\", counter, expectedValue, gotValue)\n\t}\n}", "func verifyWatch(t *testing.T, watch Watch, fn func()) {\n\tch := make(chan struct{}, 1)\n\twatch.Wait(ch)\n\n\tfn()\n\n\tselect {\n\tcase <-ch:\n\tdefault:\n\t\tt.Fatalf(\"watch should have been notified\")\n\t}\n}", "func testMetrics(expected map[string]int, m *Metrics) error {\n\te := &firstErr{}\n\tfor name, cnt := range expected {\n\t\tswitch name {\n\t\tcase \"incoming\":\n\t\t\te.testCounter(m.Incoming, name, cnt)\n\t\tcase \"fragments\":\n\t\t\te.testCounter(m.Fragments, name, cnt)\n\t\tcase \"defrag\":\n\t\t\te.testCounter(m.Defrag, name, cnt)\n\t\tcase 
\"invalid\":\n\t\t\te.testCounter(m.Invalid, name, cnt)\n\t\tcase \"seen:udp\":\n\t\t\te.testCounter(m.Seen.WithLabelValues(\"udp\"), name, cnt)\n\t\tcase \"seen:tcp\":\n\t\t\te.testCounter(m.Seen.WithLabelValues(\"tcp\"), name, cnt)\n\t\tcase \"captured:udp\":\n\t\t\te.testCounter(m.Captured.WithLabelValues(\"udp\"), name, cnt)\n\t\tcase \"captured:tcp\":\n\t\t\te.testCounter(m.Captured.WithLabelValues(\"tcp\"), name, cnt)\n\t\tdefault:\n\t\t\te.err = fmt.Errorf(\"don't know field %v\", name)\n\t\t}\n\t}\n\treturn e.err\n}", "func TestMetricsEndpoint(t *testing.T) {\n\tc := newKubeClient(t)\n\n\tlistOptions := metav1.ListOptions{LabelSelector: \"app=olm-operator\"}\n\tpodList, err := c.KubernetesInterface().CoreV1().Pods(operatorNamespace).List(listOptions)\n\tif err != nil {\n\t\tlog.Infof(\"Error %v\\n\", err)\n\t\tt.Fatalf(\"Listing pods failed: %v\\n\", err)\n\t}\n\tif len(podList.Items) > 1 {\n\t\tt.Fatalf(\"Expected only 1 olm-operator pod, got %v\", len(podList.Items))\n\t}\n\n\tpodName := podList.Items[0].GetName()\n\n\trawOutput, err := getMetricsFromPod(t, c, podName, operatorNamespace, 8080)\n\tif err != nil {\n\t\tt.Fatalf(\"Metrics test failed: %v\\n\", err)\n\t}\n\n\tlog.Debugf(\"Metrics:\\n%v\", rawOutput)\n}", "func CheckPrometheusCounterVec(t *testing.T, reg *prometheus.Registry, counter *prometheus.CounterVec, expectedValue float64, expectedLabels ...ExpectationLabelPair) {\n\tmetricFamilies, err := reg.Gather()\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to gather prometheus metrics: %+v\", err)\n\t}\n\n\tif len(metricFamilies) < 1 ||\n\t\tlen(metricFamilies[0].Metric) < 1 ||\n\t\tlen(metricFamilies[0].Metric[0].Label) < 1 {\n\t\tmetricCount := 0\n\t\tif len(metricFamilies) > 0 {\n\t\t\tmetricCount = len(metricFamilies[0].Metric)\n\t\t}\n\n\t\tt.Fatalf(\"Unable to gather the metrics from prometheus.\\n\\tExpected 1 MetricFamilies; got: %d.\\n\\tExpected 1 Metric; got: %d\", len(metricFamilies), metricCount)\n\t}\n\n\tvar metricCounterValue float64 
= 0.00\n\tmetric := metricFamilies[0].Metric[0]\n\nExpectedLabels:\n\tfor _, expectedLabelPair := range expectedLabels {\n\t\tlog.Debugf(\"attempting to match expected label pair: %v\", expectedLabelPair)\n\t\tfor _, gotLabel := range metric.GetLabel() {\n\t\t\tlog.Debugf(\"\\tgot prometheus label: %s\", gotLabel.GetName())\n\t\t\tif gotLabel.GetName() == expectedLabelPair.LabelName {\n\t\t\t\tmetricCounterValue = metric.Counter.GetValue()\n\t\t\t\tgotValue := gotLabel.GetValue()\n\t\t\t\tif gotValue != expectedLabelPair.LabelValue {\n\t\t\t\t\tt.Fatalf(`Prometheus counter %+v expected [name: \"%s\" value:\"%s\"]; got [%s]`, counter, expectedLabelPair.LabelName, expectedLabelPair.LabelValue, gotLabel)\n\t\t\t\t}\n\t\t\t\tcontinue ExpectedLabels\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch {\n\tcase metricCounterValue == 0.00:\n\t\tt.Fatalf(\"Prometheus counter %+v not found in the registry.\", counter)\n\tcase metricCounterValue != expectedValue:\n\t\tt.Fatalf(\"Prometheus counter %+v expected value %f; got %f\", counter, expectedValue, metricCounterValue)\n\t}\n}", "func IsPrometheus(cli prometheus.Client) bool {\n\treturn IsClientID(cli, PrometheusClientID)\n}", "func watch(k *kite.Client, eventType string, eventId string, interval time.Duration) error {\n\teventArgs := kloud.EventArgs([]kloud.EventArg{\n\t\tkloud.EventArg{\n\t\t\tType: eventType,\n\t\t\tEventId: eventId,\n\t\t},\n\t})\n\n\tfor {\n\t\tresp, err := k.TellWithTimeout(\"event\", defaultTellTimeout, eventArgs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar events []kloud.EventResponse\n\t\tif err := resp.Unmarshal(&events); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(events) == 0 {\n\t\t\treturn errors.New(\"incoming event response is not an array\")\n\t\t}\n\n\t\tif events[0].Error != nil {\n\t\t\treturn events[0].Error\n\t\t}\n\n\t\tDefaultUi.Info(fmt.Sprintf(\"%s ==> %s [Status: %s Percentage: 
%d]\",\n\t\t\tfmt.Sprint(time.Now())[:19],\n\t\t\tevents[0].Event.Message,\n\t\t\tevents[0].Event.Status,\n\t\t\tevents[0].Event.Percentage,\n\t\t))\n\n\t\tif events[0].Event.Error != \"\" {\n\t\t\terr := errors.New(events[0].Event.Error)\n\t\t\tDefaultUi.Error(err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tif events[0].Event.Percentage == 100 {\n\t\t\treturn nil\n\t\t}\n\n\t\ttime.Sleep(interval)\n\t}\n}", "func TestJoeFridayGetCPUInfo(t *testing.T) {\n\tprof, err := cpuinfo.NewProfiler()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tinf, err := prof.Get()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tp, err := json.MarshalIndent(inf, \"\", \"\\t\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Logf(\"%s\\n\", string(p))\n}", "func promEventNotifier(ev cap.Event) {\n\tgauge := eventGauge.WithLabelValues(ev.GetTag().String(), ev.GetProcessRuntimeName())\n\tif ev.GetTag() == cap.ProcessStarted {\n\t\tgauge.Inc()\n\t} else {\n\t\tgauge.Dec()\n\t}\n}", "func runTestcase(t *testing.T) {\n\n\tgaugeStatHandler = new(GaugeStats)\n\n\tinitGaugeStats(METRICS_CONFIG_FILE, gaugeStatHandler)\n\n\t// read raw-metrics from mock data gen, create observer and channel prometeus metric ingestion and processing\n\trawMetrics := getRawMetrics()\n\tnsWatcher := new(NamespaceWatcher)\n\tlObserver := &Observer{}\n\tch := make(chan prometheus.Metric, 1000)\n\tpass2Metrics := requestInfoNamespaces(rawMetrics)\n\n\tnsWatcher.passTwoKeys(rawMetrics)\n\n\tnsInfoKeys := createNamespacePassTwoExpectedOutputs(rawMetrics)\n\n\t// outputs := nsWatcher.passTwoKeys(pass2Metrics)\n\t// assert.Equal(t, outputs, expectedOutputs)\n\n\terr := nsWatcher.refresh(lObserver, nsInfoKeys, rawMetrics, ch)\n\n\tif err == nil {\n\t\t// map of string ==> map[\"namespace/metric-name\"][\"<VALUE>\"]\n\t\t// map of string ==> map[\"namespace/metric-name\"][\"<Label>\"]\n\t\t// both used to assert the return values from actual code against calculated values\n\t\tlOutputValues := 
map[string]string{}\n\t\tlOutputLabels := map[string]string{}\n\n\t\t// reads data from the Prom channel and creates a map of strings so we can assert in the below loop\n\t\tdomore := 1\n\n\t\tfor domore == 1 {\n\t\t\tselect {\n\n\t\t\tcase nsMetric := <-ch:\n\t\t\t\tdescription := nsMetric.Desc().String()\n\t\t\t\tvar protobuffer dto.Metric\n\t\t\t\terr := nsMetric.Write(&protobuffer)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\" unable to get metric \", description, \" data into protobuf \", err)\n\t\t\t\t}\n\n\t\t\t\tmetricValue := \"\"\n\t\t\t\tmetricLabel := fmt.Sprintf(\"%s\", protobuffer.Label)\n\t\t\t\tif protobuffer.Gauge != nil {\n\t\t\t\t\tmetricValue = fmt.Sprintf(\"%.0f\", *protobuffer.Gauge.Value)\n\t\t\t\t} else if protobuffer.Counter != nil {\n\t\t\t\t\tmetricValue = fmt.Sprintf(\"%.0f\", *protobuffer.Counter.Value)\n\t\t\t\t}\n\n\t\t\t\t// Desc{fqName: \"aerospike_namespac_memory_free_pct\", help: \"memory free pct\", constLabels: {}, variableLabels: [cluster_name service ns]}\n\t\t\t\tmetricNameFromDesc := extractMetricNameFromDesc(description)\n\t\t\t\tnamespaceFromLabel := extractNamespaceFromLabel(metricLabel)\n\n\t\t\t\t// key will be like namespace/<metric_name>, this we use this check during assertion\n\t\t\t\tkeyName := makeKeyname(namespaceFromLabel, metricNameFromDesc, true)\n\t\t\t\tlOutputValues[keyName] = metricValue\n\t\t\t\tlOutputLabels[keyName] = metricLabel\n\n\t\t\tcase <-time.After(1 * time.Second):\n\t\t\t\tdomore = 0\n\n\t\t\t} // end select\n\t\t}\n\n\t\t// loop each namespace and compare the label and value\n\t\tarrNames := strings.Split(pass2Metrics[\"namespaces\"], \";\")\n\n\t\tfor nsIndex := range arrNames {\n\t\t\ttnsForNamespace := arrNames[nsIndex]\n\t\t\tlExpectedMetricNamedValues, lExpectedMetricLabels := createNamespaceWatcherExpectedOutputs(tnsForNamespace, true)\n\n\t\t\tfor key := range lOutputValues {\n\t\t\t\texpectedValues := lExpectedMetricNamedValues[key]\n\t\t\t\texpectedLabels := 
lExpectedMetricLabels[key]\n\t\t\t\toutputMetricValues := lOutputValues[key]\n\t\t\t\toutpuMetricLabels := lOutputLabels[key]\n\n\t\t\t\t// assert - only if the value belongs to the namespace we read expected values and processing\n\t\t\t\tif strings.HasPrefix(key, tnsForNamespace) {\n\t\t\t\t\tassert.Contains(t, expectedValues, outputMetricValues)\n\t\t\t\t\tassert.Contains(t, expectedLabels, outpuMetricLabels)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\tfmt.Println(\" Failed Refreshing, error: \", err)\n\t}\n}", "func (m *MockHealthReporter) OK(msg string) {\n\tm.Lock()\n\tdefer m.Unlock()\n\tif m.notify == nil {\n\t\treturn\n\t}\n\tm.notify <- Update{Event: \"OK\"}\n}", "func (*SimpleMetric) IsMetric() {}", "func (t *Template) RenderPrometheus(alert template.Alert) (string, error) {\n\tconst defaultTmpl = `\n[Status]: {{ .Status }}\n[Starts At]: {{ .StartsAt.Format \"2 Jan 2006 15:04:05 MST\" }}\n[Labels]: {{ range $key, $value := .Labels }}\n {{ $key }}: {{ $value }}{{ end }}\n[Annotations]: {{ range $key, $value := .Annotations }}\n {{ $key }}: {{ $value }}{{ end }}\n`\n\n\ttmpl, err := t.load(\"prometheus\")\n\tif err != nil {\n\t\ttmpl = defaultTmpl\n\t}\n\n\tcontent, err := t.render(\"prometheus\", tmpl, alert)\n\n\treturn content, err\n}", "func (p Ping) notify(pingErr error) error {\n\tenvelope_ := adapters.Envelope{\n\t\tTitle: \"actuator-failed\",\n\t\tRecipient: \"*\",\n\t}\n\t// TODO proper protocol ?\n\tpayload := fmt.Sprintf(\"endpoint=%s actuator=ping err=%s\", p.Endpoint, pingErr)\n\tif err := p.Adapter.Send(envelope_, payload); err != nil {\n\t\tp.logger.Error(\"Error sending event: %s\", err)\n\t\treturn err\n\t}\n\tp.logger.Info(\"Event '%s' dispatched\", envelope_.Title)\n\treturn pingErr\n}", "func (m *CloudWatchLogsServiceMock) CreateNewServiceIfUnHealthy() {\n\n}", "func probe() []byte {\r\n\tts := time.Now().Format(time.UnixDate)\r\n\toutput := \"{\\\"status\\\":\\\"Success\\\",\\\"ts\\\" : \\\"\" + ts + \"\\\" }\"\r\n\treturn 
[]byte(output)\r\n}", "func ReportAPIRequestMetric(handler, method, status string, started time.Time) {\n\trequestsTotalAPI.WithLabelValues(handler, method, status).Inc()\n\trequestLatencyAPI.WithLabelValues(handler, method, status).Observe(time.Since(started).Seconds())\n}", "func (m *GatewayMock) MinimockOnPulseFromPulsarInspect() {\n\tfor _, e := range m.OnPulseFromPulsarMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to GatewayMock.OnPulseFromPulsar with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.OnPulseFromPulsarMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterOnPulseFromPulsarCounter) < 1 {\n\t\tif m.OnPulseFromPulsarMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to GatewayMock.OnPulseFromPulsar\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to GatewayMock.OnPulseFromPulsar with params: %#v\", *m.OnPulseFromPulsarMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcOnPulseFromPulsar != nil && mm_atomic.LoadUint64(&m.afterOnPulseFromPulsarCounter) < 1 {\n\t\tm.t.Error(\"Expected call to GatewayMock.OnPulseFromPulsar\")\n\t}\n}", "func (m *GatewayMock) MinimockRunInspect() {\n\tfor _, e := range m.RunMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to GatewayMock.Run with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.RunMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterRunCounter) < 1 {\n\t\tif m.RunMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to GatewayMock.Run\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to GatewayMock.Run with params: %#v\", *m.RunMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set 
then invocations count should be greater than zero\n\tif m.funcRun != nil && mm_atomic.LoadUint64(&m.afterRunCounter) < 1 {\n\t\tm.t.Error(\"Expected call to GatewayMock.Run\")\n\t}\n}", "func TestUsage(t *testing.T) {\n\tinitialize(t)\n\tusageSubscriber(\"testing\", t)\n\tassertUsageSubscriber(\"testing\", true, t)\n\tusageSubscriber(\"witness\", t)\n\tassertUsageSubscriber(\"witness\", true, t)\n\tassertUsageSubscriber(\"badguy\", false, t)\n\tusageWritePayloads(pcnt, t)\n\tusageReadCheck(\"witness\", pcnt, true, t)\n\tusageReadCheck(\"testing\", pcnt, false, t)\n}", "func (o ArgoCDSpecOutput) Prometheus() ArgoCDSpecPrometheusPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpec) *ArgoCDSpecPrometheus { return v.Prometheus }).(ArgoCDSpecPrometheusPtrOutput)\n}", "func Test_CollectMetrics11(t *testing.T) {\n\tassert := assert.New(t)\n\tconfig := getTestConfig(1, 1)\n\tconfig.DataFunc = config.GetTestData1\n\n\tres := config.CollectMetrics()\n\t// assert.Nil(res)\n\tassert.Equal(res, []cmd.MetricData{{Name: \"m1\", Help: \"h1\", MetricType: \"gauge\", Stats: []cmd.MetricRecord{{Value: 999, Labels: []string{\"l00\"}, LabelValues: []string{\"lv00\"}}}}})\n}", "func (f *FakeVCenter) PushMetrics(context.Context, metrics.Receiver) {}", "func ShouldEventuallySeeClusterMetrics(t *testing.T, promPod corev1.Pod, cohPods []corev1.Pod) {\n\tg := NewGomegaWithT(t)\n\n\terr := wait.Poll(time.Second*5, time.Minute*5, func() (done bool, err error) {\n\t\tresult := PrometheusVector{}\n\t\terr = PrometheusQuery(promPod, \"up\", &result)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tm := make(map[string]bool)\n\t\tfor _, pod := range cohPods {\n\t\t\tm[pod.Name] = false\n\t\t}\n\n\t\tfor _, v := range result.Result {\n\t\t\tif v.Labels[\"job\"] == \"coherence-service-metrics\" {\n\t\t\t\tname := v.Labels[\"pod\"]\n\t\t\t\tm[name] = true\n\t\t\t}\n\t\t}\n\n\t\tfor _, pod := range cohPods {\n\t\t\tif m[pod.Name] == false {\n\t\t\t\treturn false, 
nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t})\n\n\tg.Expect(err).NotTo(HaveOccurred())\n}", "func (c *Cassowary) PushPrometheusMetrics(metrics ResultMetrics) error {\n\tpromTCPConnMean.Set(metrics.TCPStats.TCPMean)\n\tpromTCPConnMedian.Set(metrics.TCPStats.TCPMedian)\n\tpromTCPConn95P.Set(metrics.TCPStats.TCP95p)\n\tpromServerProcessingMean.Set(metrics.ProcessingStats.ServerProcessingMean)\n\tpromServerProcessingMedian.Set(metrics.ProcessingStats.ServerProcessingMedian)\n\tpromServerProcessing95p.Set(metrics.ProcessingStats.ServerProcessing95p)\n\tpromContentTransferMean.Set(metrics.ContentStats.ContentTransferMean)\n\tpromContentTransferMedian.Set(metrics.ContentStats.ContentTransferMedian)\n\tpromContentTransfer95p.Set(metrics.ContentStats.ContentTransfer95p)\n\tpromTotalRequests.Set(float64(metrics.TotalRequests))\n\tpromFailedRequests.Set(float64(metrics.FailedRequests))\n\tpromRequestPerSecond.Set(metrics.RequestsPerSecond)\n\n\tif err := push.New(c.PromURL, \"cassowary_load_test\").\n\t\tCollector(promTCPConnMean).\n\t\tCollector(promTCPConnMedian).\n\t\tCollector(promTCPConn95P).\n\t\tCollector(promServerProcessingMean).\n\t\tCollector(promServerProcessingMedian).\n\t\tCollector(promServerProcessing95p).\n\t\tCollector(promContentTransferMean).\n\t\tCollector(promContentTransferMedian).\n\t\tCollector(promContentTransfer95p).\n\t\tCollector(promTotalRequests).\n\t\tCollector(promFailedRequests).\n\t\tCollector(promRequestPerSecond).\n\t\tGrouping(\"url\", c.BaseURL).\n\t\tPush(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func pushMetric(addr string, interval time.Duration) {\n\tif interval == zeroDuration || len(addr) == 0 {\n\t\tlog.Info(\"disable Prometheus push client\")\n\t\treturn\n\t}\n\tlog.Info(\"start prometheus push client\", zap.String(\"server addr\", addr), zap.String(\"interval\", interval.String()))\n\tgo prometheusPushClient(addr, interval)\n}", "func PrometheusMetrics(config *cfg.InstrumentationConfig) *Metrics {\n\tif 
!config.Prometheus {\n\t\treturn NopMetrics()\n\t}\n\n\tbondedTokenVec := stdprometheus.NewGaugeVec(stdprometheus.GaugeOpts{\n\t\tNamespace: config.Namespace,\n\t\tSubsystem: MetricsSubsystem,\n\t\tName: \"bonded_token\",\n\t\tHelp: \"bonded token\",\n\t}, []string{\"validator_address\"})\n\n\tloosenTokenVec := stdprometheus.NewGaugeVec(stdprometheus.GaugeOpts{\n\t\tNamespace: config.Namespace,\n\t\tSubsystem: MetricsSubsystem,\n\t\tName: \"loosen_token\",\n\t\tHelp: \"loosen token\",\n\t}, []string{})\n\n\tburnedTokenVec := stdprometheus.NewGaugeVec(stdprometheus.GaugeOpts{\n\t\tNamespace: config.Namespace,\n\t\tSubsystem: MetricsSubsystem,\n\t\tName: \"burned_token\",\n\t\tHelp: \"burned token\",\n\t}, []string{})\n\n\tslashedTokenVec := stdprometheus.NewCounterVec(stdprometheus.CounterOpts{\n\t\tNamespace: config.Namespace,\n\t\tSubsystem: MetricsSubsystem,\n\t\tName: \"slashed_token\",\n\t\tHelp: \"slashed token\",\n\t}, []string{\"validator_address\"})\n\n\tjailedVec := stdprometheus.NewGaugeVec(stdprometheus.GaugeOpts{\n\t\tNamespace: config.Namespace,\n\t\tSubsystem: MetricsSubsystem,\n\t\tName: \"jailed\",\n\t\tHelp: \"jailed\",\n\t}, []string{\"validator_address\"})\n\n\tpowerVec := stdprometheus.NewGaugeVec(stdprometheus.GaugeOpts{\n\t\tNamespace: config.Namespace,\n\t\tSubsystem: MetricsSubsystem,\n\t\tName: \"power\",\n\t\tHelp: \"power\",\n\t}, []string{\"validator_address\"})\n\n\tpromutil.RegisterMetrics(bondedTokenVec, loosenTokenVec, burnedTokenVec, slashedTokenVec, jailedVec, powerVec)\n\n\treturn &Metrics{\n\t\tBondedToken: prometheus.NewGauge(bondedTokenVec),\n\t\tLoosenToken: prometheus.NewGauge(loosenTokenVec),\n\t\tBurnedToken: prometheus.NewGauge(burnedTokenVec),\n\t\tSlashedToken: prometheus.NewCounter(slashedTokenVec),\n\t\tJailed: prometheus.NewGauge(jailedVec),\n\t\tPower: prometheus.NewGauge(powerVec),\n\t}\n}", "func TestWatcher() {\n\tw, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating Watcher: %s\", 
err.Error())\n\t}\n\tdefer w.Close()\n\tw.Add(\"/media/peza\")\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tlog.Println(\"Starting watch...\")\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ev := <-w.Events:\n\t\t\t\tlog.Printf(\"Event: %s, %d\", ev.Name, ev.Op)\n\t\t\t\t// case err := <- w.Errors:\n\t\t\t}\n\t\t}\n\n\t}()\n\twg.Wait()\n}", "func TestUptimeCalculator(t *testing.T) {\n\n\tcalcFunc := CreateUpdateCalculationFunction(time.Now().Unix(), 10*time.Second)\n\tstart := time.Now().Unix()\n\tWaitForGracePeriod(10*time.Second, calcFunc, false)\n\tend := time.Now().Unix()\n\tdiff := end - start\n\tassert.True(t, diff >= 10)\n\n}", "func (v varExporter) WritePrometheus(w io.Writer, prefix string) {\n\tv.p.mu.Lock()\n\tprobes := make([]*Probe, 0, len(v.p.probes))\n\tfor _, probe := range v.p.probes {\n\t\tprobes = append(probes, probe)\n\t}\n\tv.p.mu.Unlock()\n\n\tsort.Slice(probes, func(i, j int) bool {\n\t\treturn probes[i].name < probes[j].name\n\t})\n\tfor _, probe := range probes {\n\t\tprobe.mu.Lock()\n\t\tkeys := make([]string, 0, len(probe.labels))\n\t\tfor k := range probe.labels {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tsort.Strings(keys)\n\t\tvar sb strings.Builder\n\t\tfmt.Fprintf(&sb, \"name=%q\", probe.name)\n\t\tfor _, k := range keys {\n\t\t\tfmt.Fprintf(&sb, \",%s=%q\", k, probe.labels[k])\n\t\t}\n\t\tlabels := sb.String()\n\n\t\tfmt.Fprintf(w, \"%s_interval_secs{%s} %f\\n\", prefix, labels, probe.interval.Seconds())\n\t\tif !probe.start.IsZero() {\n\t\t\tfmt.Fprintf(w, \"%s_start_secs{%s} %d\\n\", prefix, labels, probe.start.Unix())\n\t\t}\n\t\tif !probe.end.IsZero() {\n\t\t\tfmt.Fprintf(w, \"%s_end_secs{%s} %d\\n\", prefix, labels, probe.end.Unix())\n\t\t\t// Start is always present if end is.\n\t\t\tfmt.Fprintf(w, \"%s_latency_millis{%s} %d\\n\", prefix, labels, probe.end.Sub(probe.start).Milliseconds())\n\t\t\tif probe.result {\n\t\t\t\tfmt.Fprintf(w, \"%s_result{%s} 1\\n\", prefix, labels)\n\t\t\t} else 
{\n\t\t\t\tfmt.Fprintf(w, \"%s_result{%s} 0\\n\", prefix, labels)\n\t\t\t}\n\t\t}\n\t\tprobe.mu.Unlock()\n\t}\n}", "func (srv *HealthServer) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error {\n\treturn nil\n}", "func Test_CollectMetrics21(t *testing.T) {\n\tassert := assert.New(t)\n\tconfig := getTestConfig(2, 1)\n\tconfig.DataFunc = config.GetTestData1\n\n\tres := config.CollectMetrics()\n\tfmt.Println(\"21: \", res)\n\tfmt.Println(\"21: \", []cmd.MetricData{{Name: \"m1\", Help: \"h1\", MetricType: \"gauge\", Stats: []cmd.MetricRecord{{Value: 999, Labels: []string{\"l00\"}, LabelValues: []string{\"lv00\"}}}}, {Name: \"m2\", Help: \"h2\", MetricType: \"gauge\", Stats: []cmd.MetricRecord{{Value: 999, Labels: []string{\"l10\"}, LabelValues: []string{\"lv10\"}}}}})\n\tassert.Equal(true, cmp.Equal(res, []cmd.MetricData{{Name: \"m1\", Help: \"h1\", MetricType: \"gauge\", Stats: []cmd.MetricRecord{{Value: 999, Labels: []string{\"l00\"}, LabelValues: []string{\"lv00\"}}}}, {Name: \"m2\", Help: \"h2\", MetricType: \"gauge\", Stats: []cmd.MetricRecord{{Value: 999, Labels: []string{\"l10\"}, LabelValues: []string{\"lv10\"}}}}}))\n}", "func MetricsMw(dur prometheus.ObserverVec, bp prometheus.Observer, next http.Handler) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tpromhttp.InstrumentHandlerDuration(dur.MustCurryWith(prometheus.Labels{\"endpoint\": r.URL.Path}), next).ServeHTTP(w, r)\n\t\tbp.Observe(float64(health.Len()) / float64(health.Cap()))\n\t}\n}", "func (reporter *ProgressReporter) SpecWillRun(specSummary *types.SpecSummary) {}", "func TestMetrics(t *testing.T) {\n\tctx := context.Background()\n\tvar err error\n\n\t// Check metrics-server deployment is ready.\n\terr = checkReadyDeployment(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"could not get metrics: %v\", err)\n\t}\n\n\t// Check metrics availability.\n\terr = checkMetricsAvailability(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"could not get metrics: 
%v\", err)\n\t}\n}", "func TestMetricSimpleSync(t *testing.T) {\n\thelper := newHelper(t)\n\tdefer helper.Close()\n\n\thelper.preregisterAgent(t)\n\thelper.initSynchronizer(t)\n\thelper.AddTime(time.Minute)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// list metrics and register agent_status\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 2)\n\n\tidAgentMain, _ := helper.state.BleemeoCredentials()\n\n\tmetrics := helper.MetricsFromAPI()\n\twant := []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"1\",\n\t\t\t\tAgentID: helper.s.agentID,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t}\n\n\tif diff := cmp.Diff(want, metrics); diff != \"\" {\n\t\tt.Errorf(\"metrics mismatch (-want +got):\\n%s\", diff)\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"cpu_system\"},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: idAgentMain},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// We do 2 request: list metrics, list inactive metrics\n\t// and register new metric\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 3)\n\n\tmetrics = helper.MetricsFromAPI()\n\twant = []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"1\",\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"2\",\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: \"cpu_system\",\n\t\t},\n\t}\n\n\tif diff := cmp.Diff(want, metrics); diff != \"\" {\n\t\tt.Errorf(\"metrics mismatch (-want +got):\\n%s\", diff)\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\n\t// Register 1000 metrics\n\tfor n := 0; 
n < 1000; n++ {\n\t\thelper.pushPoints(t, []labels.Labels{\n\t\t\tlabels.New(\n\t\t\t\tlabels.Label{Name: types.LabelName, Value: \"metric\"},\n\t\t\t\tlabels.Label{Name: types.LabelItem, Value: strconv.FormatInt(int64(n), 10)},\n\t\t\t\tlabels.Label{Name: types.LabelMetaBleemeoItem, Value: strconv.FormatInt(int64(n), 10)},\n\t\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: idAgentMain},\n\t\t\t),\n\t\t})\n\t}\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// We do 1003 request: 3 for listing and 1000 registration\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 1003)\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 1002 {\n\t\tt.Errorf(\"len(metrics) = %v, want %v\", len(metrics), 1002)\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\thelper.store.DropAllMetrics()\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// We do 1001 request: 1001 to mark inactive all metrics but agent_status\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 1001)\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 1002 {\n\t\tt.Errorf(\"len(metrics) = %v, want %v\", len(metrics), 1002)\n\t}\n\n\tfor _, m := range metrics {\n\t\tif m.DeactivatedAt.IsZero() && m.Name != agentStatusName {\n\t\t\tt.Errorf(\"%v should be deactivated\", m)\n\n\t\t\tbreak\n\t\t} else if !m.DeactivatedAt.IsZero() && m.Name == agentStatusName {\n\t\t\tt.Errorf(\"%v should not be deactivated\", m)\n\t\t}\n\t}\n\n\thelper.AddTime(5 * time.Minute)\n\n\t// re-activate one metric + register one\n\thelper.pushPoints(t, []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"cpu_system\"},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: idAgentMain},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"disk_used\"},\n\t\t\tlabels.Label{Name: 
types.LabelItem, Value: \"/home\"},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoItem, Value: \"/home\"},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: idAgentMain},\n\t\t),\n\t})\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithoutFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// We do 3 request: 1 to re-enable metric,\n\t// 1 search for metric before registration, 1 to register metric\n\thelper.api.AssertCallPerResource(t, mockAPIResourceMetric, 3)\n\n\tmetrics = helper.MetricsFromAPI()\n\tif len(metrics) != 1003 {\n\t\tt.Errorf(\"len(metrics) = %v, want %v\", len(metrics), 1002)\n\t}\n\n\tfor _, m := range metrics {\n\t\tif m.Name == agentStatusName || m.Name == \"cpu_system\" || m.Name == \"disk_used\" {\n\t\t\tif !m.DeactivatedAt.IsZero() {\n\t\t\t\tt.Errorf(\"%v should be active\", m)\n\t\t\t}\n\t\t} else if m.DeactivatedAt.IsZero() {\n\t\t\tt.Errorf(\"%v should be deactivated\", m)\n\n\t\t\tbreak\n\t\t}\n\n\t\tif m.Name == \"disk_used\" {\n\t\t\tif m.Item != \"/home\" {\n\t\t\t\tt.Errorf(\"%v miss item=/home\", m)\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *StorageMock) MinimockUpdateInspect() {\n\tfor _, e := range m.UpdateMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to StorageMock.Update with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.UpdateMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterUpdateCounter) < 1 {\n\t\tif m.UpdateMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to StorageMock.Update\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to StorageMock.Update with params: %#v\", *m.UpdateMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcUpdate != nil && mm_atomic.LoadUint64(&m.afterUpdateCounter) < 1 {\n\t\tm.t.Error(\"Expected call to 
StorageMock.Update\")\n\t}\n}", "func (m *GatewayMock) MinimockBeforeRunInspect() {\n\tfor _, e := range m.BeforeRunMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to GatewayMock.BeforeRun with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.BeforeRunMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterBeforeRunCounter) < 1 {\n\t\tif m.BeforeRunMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to GatewayMock.BeforeRun\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to GatewayMock.BeforeRun with params: %#v\", *m.BeforeRunMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcBeforeRun != nil && mm_atomic.LoadUint64(&m.afterBeforeRunCounter) < 1 {\n\t\tm.t.Error(\"Expected call to GatewayMock.BeforeRun\")\n\t}\n}", "func (m *ListRepositoryMock) MinimockAddInspect() {\n\tfor _, e := range m.AddMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to ListRepositoryMock.Add with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.AddMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterAddCounter) < 1 {\n\t\tif m.AddMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to ListRepositoryMock.Add\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to ListRepositoryMock.Add with params: %#v\", *m.AddMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcAdd != nil && mm_atomic.LoadUint64(&m.afterAddCounter) < 1 {\n\t\tm.t.Error(\"Expected call to ListRepositoryMock.Add\")\n\t}\n}", "func (w *PrometheusWriter) Write(metric model.Metric) error {\n\tduration := float64(metric.Duration / 
time.Millisecond)\n\treason := \"\"\n\tif metric.Error != \"\" {\n\t\treason = strings.SplitN(metric.Error, \":\", 2)[0]\n\t\treason = strings.ToLower(reason)\n\t\thealthCheckStatusGauge.With(prometheus.Labels{\n\t\t\t\"name\": metric.Name,\n\t\t}).Set(0)\n\t\thealthCheckErrorCounter.With(prometheus.Labels{\n\t\t\t\"name\": metric.Name,\n\t\t\t\"reason\": reason,\n\t\t}).Inc()\n\t} else {\n\t\thealthCheckStatusGauge.With(prometheus.Labels{\n\t\t\t\"name\": metric.Name,\n\t\t}).Set(1)\n\t}\n\thealthCheckResponseTimeGauge.With(prometheus.Labels{\n\t\t\"name\": metric.Name,\n\t}).Set(duration)\n\n\treturn nil\n}", "func (m *ForkingDigesterMock) MinimockAddNextInspect() {\n\tfor _, e := range m.AddNextMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to ForkingDigesterMock.AddNext with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.AddNextMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterAddNextCounter) < 1 {\n\t\tif m.AddNextMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to ForkingDigesterMock.AddNext\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to ForkingDigesterMock.AddNext with params: %#v\", *m.AddNextMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcAddNext != nil && mm_atomic.LoadUint64(&m.afterAddNextCounter) < 1 {\n\t\tm.t.Error(\"Expected call to ForkingDigesterMock.AddNext\")\n\t}\n}", "func (m *DigestHolderMock) MinimockWriteToInspect() {\n\tfor _, e := range m.WriteToMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to DigestHolderMock.WriteTo with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.WriteToMock.defaultExpectation != nil && 
mm_atomic.LoadUint64(&m.afterWriteToCounter) < 1 {\n\t\tif m.WriteToMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to DigestHolderMock.WriteTo\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to DigestHolderMock.WriteTo with params: %#v\", *m.WriteToMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcWriteTo != nil && mm_atomic.LoadUint64(&m.afterWriteToCounter) < 1 {\n\t\tm.t.Error(\"Expected call to DigestHolderMock.WriteTo\")\n\t}\n}", "func enablePrometheusOutput(req *http.Request) bool {\n\tif format := req.URL.Query().Get(\"format\"); format == \"prometheus\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\n\tvar err error\n\tvar bytes []byte\n\n\tapsc := gorillaContext.Get(r, \"apsc\").(push.Client)\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\thealthMsg := HealthStatus{\n\t\tStatus: \"ok\",\n\t}\n\n\tpwToken := gorillaContext.Get(r, \"push_worker_token\").(string)\n\tpushEnabled := gorillaContext.Get(r, \"push_enabled\").(bool)\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\n\tif pushEnabled {\n\t\t_, err := auth.GetPushWorker(pwToken, refStr)\n\t\tif err != nil {\n\t\t\thealthMsg.Status = \"warning\"\n\t\t}\n\n\t\thealthMsg.PushServers = []PushServerInfo{\n\t\t\t{\n\t\t\t\tEndpoint: apsc.Target(),\n\t\t\t\tStatus: apsc.HealthCheck(context.TODO()).Result(),\n\t\t\t},\n\t\t}\n\n\t} else {\n\t\thealthMsg.PushFunctionality = \"disabled\"\n\t}\n\n\tif bytes, err = json.MarshalIndent(healthMsg, \"\", \" \"); err != nil {\n\t\terr := APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\trespondOK(w, bytes)\n}", "func (s) TestWatchCallAnotherWatch(t *testing.T) {\n\tapiClientCh, cleanup := 
overrideNewAPIClient()\n\tdefer cleanup()\n\n\tclient, err := New(clientOpts(testXDSServer, false))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tdefer client.Close()\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tc, err := apiClientCh.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"timeout when waiting for API client to be created: %v\", err)\n\t}\n\tapiClient := c.(*testAPIClient)\n\n\tclusterUpdateCh := testutils.NewChannel()\n\tfirstTime := true\n\tclient.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {\n\t\tclusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})\n\t\t// Calls another watch inline, to ensure there's deadlock.\n\t\tclient.WatchCluster(\"another-random-name\", func(ClusterUpdate, error) {})\n\n\t\tif _, err := apiClient.addWatches[ClusterResource].Receive(ctx); firstTime && err != nil {\n\t\t\tt.Fatalf(\"want new watch to start, got error %v\", err)\n\t\t}\n\t\tfirstTime = false\n\t})\n\tif _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {\n\t\tt.Fatalf(\"want new watch to start, got error %v\", err)\n\t}\n\n\twantUpdate := ClusterUpdate{ServiceName: testEDSName}\n\tclient.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate})\n\tif err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twantUpdate2 := ClusterUpdate{ServiceName: testEDSName + \"2\"}\n\tclient.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate2})\n\tif err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func checkReconcileEventsOccur() {\n\t// These events are fired when the reconcile loop makes a change\n\tgomega.Eventually(func() string {\n\t\tout, err := RunKubeCtlCommand(\"describe\", \"hostpathprovisioner\", \"hostpath-provisioner\")\n\t\tgomega.Expect(err).ToNot(gomega.HaveOccurred())\n\t\treturn out\n\t}, 90*time.Second, 
1*time.Second).Should(gomega.ContainSubstring(\"UpdateResourceStart\"))\n\n\tgomega.Eventually(func() string {\n\t\tout, err := RunKubeCtlCommand(\"describe\", \"hostpathprovisioner\", \"hostpath-provisioner\")\n\t\tgomega.Expect(err).ToNot(gomega.HaveOccurred())\n\t\treturn out\n\t}, 90*time.Second, 1*time.Second).Should(gomega.ContainSubstring(\"UpdateResourceSuccess\"))\n}", "func (m *ActiveNodeMock) ValidateCallCounters() {\n\n\tif !m.GetDeclaredPowerFinished() {\n\t\tm.t.Fatal(\"Expected call to ActiveNodeMock.GetDeclaredPower\")\n\t}\n\n\tif !m.GetIndexFinished() {\n\t\tm.t.Fatal(\"Expected call to ActiveNodeMock.GetIndex\")\n\t}\n\n\tif !m.GetNodeIDFinished() {\n\t\tm.t.Fatal(\"Expected call to ActiveNodeMock.GetNodeID\")\n\t}\n\n\tif !m.GetOpModeFinished() {\n\t\tm.t.Fatal(\"Expected call to ActiveNodeMock.GetOpMode\")\n\t}\n\n\tif !m.GetSignatureVerifierFinished() {\n\t\tm.t.Fatal(\"Expected call to ActiveNodeMock.GetSignatureVerifier\")\n\t}\n\n\tif !m.GetStaticFinished() {\n\t\tm.t.Fatal(\"Expected call to ActiveNodeMock.GetStatic\")\n\t}\n\n\tif !m.IsJoinerFinished() {\n\t\tm.t.Fatal(\"Expected call to ActiveNodeMock.IsJoiner\")\n\t}\n\n}", "func TestAssessRunStatusWithMixedMetrics(t *testing.T) {\n\tf := newFixture(t)\n\tdefer f.Close()\n\tc, _, _ := f.newController(noResyncPeriodFunc)\n\n\trun := v1alpha1.AnalysisRun{\n\t\tSpec: v1alpha1.AnalysisRunSpec{\n\t\t\tMetrics: []v1alpha1.Metric{\n\t\t\t\t{\n\t\t\t\t\tName: \"run-forever\",\n\t\t\t\t\tProvider: v1alpha1.MetricProvider{\n\t\t\t\t\t\tJob: &v1alpha1.JobMetric{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"success-metric\",\n\t\t\t\t\tProvider: v1alpha1.MetricProvider{\n\t\t\t\t\t\tJob: &v1alpha1.JobMetric{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDryRun: []v1alpha1.DryRun{{\n\t\t\t\tMetricName: \"success-metric\",\n\t\t\t}},\n\t\t},\n\t\tStatus: v1alpha1.AnalysisRunStatus{\n\t\t\tMetricResults: []v1alpha1.MetricResult{\n\t\t\t\t{\n\t\t\t\t\tName: 
\"run-forever\",\n\t\t\t\t\tInconclusive: 1,\n\t\t\t\t\tDryRun: false,\n\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseRunning,\n\t\t\t\t\tMeasurements: []v1alpha1.Measurement{{\n\t\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseRunning,\n\t\t\t\t\t\tStartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"success-metric\",\n\t\t\t\t\tCount: 1,\n\t\t\t\t\tFailed: 1,\n\t\t\t\t\tDryRun: true,\n\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseSuccessful,\n\t\t\t\t\tMeasurements: []v1alpha1.Measurement{{\n\t\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseFailed,\n\t\t\t\t\t\tStartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),\n\t\t\t\t\t\tFinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tf.provider.On(\"Run\", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseFailed), nil)\n\tf.provider.On(\"Resume\", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseSuccessful), nil)\n\n\tnewRun := c.reconcileAnalysisRun(&run)\n\tassert.Equal(t, v1alpha1.AnalysisPhaseInconclusive, newRun.Status.Phase)\n\tassert.Equal(t, \"Metric \\\"run-forever\\\" assessed Inconclusive due to inconclusive (1) > inconclusiveLimit (0)\", newRun.Status.Message)\n}", "func (pr PrometheusRecorder) RecordReqDuration(jenkinsService, operation string, code int, elapsedTime float64) {\n\treportRequestDuration(jenkinsService, operation, code, elapsedTime)\n}", "func (h *HealthImpl) Watch(in *grpc_health_v1.HealthCheckRequest, stream grpc_health_v1.Health_WatchServer) error {\n\treturn nil\n}", "func Test_Pod_Checker(t *testing.T) {\n\tworkflow := func(name string) string {\n\t\treturn workflowPath(\"pod\", name)\n\t}\n\tconst (\n\t\tadded = \"added\"\n\t\tcontainerTerminatedError = \"containerTerminatedError\"\n\t\tcontainerTerminatedSuccess = 
\"containerTerminatedSuccess\"\n\t\tcontainerTerminatedSuccessRestartNever = \"containerTerminatedSuccessRestartNever\"\n\t\tcreateSuccess = \"createSuccess\"\n\t\timagePullError = \"imagePullError\"\n\t\timagePullErrorResolved = \"imagePullErrorResolved\"\n\t\tscheduled = \"scheduled\"\n\t\tunready = \"unready\"\n\t\tunscheduled = \"unscheduled\"\n\t)\n\n\ttests := []struct {\n\t\tname string\n\t\trecordingPaths []string\n\t\t// TODO: optional message validator function to check returned messages\n\t\texpectReady bool\n\t}{\n\t\t{\n\t\t\tname: \"Pod added but not ready\",\n\t\t\trecordingPaths: []string{workflow(added)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod scheduled but not ready\",\n\t\t\trecordingPaths: []string{workflow(scheduled)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod create success\",\n\t\t\trecordingPaths: []string{workflow(createSuccess)},\n\t\t\texpectReady: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod image pull error\",\n\t\t\trecordingPaths: []string{workflow(imagePullError)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod create success after image pull failure resolved\",\n\t\t\trecordingPaths: []string{workflow(imagePullError), workflow(imagePullErrorResolved)},\n\t\t\texpectReady: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod unscheduled\",\n\t\t\trecordingPaths: []string{workflow(unscheduled)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod unready\",\n\t\t\trecordingPaths: []string{workflow(unready)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod container terminated with error\",\n\t\t\trecordingPaths: []string{workflow(containerTerminatedError)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod container terminated successfully\",\n\t\t\trecordingPaths: []string{workflow(containerTerminatedSuccess)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod container terminated successfully with restartPolicy: Never\",\n\t\t\trecordingPaths: 
[]string{workflow(containerTerminatedSuccessRestartNever)},\n\t\t\texpectReady: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tchecker := NewPodChecker()\n\n\t\t\tready, messages := mustCheckIfRecordingsReady(tt.recordingPaths, checker)\n\t\t\tif ready != tt.expectReady {\n\t\t\t\tt.Errorf(\"Ready() = %t, want %t\\nMessages: %s\", ready, tt.expectReady, messages)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestUpdateNotifier(t *testing.T) {\n\tplainMsg := \"You are running an older version of Minio released \"\n\tcolorMsg := plainMsg\n\tyellow := color.New(color.FgYellow, color.Bold).SprintfFunc()\n\tif runtime.GOOS == \"windows\" {\n\t\tplainMsg += \"3 days from now\"\n\t\tcolorMsg += yellow(\"3 days from now\")\n\t} else {\n\t\tplainMsg += \"2 days from now\"\n\t\tcolorMsg += yellow(\"2 days from now\")\n\t}\n\n\tupdateMsg := colorizeUpdateMessage(minioReleaseURL, time.Duration(72*time.Hour))\n\n\tif !(strings.Contains(updateMsg, plainMsg) || strings.Contains(updateMsg, colorMsg)) {\n\t\tt.Fatal(\"Duration string not found in colorized update message\", updateMsg)\n\t}\n\n\tif !strings.Contains(updateMsg, minioReleaseURL) {\n\t\tt.Fatal(\"Update message not found in colorized update message\", minioReleaseURL)\n\t}\n}", "func (s *Service) PrometheusList(c context.Context, app, method, mType string) (ret *monitor.MoniRet, err error) {\n\tvar (\n\t\tmt = &monitor.Monitor{}\n\t\tmts = make([]*monitor.Monitor, 0)\n\t)\n\tif err = s.DB.Select(\"interface,count,cost,mtime\").Where(\"app_id = ?\", app+\"-\"+method).Group(\"mtime,interface\").Order(\"interface,mtime\").Find(&mts).Error; err != nil {\n\t\tlog.Error(\"s.PrometheusList query all error(%v)\", err)\n\t\treturn\n\t}\n\tif len(mts) < 1 {\n\t\treturn\n\t}\n\tif err = s.DB.Where(\"app_id = ?\", app+\"-\"+method).First(mt).Error; err != nil {\n\t\tlog.Error(\"s.Prometheus query first error(%v)\", err)\n\t\treturn\n\t}\n\treturn merge(s.packing(mts), s.times(mt.MTime), mType), 
err\n}", "func (o ArgoCDSpecPtrOutput) Prometheus() ArgoCDSpecPrometheusPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpec) *ArgoCDSpecPrometheus {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Prometheus\n\t}).(ArgoCDSpecPrometheusPtrOutput)\n}", "func (m *RecordCollectionAccessorMock) MinimockForPulseInspect() {\n\tfor _, e := range m.ForPulseMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to RecordCollectionAccessorMock.ForPulse with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.ForPulseMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterForPulseCounter) < 1 {\n\t\tif m.ForPulseMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to RecordCollectionAccessorMock.ForPulse\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to RecordCollectionAccessorMock.ForPulse with params: %#v\", *m.ForPulseMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcForPulse != nil && mm_atomic.LoadUint64(&m.afterForPulseCounter) < 1 {\n\t\tm.t.Error(\"Expected call to RecordCollectionAccessorMock.ForPulse\")\n\t}\n}", "func TestBatchOnUploadEventFailure(t *testing.T) {\n\tbatch := Batch{&mocks.FailingWriter{}}\n\terr := batch.OnUploadEvent(&spec.Measurement{\n\t\tElapsed: 1.0,\n\t})\n\tif err != mocks.ErrMocked {\n\t\tt.Fatal(\"Not the error we expected\")\n\t}\n}", "func (b *B) ReportMetric(n float64, unit string) {}", "func RegisterGinkgoAlert(test, team, contact, slack, email string, threshold int) {\n\tma := GetMetricAlerts()\n\ttestAlert := MetricAlert{\n\t\tName: test,\n\t\tTeamOwner: team,\n\t\tPrimaryContact: contact,\n\t\tSlackChannel: slack,\n\t\tEmail: email,\n\t\tFailureThreshold: threshold,\n\t}\n\tma.AddAlert(testAlert)\n}", "func (m *GatewayMock) MinimockOnPulseFromConsensusInspect() {\n\tfor _, e := range 
m.OnPulseFromConsensusMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to GatewayMock.OnPulseFromConsensus with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.OnPulseFromConsensusMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterOnPulseFromConsensusCounter) < 1 {\n\t\tif m.OnPulseFromConsensusMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to GatewayMock.OnPulseFromConsensus\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to GatewayMock.OnPulseFromConsensus with params: %#v\", *m.OnPulseFromConsensusMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcOnPulseFromConsensus != nil && mm_atomic.LoadUint64(&m.afterOnPulseFromConsensusCounter) < 1 {\n\t\tm.t.Error(\"Expected call to GatewayMock.OnPulseFromConsensus\")\n\t}\n}", "func (s *Server) Watch(in *grpc_health_v1.HealthCheckRequest, server grpc_health_v1.Health_WatchServer) error {\n\tresp := &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}\n\treturn server.Send(resp)\n}", "func TestMetricsMemoryPressureResponse(t *testing.T) {\n\tvar currentMemAlloc uint64\n\tml := &memoryLimiter{\n\t\tusageChecker: memUsageChecker{\n\t\t\tmemAllocLimit: 1024,\n\t\t},\n\t\treadMemStatsFn: func(ms *runtime.MemStats) {\n\t\t\tms.Alloc = currentMemAlloc\n\t\t},\n\t\tobsrep: obsreport.NewProcessorObsReport(configtelemetry.LevelNone, \"\"),\n\t\tlogger: zap.NewNop(),\n\t}\n\tmp, err := processorhelper.NewMetricsProcessor(\n\t\t&Config{\n\t\t\tProcessorSettings: configmodels.ProcessorSettings{\n\t\t\t\tTypeVal: typeStr,\n\t\t\t\tNameVal: typeStr,\n\t\t\t},\n\t\t},\n\t\tconsumertest.NewMetricsNop(),\n\t\tml,\n\t\tprocessorhelper.WithCapabilities(processorCapabilities),\n\t\tprocessorhelper.WithShutdown(ml.shutdown))\n\trequire.NoError(t, 
err)\n\n\tctx := context.Background()\n\tmd := pdata.NewMetrics()\n\n\t// Below memAllocLimit.\n\tcurrentMemAlloc = 800\n\tml.checkMemLimits()\n\tassert.NoError(t, mp.ConsumeMetrics(ctx, md))\n\n\t// Above memAllocLimit.\n\tcurrentMemAlloc = 1800\n\tml.checkMemLimits()\n\tassert.Equal(t, errForcedDrop, mp.ConsumeMetrics(ctx, md))\n\n\t// Check ballast effect\n\tml.ballastSize = 1000\n\n\t// Below memAllocLimit accounting for ballast.\n\tcurrentMemAlloc = 800 + ml.ballastSize\n\tml.checkMemLimits()\n\tassert.NoError(t, mp.ConsumeMetrics(ctx, md))\n\n\t// Above memAllocLimit even accountiing for ballast.\n\tcurrentMemAlloc = 1800 + ml.ballastSize\n\tml.checkMemLimits()\n\tassert.Equal(t, errForcedDrop, mp.ConsumeMetrics(ctx, md))\n\n\t// Restore ballast to default.\n\tml.ballastSize = 0\n\n\t// Check spike limit\n\tml.usageChecker.memSpikeLimit = 512\n\n\t// Below memSpikeLimit.\n\tcurrentMemAlloc = 500\n\tml.checkMemLimits()\n\tassert.NoError(t, mp.ConsumeMetrics(ctx, md))\n\n\t// Above memSpikeLimit.\n\tcurrentMemAlloc = 550\n\tml.checkMemLimits()\n\tassert.Equal(t, errForcedDrop, mp.ConsumeMetrics(ctx, md))\n\n}", "func (p Prometheus) Name() string {\n\treturn \"Prometheus probe for \" + p.URL + \" [\" + p.Key + \"]\"\n}", "func PrometheusSpec() *PrometheusSpecApplyConfiguration {\n\treturn &PrometheusSpecApplyConfiguration{}\n}", "func Test_CollectMetrics01(t *testing.T) {\n\tassert := assert.New(t)\n\tconfig := getTestConfig(0, 1)\n\tconfig.DataFunc = config.GetTestData1\n\n\tres := config.CollectMetrics()\n\tassert.Nil(res)\n}", "func (*Metrics) MetricStruct() {}", "func (o StorageClusterSpecMonitoringOutput) Prometheus() StorageClusterSpecMonitoringPrometheusPtrOutput {\n\treturn o.ApplyT(func(v StorageClusterSpecMonitoring) *StorageClusterSpecMonitoringPrometheus { return v.Prometheus }).(StorageClusterSpecMonitoringPrometheusPtrOutput)\n}", "func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {\n\tlabels := []string{}\n\tfor i := 0; 
i < len(labelsAndValues); i += 2 {\n\t\tlabels = append(labels, labelsAndValues[i])\n\t}\n\treturn &Metrics{\n\t\tPeers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: MetricsSubsystem,\n\t\t\tName: \"peers\",\n\t\t\tHelp: \"Number of peers.\",\n\t\t}, labels).With(labelsAndValues...),\n\t\tPeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: MetricsSubsystem,\n\t\t\tName: \"peer_receive_bytes_total\",\n\t\t\tHelp: \"Number of bytes received from a given peer.\",\n\t\t}, append(labels, \"peer_id\", \"chID\")).With(labelsAndValues...),\n\t\tPeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: MetricsSubsystem,\n\t\t\tName: \"peer_send_bytes_total\",\n\t\t\tHelp: \"Number of bytes sent to a given peer.\",\n\t\t}, append(labels, \"peer_id\", \"chID\")).With(labelsAndValues...),\n\t\tPeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: MetricsSubsystem,\n\t\t\tName: \"peer_pending_send_bytes\",\n\t\t\tHelp: \"Number of pending bytes to be sent to a given peer.\",\n\t\t}, append(labels, \"peer_id\")).With(labelsAndValues...),\n\t\tNumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: MetricsSubsystem,\n\t\t\tName: \"num_txs\",\n\t\t\tHelp: \"Number of transactions submitted by each peer.\",\n\t\t}, append(labels, \"peer_id\")).With(labelsAndValues...),\n\t}\n}", "func TestTrimMeasurementHistory(t *testing.T) {\n\tf := newFixture(t)\n\tdefer f.Close()\n\tc, _, _ := f.newController(noResyncPeriodFunc)\n\n\tf.provider.On(\"GarbageCollect\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\t{\n\t\trun := newRun()\n\t\terr := c.garbageCollectMeasurements(run, map[string]*v1alpha1.MeasurementRetention{}, 2)\n\t\tassert.Nil(t, err)\n\t\tassert.Len(t, 
run.Status.MetricResults[0].Measurements, 1)\n\t\tassert.Equal(t, \"1\", run.Status.MetricResults[0].Measurements[0].Value)\n\t\tassert.Len(t, run.Status.MetricResults[1].Measurements, 2)\n\t\tassert.Equal(t, \"2\", run.Status.MetricResults[1].Measurements[0].Value)\n\t\tassert.Equal(t, \"3\", run.Status.MetricResults[1].Measurements[1].Value)\n\t}\n\t{\n\t\trun := newRun()\n\t\terr := c.garbageCollectMeasurements(run, map[string]*v1alpha1.MeasurementRetention{}, 1)\n\t\tassert.Nil(t, err)\n\t\tassert.Len(t, run.Status.MetricResults[0].Measurements, 1)\n\t\tassert.Equal(t, \"1\", run.Status.MetricResults[0].Measurements[0].Value)\n\t\tassert.Len(t, run.Status.MetricResults[1].Measurements, 1)\n\t\tassert.Equal(t, \"3\", run.Status.MetricResults[1].Measurements[0].Value)\n\t}\n\t{\n\t\trun := newRun()\n\t\tvar measurementRetentionMetricsMap = map[string]*v1alpha1.MeasurementRetention{}\n\t\tmeasurementRetentionMetricsMap[\"metric2\"] = &v1alpha1.MeasurementRetention{MetricName: \"*\", Limit: 2}\n\t\terr := c.garbageCollectMeasurements(run, measurementRetentionMetricsMap, 1)\n\t\tassert.Nil(t, err)\n\t\tassert.Len(t, run.Status.MetricResults[0].Measurements, 1)\n\t\tassert.Equal(t, \"1\", run.Status.MetricResults[0].Measurements[0].Value)\n\t\tassert.Len(t, run.Status.MetricResults[1].Measurements, 2)\n\t\tassert.Equal(t, \"2\", run.Status.MetricResults[1].Measurements[0].Value)\n\t\tassert.Equal(t, \"3\", run.Status.MetricResults[1].Measurements[1].Value)\n\t}\n\t{\n\t\trun := newRun()\n\t\tvar measurementRetentionMetricsMap = map[string]*v1alpha1.MeasurementRetention{}\n\t\tmeasurementRetentionMetricsMap[\"metric2\"] = &v1alpha1.MeasurementRetention{MetricName: \"metric2\", Limit: 2}\n\t\terr := c.garbageCollectMeasurements(run, measurementRetentionMetricsMap, 1)\n\t\tassert.Nil(t, err)\n\t\tassert.Len(t, run.Status.MetricResults[0].Measurements, 1)\n\t\tassert.Equal(t, \"1\", run.Status.MetricResults[0].Measurements[0].Value)\n\t\tassert.Len(t, 
run.Status.MetricResults[1].Measurements, 2)\n\t\tassert.Equal(t, \"2\", run.Status.MetricResults[1].Measurements[0].Value)\n\t\tassert.Equal(t, \"3\", run.Status.MetricResults[1].Measurements[1].Value)\n\t}\n}", "func (s *GRPCServer) WatchAdd(context.Context, *dashboard.WatchRequest) (*dashboard.Empty, error) {\n\tpanic(\"not implemented\")\n}", "func (m *GathererMock) MinimockGatherInspect() {\n\tfor _, e := range m.GatherMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Error(\"Expected call to GathererMock.Gather\")\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.GatherMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterGatherCounter) < 1 {\n\t\tm.t.Error(\"Expected call to GathererMock.Gather\")\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcGather != nil && mm_atomic.LoadUint64(&m.afterGatherCounter) < 1 {\n\t\tm.t.Error(\"Expected call to GathererMock.Gather\")\n\t}\n}", "func (p *Prober) NotHealthy(err error) {\n\told := atomic.SwapUint32(&p.healthy, 0)\n\n\tif old == 1 {\n\t\tlevel.Info(p.logger).Log(\"msg\", \"changing probe status\", \"status\", \"healthy\")\n\t}\n}", "func (m *MetricsProvider) WitnessAddProofVctNil(value time.Duration) {\n}", "func PrometheusMetrics(port int) error {\n\tpr := func() *httprouter.Router {\n\t\tr := httprouter.New()\n\t\tr.Handler(\"GET\", \"/metrics\", promhttp.Handler())\n\t\treturn r\n\t}\n\n\tif err := fmt.Errorf(\"%v\", http.ListenAndServe(fmt.Sprintf(\":%v\", port), LoggingHandler(pr()))); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}" ]
[ "0.64160174", "0.5866786", "0.5753714", "0.5691876", "0.5658851", "0.56017107", "0.55592465", "0.5501813", "0.5293748", "0.52713203", "0.5267254", "0.52549523", "0.5211553", "0.5194255", "0.5184001", "0.512293", "0.5121803", "0.51027983", "0.51010734", "0.5083756", "0.5078317", "0.50713855", "0.50667524", "0.5054374", "0.5053152", "0.5033388", "0.5032254", "0.5022173", "0.50092083", "0.50057715", "0.4999279", "0.4998494", "0.49946165", "0.49718723", "0.49697152", "0.49688345", "0.49574816", "0.49508932", "0.4942399", "0.49416253", "0.49274305", "0.49235094", "0.49207273", "0.4912736", "0.4912251", "0.488347", "0.48824555", "0.48579508", "0.48571855", "0.4841519", "0.48392585", "0.4832011", "0.48280546", "0.48151985", "0.48135746", "0.48110306", "0.48098853", "0.48093086", "0.48079005", "0.48029336", "0.48012626", "0.47995", "0.4793478", "0.479257", "0.4792093", "0.47907925", "0.47907165", "0.47876605", "0.4787576", "0.47845033", "0.4784421", "0.47840768", "0.47838032", "0.47726712", "0.47717696", "0.47711694", "0.4769922", "0.47698316", "0.4769686", "0.47643778", "0.47618547", "0.47613534", "0.4759178", "0.47560656", "0.4753166", "0.4750754", "0.4749585", "0.47436097", "0.4740629", "0.4736653", "0.47318935", "0.4729617", "0.47294194", "0.47271115", "0.4727008", "0.4722418", "0.47218397", "0.4715649", "0.47107974", "0.47104573" ]
0.7047378
0
Sacla mocks base method
func (m *MockmonitorInterface) Sacla(name string, num float64) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Sacla", name, num) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func mockedGranter(kubeutil *kube.Kube, app *v1.RadixRegistration, namespace string, serviceAccount *corev1.ServiceAccount) error {\n\treturn nil\n}", "func mockAlwaysRun() bool { return true }", "func (m *MockisIpsecSAAction_SaHandle) isIpsecSAAction_SaHandle() {\n\tm.ctrl.Call(m, \"isIpsecSAAction_SaHandle\")\n}", "func (m *MockFileInfo) Sys() interface{} {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Sys\")\n\tret0, _ := ret[0].(interface{})\n\treturn ret0\n}", "func TestDelegatorProxyValidatorShares7Steps(t *testing.T) {\n\n}", "func mockASRockBMC() *httptest.Server {\n\thandler := http.NewServeMux()\n\thandler.HandleFunc(\"/\", index)\n\thandler.HandleFunc(\"/api/session\", session)\n\thandler.HandleFunc(\"/api/asrr/fw-info\", fwinfo)\n\thandler.HandleFunc(\"/api/fru\", fruinfo)\n\thandler.HandleFunc(\"/api/asrr/inventory_info\", inventoryinfo)\n\thandler.HandleFunc(\"/api/sensors\", sensorsinfo)\n\thandler.HandleFunc(\"/api/asrr/getbioscode\", biosPOSTCodeinfo)\n\thandler.HandleFunc(\"/api/chassis-status\", chassisStatusInfo)\n\n\t// fw update endpoints - in order of invocation\n\thandler.HandleFunc(\"/api/maintenance/flash\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/maintenance/firmware\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/maintenance/firmware/verification\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/maintenance/firmware/upgrade\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/maintenance/firmware/flash-progress\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/maintenance/reset\", bmcFirmwareUpgrade)\n\thandler.HandleFunc(\"/api/asrr/maintenance/BIOS/firmware\", biosFirmwareUpgrade)\n\n\t// user accounts endpoints\n\thandler.HandleFunc(\"/api/settings/users\", userAccountList)\n\thandler.HandleFunc(\"/api/settings/users/3\", userAccountList)\n\treturn httptest.NewTLSServer(handler)\n}", "func StartMockups() {\n\tenabledMocks = true\n}", "func (m *MockChoriaProvider) ScoutOverridesPath() string 
{\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ScoutOverridesPath\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func mockChildPackages() {\n\n\t// Fake an AWS credentials file so that the mfile package will nehave as if it is happy\n\tsetFakeCredentials()\n\n\t// Fake out the creds package into using an apparently credentials response from AWS\n\tcreds.SetGetSessionTokenFunc(func(awsService *sts.STS, input *sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) {\n\t\treturn getSessionTokenOutput, nil\n\t})\n\n}", "func TestAssetSysCC_IssueToken(t *testing.T) {\n\n\tfmt.Println(\"-----------------------------------\")\n\tfmt.Println(\"Test2: issueToken\")\n\n\tascc := new(AssetSysCC)\n\tstub := shim.NewMockStub(\"ascc\", ascc)\n\tcheckInit(t, stub, [][]byte{[]byte(\"\")})\n\n\n\tres_test2 := stub.MockInvoke(\"1\", [][]byte{[]byte(\"registerToken\"), []byte(\"SSToken\"), []byte(\"250\"), []byte(\"18\"), []byte(MAddress[:])})\n\n\tif res_test2.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test2.Message))\n\t\tt.FailNow()\n\t}\n\n\tres_test3 := stub.MockInvoke(\"1\", [][]byte{[]byte(\"issueToken\"), []byte(\"SSToken\"), []byte(\"250\"), []byte(\"18\"), []byte(MAddress[:])})\n\n\tif res_test3.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test3.Message))\n\t\tt.FailNow()\n\t}\n\tcheckQueryInfo(t, stub, [][]byte{[]byte(\"getTokenInfo\"), []byte(\"SSToken\")})\n\n\t////query token quantity\n\t//\tres1 := stub.MockInvoke(\"2\", [][]byte{[]byte(\"getBalance\"), []byte(MAddress[:]), []byte(\"SSToken\")});\n\t//\tif res1.Status != shim.OK {\n\t//\t\tfmt.Println(\"Query failed\", string(res1.Message))\n\t//\t\tt.FailNow()\n\t//\t}\n\t//\tamount,_ := strconv.Atoi(string(res1.Payload))\n\t//\tif amount != 250 {\n\t//\t\tfmt.Printf(\"Query result error! 
%v\", amount )\n\t//\t\tt.FailNow()\n\t//\t}\n\n\tfmt.Println(\"Test issueToken for a registered one Success!\")\n\n\tres_test4 := stub.MockInvoke(\"2\", [][]byte{[]byte(\"issueToken\"), []byte(\"MToken\"), []byte(\"888\"), []byte(\"20\"), []byte(testAddress[:])})\n\tif res_test4.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test3.Message))\n\t\tt.FailNow()\n\t}\n\tcheckQueryInfo(t, stub, [][]byte{[]byte(\"getTokenInfo\"), []byte(\"MToken\")})\n\n\t////query token quantity\n\t//res2 := stub.MockInvoke(\"2\", [][]byte{[]byte(\"getBalance\"), []byte(testAddress[:]), []byte(\"CMBToken\")});\n\t//if res1.Status != shim.OK {\n\t//\tfmt.Println(\"Query failed\", string(res2.Message))\n\t//\tt.FailNow()\n\t//}\n\t//amount2,_ := strconv.Atoi(string(res2.Payload))\n\t//if amount2 != 888 {\n\t//\tfmt.Printf(\"Query result error! %v\", amount2 )\n\t//\tt.FailNow()\n\t//}\n\n\tfmt.Println(\"Test issueToken for an un registered one Success!\")\n}", "func Mock() Cluster { return mockCluster{} }", "func Mock(fake string) func() {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\torigin := backend\n\tbackend = fake\n\treturn func() { Mock(origin) }\n}", "func mockNeverRun() bool { return false }", "func TestSetGetGoodArgsFull(t *testing.T) {\n\tfmt.Println(\"Entering the test method for SetGetGoodArgsFull\")\n\tprovcc := new(SimpleAsset)\n\tstub := shim.NewMockStub(\"ANY_PARAM\", provcc)\n\n\t// Testing the init. It always return true. No parameters in init. 
\n\t\n\tcheckInit(t, stub, [][]byte{[]byte(\"init\")})\n\n\tres := stub.MockInvoke(\"1\", [][]byte{[]byte(\"set\"), []byte(\"S52fkpF2rCEArSuwqyDA9tVjawUdrkGzbNQLaa7xJfA=\"),\n\t[]byte(\"agentInfo.atype\"),[]byte(\"1.2.3.4\"),\n\t[]byte(\"agentInfo.id\"),[]byte(\"agentidentifier\"),\n\t[]byte(\"agentinfo.name\"),[]byte(\"7.8.9\"),\n\t[]byte(\"agentinfo.idp\"),[]byte(\"urn:tiani-spirit:sts\"),\n\t[]byte(\"locationInfo.id\"),[]byte(\"urn:oid:1.2.3\"),\n\t[]byte(\"locationInfo.name\"),[]byte(\"General Hospital\"),\n\t[]byte(\"locationInfo.locality\"),[]byte(\"Nashville, TN\"),\n\t[]byte(\"locationInfo.docid\"),[]byte(\"1.2.3\"),\n\t[]byte(\"action\"),[]byte(\"ex:CREATE\"),\n\t[]byte(\"date\"),[]byte(\"2017-11-21T10:29:49.816Z\"),\n\t[]byte(\"digest1\"),[]byte(\"E0nioxbCYD5AlzGWXDDDl0Gt5AAKv3ppKt4XMhE1rfo\"),\n\t[]byte(\"digest2\"),[]byte(\"xLrbWN5QJBJUAsdevfrxGlN3o0p8VZMnFFnV9iMll5o\"),\n\t[]byte(\"digest3\"),[]byte(\"THIS_IS_DIGEST_3\"),\n\t[]byte(\"digest4\"),[]byte(\"THIS_IS_DIGEST_4\")})\n\n\tif res.Status != shim.OK {\n\t\tfmt.Println(\"Invoke failed\", string(res.Message))\n\t\tt.FailNow()\n\t}\n\t\n\tresGet := stub.MockInvoke(\"1\", [][]byte{[]byte(\"get\"), []byte(\"S52fkpF2rCEArSuwqyDA9tVjawUdrkGzbNQLaa7xJfA=\")})\n\tif resGet.Status != shim.OK {\n\t\tfmt.Println(\"Invoke failed\", string(resGet.Message))\n\t\tt.FailNow()\n\t}\n}", "func TestSqlSMSStorage_GetSMSs(t *testing.T) {\n\n}", "func (m *MockSystemContract) Init(arg0 core.Keepers, arg1 types1.BaseTx, arg2 uint64) core.SystemContract {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Init\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(core.SystemContract)\n\treturn ret0\n}", "func TestSetGoodArgsFull(t *testing.T) {\n\tfmt.Println(\"Entering the test method for SetGoodArgsFull\")\n\tprovcc := new(SimpleAsset)\n\tstub := shim.NewMockStub(\"ANY_PARAM\", provcc)\n\n\t// Testing the init. It always return true. No parameters in init. 
\n\t\n\tcheckInit(t, stub, [][]byte{[]byte(\"init\")})\n\n\tres := stub.MockInvoke(\"1\", [][]byte{[]byte(\"set\"), []byte(\"S52fkpF2rCEArSuwqyDA9tVjawUdrkGzbNQLaa7xJfA=\"),\n\t[]byte(\"agentInfo.atype\"),[]byte(\"1.2.3.4\"),\n\t[]byte(\"agentInfo.id\"),[]byte(\"agentidentifier\"),\n\t[]byte(\"agentinfo.name\"),[]byte(\"7.8.9\"),\n\t[]byte(\"agentinfo.idp\"),[]byte(\"urn:tiani-spirit:sts\"),\n\t[]byte(\"locationInfo.id\"),[]byte(\"urn:oid:1.2.3\"),\n\t[]byte(\"locationInfo.name\"),[]byte(\"General Hospital\"),\n\t[]byte(\"locationInfo.locality\"),[]byte(\"Nashville, TN\"),\n\t[]byte(\"locationInfo.docid\"),[]byte(\"1.2.3\"),\n\t[]byte(\"action\"),[]byte(\"ex:CREATE\"),\n\t[]byte(\"date\"),[]byte(\"2018-11-10T12:15:55.028Z\"),\n\t[]byte(\"digest1\"),[]byte(\"E0nioxbCYD5AlzGWXDDDl0Gt5AAKv3ppKt4XMhE1rfo\"),\n\t[]byte(\"digest3\"),[]byte(\"xLrbWN5QJBJUAsdevfrxGlN3o0p8VZMnFFnV9iMll5o\")})\n\n\tif res.Status != shim.OK {\n\t\tfmt.Println(\"Invoke failed\", string(res.Message))\n\t\tt.FailNow()\n\t}\n\t\n}", "func TestClient_AddSAtoRole(t *testing.T) {\n\ttype testvalue = struct {\n\t\tsaname string\n\t\tproject string\n\t\troles []string\n\t\tpolicy *cloudresourcemanager.Policy\n\t\tcondition *cloudresourcemanager.Expr\n\t}\n\n\tt.Run(\"AddSAtoRole should fail because of missing mandatory arguments\", func(t *testing.T) {\n\t\t//test with empty saname, projectname, roles slice and roles members.\n\t\ttestvalues := []testvalue{\n\t\t\t{saname: \"\", project: \"test_project_01\", roles: []string{\"test_role_01\"}, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"\", roles: []string{\"test_role_01\"}, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"test_project_01\", roles: nil, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"test_project_01\", roles: []string{\"\"}, policy: nil, condition: nil},\n\t\t}\n\t\tfor _, tv := range testvalues {\n\t\t\tmockCRM := &mocks.CRM{}\n\t\t\tclient, _ := 
New(mockCRM)\n\n\t\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t\t//should return error\n\t\t\tif test := assert.EqualErrorf(t, err, fmt.Sprintf(\"One of mandatory method arguments saname, projectname ,role can not be empty. Got values. saname: %s projectname: %s roles: %v.\", tv.saname, tv.project, tv.roles), \"\\tnot expected: AddSAtoRole returned unexpected error or nil.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error.\")\n\t\t\t}\n\t\t\t//should return nil policy\n\t\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t\t}\n\t\t\t//should not call crmservice.GetPolicy\n\t\t\tif test := mockCRM.AssertNotCalled(t, \"GetPolicy\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t\t}\n\t\t\t//should not call crmservice.SetPolicy\n\t\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should fail because safqdn passed as saname argument\", func(t *testing.T) {\n\t\t//test with safqdn passed as saname\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01@test_project_01.iam.gserviceaccount.com\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: nil,\n\t\t\tcondition: nil,\n\t\t}\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.EqualErrorf(t, err, fmt.Sprintf(\"saname argument can not be serviceaccount fqdn. 
Provide only name, without domain part. Got value: %s.\", tv.saname), \"\\tnot expected: AddSAtoRole returned unexpected error or nil.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error.\")\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should not call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"GetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should fail because got error when getting policy from GCP\", func(t *testing.T) {\n\t\t//test with correct arguments\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\t\tEtag: 
\"\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t},\n\t\t\tcondition: nil}\n\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\n\t\tmockCRM.On(\"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(nil, errors.New(\"GetPolicy() error.\"))\n\t\tdefer mockCRM.AssertExpectations(t)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole do not returned error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\tif test := assert.Containsf(t, err.Error(), \"When downloading policy for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t}\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 1); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() 
did not call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should fail because got PolicyModifiedError when setting policy in GCP\", func(t *testing.T) {\n\t\t//test with correct values\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\t\tEtag: \"initial-Etag\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t},\n\t\t\tcondition: nil}\n\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\n\t\tmockCRM.On(\"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(func(string, *cloudresourcemanager.GetIamPolicyRequest) *cloudresourcemanager.Policy {\n\t\t\tif len(mockCRM.Calls) == 1 {\n\t\t\t\treturn tv.policy\n\t\t\t} else if len(mockCRM.Calls) == 2 {\n\t\t\t\treturn &cloudresourcemanager.Policy{Etag: \"different-Etag\"}\n\t\t\t}\n\t\t\treturn nil\n\t\t}, nil)\n\t\tdefer mockCRM.AssertExpectations(t)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole 
do not returned error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\tif test := assert.Containsf(t, err.Error(), \"When checking if policy was modified for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t}\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should fail because got error when setting policy in GCP\", func(t *testing.T) {\n\t\t//test with correct values\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: 
[]*cloudresourcemanager.Binding{\n\t\t\t\t\t&cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}},\n\t\t\t\tEtag: \"test-Etag\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t},\n\t\t\tcondition: nil,\n\t\t}\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\t\tmockCRM.On(\"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(tv.policy, nil)\n\t\tmockCRM.On(\"SetPolicy\", tv.project, mock.AnythingOfType(\"*cloudresourcemanager.SetIamPolicyRequest\")).Return(nil, errors.New(\"crmservice.SetPolicy-error\"))\n\t\tdefer mockCRM.AssertExpectations(t)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole did not returned error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\tif test := assert.Containsf(t, err.Error(), \"When setting new policy for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t}\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should call 
crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"SetPolicy\", tv.project, mockCRM.Calls[2].Arguments.Get(1).(*cloudresourcemanager.SetIamPolicyRequest)); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t}\n\t\t//should call crmservice.SetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"SetPolicy\", 1); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.SetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.SetPolicy() unexpected number of times.\")\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should add role without errors.\", func(t *testing.T) {\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{\n\t\t\t\t\t&cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\t\tRole: 
\"roles/owner\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}},\n\t\t\t\tEtag: \"test-Etag\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t},\n\t\t\tcondition: nil,\n\t\t}\n\t\treturnpolicy := &cloudresourcemanager.Policy{\n\t\t\tAuditConfigs: nil,\n\t\t\tBindings: []*cloudresourcemanager.Binding{\n\t\t\t\t&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:test_sa_01@test_project_01.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/test_role_01\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\tEtag: \"test-Etag\",\n\t\t\tVersion: 0,\n\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\tForceSendFields: nil,\n\t\t\tNullFields: nil,\n\t\t}\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\t\tmockCRM.On(\"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(tv.policy, nil)\n\t\tmockCRM.On(\"SetPolicy\", tv.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: 
returnpolicy}).Return(returnpolicy, nil)\n\t\tdefer mockCRM.AssertExpectations(t)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.Nil(t, err, \"\\tnot expected: AddSAtoRole returned not nil error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned nil error.\")\n\t\t}\n\t\t//should return not nil policy\n\t\tif test := assert.NotNil(t, policy, \"\\tnot expected: AddSAtoRole returned nil policy object.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned not nil policy object.\")\n\t\t}\n\t\t//should return policy of type *cloudresourcemanager.Policy\n\t\tif test := assert.IsType(t, &cloudresourcemanager.Policy{}, policy, \"\\tnotexpected: AddSAtoRole returned object not type of *cloudresourcemanager.Policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned object of expected type.\")\n\t\t}\n\t\t//should return same policy as returned by GCP API call\n\t\tif test := assert.Equal(t, returnpolicy, policy, \"\\tnot expected: AddSAtoRole returned different policy than returned by GCP API call.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned same policy as GCP API call.\")\n\t\t}\n\t\t//should call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy twice\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertCalled(t, 
\"SetPolicy\", tv.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: returnpolicy}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t}\n\t\t//should call crmservice.SetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"SetPolicy\", 1); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.SetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.SetPolicy() unexpected number of times.\")\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should add serviceaccount to role without errors.\", func(t *testing.T) {\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{\n\t\t\t\t\t&cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"serviceAccount:some_sa@test_project_01.iam.gserviceaccount.com\"},\n\t\t\t\t\t\tRole: \"roles/test_role_01\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}},\n\t\t\t\tEtag: \"test-Etag\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: 
nil,\n\t\t\t\tNullFields: nil,\n\t\t\t},\n\t\t\tcondition: nil,\n\t\t}\n\t\treturnpolicy := &cloudresourcemanager.Policy{\n\t\t\tAuditConfigs: nil,\n\t\t\tBindings: []*cloudresourcemanager.Binding{\n\t\t\t\t&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:some_sa@test_project_01.iam.gserviceaccount.com\", \"serviceAccount:test_sa_01@test_project_01.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/test_role_01\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\tEtag: \"test-Etag\",\n\t\t\tVersion: 0,\n\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\tForceSendFields: nil,\n\t\t\tNullFields: nil,\n\t\t}\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\t\tmockCRM.On(\"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(tv.policy, nil)\n\t\tmockCRM.On(\"SetPolicy\", tv.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: returnpolicy}).Return(returnpolicy, nil)\n\t\tdefer mockCRM.AssertExpectations(t)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.Nil(t, err, \"\\tnot expected: AddSAtoRole returned not nil error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned nil error.\")\n\t\t}\n\t\t//should return not nil policy\n\t\tif test := assert.NotNil(t, policy, \"\\tnot expected: 
AddSAtoRole returned nil policy object.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned not nil policy object.\")\n\t\t}\n\t\t//should return policy of type *cloudresourcemanager.Policy\n\t\tif test := assert.IsType(t, &cloudresourcemanager.Policy{}, policy, \"\\tnotexpected: AddSAtoRole returned object not type of *cloudresourcemanager.Policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned object of expected type.\")\n\t\t}\n\t\t//should return same policy as returned by GCP API call\n\t\tif test := assert.Equal(t, returnpolicy, policy, \"\\tnot expected: AddSAtoRole returned different policy than returned by GCP API call.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned same policy as GCP API call.\")\n\t\t}\n\t\t//should call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy twice\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"SetPolicy\", tv.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: returnpolicy}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t}\n\t\t//should call crmservice.SetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"SetPolicy\", 1); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called 
crmservice.SetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.SetPolicy() unexpected number of times.\")\n\t\t}\n\t})\n\n}", "func TestSIFTLib2(t *testing.T) { TestingT(t) }", "func TestSetGoodArgs(t *testing.T) {\n\tfmt.Println(\"Entering the test method for SetGoodArgs\")\n\tprovcc := new(SimpleAsset)\n\tstub := shim.NewMockStub(\"ANY_PARAM\", provcc)\n\n\t// Testing the init. It always return true. No parameters in init. \n\t\n\tcheckInit(t, stub, [][]byte{[]byte(\"init\")})\n\n\tres := stub.MockInvoke(\"1\", [][]byte{[]byte(\"set\"), []byte(\"S52fkpF2rCEArSuwqyDA9tVjawUdrkGzbNQLaa7xJfA=\"),\n\t[]byte(\"agentInfo.atype\"),[]byte(\"1.2.3.4\"),\n\t[]byte(\"agentInfo.id\"),[]byte(\"agentidentifier\"),\n\t[]byte(\"agentinfo.name\"),[]byte(\"7.8.9\"),\n\t[]byte(\"agentinfo.idp\"),[]byte(\"urn:tiani-spirit:sts\"),\n\t[]byte(\"locationInfo.id\"),[]byte(\"urn:oid:1.2.3\"),\n\t[]byte(\"locationInfo.name\"),[]byte(\"General Hospital\"),\n\t[]byte(\"locationInfo.locality\"),[]byte(\"Nashville, TN\"),\n\t[]byte(\"locationInfo.docid\"),[]byte(\"1.2.3\"),\n\t[]byte(\"action\"),[]byte(\"ex:CREATE\"),\n\t[]byte(\"date\"),[]byte(\"2018-11-10T12:15:55.028Z\")})\n\n\tif res.Status != shim.OK {\n\t\tfmt.Println(\"Invoke failed\", string(res.Message))\n\t\tt.FailNow()\n\t}\n\t\n}", "func TestAssetSysCC_InvalidateToken(t *testing.T) {\n\n\tfmt.Println(\"-----------------------------------\")\n\tfmt.Println(\"Test3: invalidateToken\")\n\n\t//fmt.Println(\"******test string to big.newInt\")\n\t//str := \"12321\"\n\t//strInt := big.NewInt(0)\n\t//strInt.SetString(str,10)\n\t//fmt.Println(strInt.String())\n\t//fmt.Println(\"*******************************\")\n\n\tascc := new(AssetSysCC)\n\tstub := shim.NewMockStub(\"ascc\", ascc)\n\tcheckInit(t, stub, [][]byte{[]byte(\"\")})\n\n\tres_test3 := stub.MockInvoke(\"1\", [][]byte{[]byte(\"issueToken\"), []byte(\"SSToken\"), []byte(\"250\"), []byte(\"18\"), 
[]byte(MAddress[:])})\n\n\tif res_test3.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test3.Message))\n\t\tt.FailNow()\n\t}\n\n\t////query token quantity\n\t//res1 := stub.MockInvoke(\"2\", [][]byte{[]byte(\"getBalance\"), []byte(MAddress[:]), []byte(\"SSToken\")});\n\t//if res1.Status != shim.OK {\n\t//\tfmt.Println(\"Query failed\", string(res1.Message))\n\t//\tt.FailNow()\n\t//}\n\t//amount,_ := strconv.Atoi(string(res1.Payload))\n\t//if amount != 250 {\n\t//\tfmt.Printf(\"Query result error! %v\", amount )\n\t//\tt.FailNow()\n\t//}\n\n\t//beging to invalidate this token\n\tcheckQueryInfo(t, stub, [][]byte{[]byte(\"getTokenInfo\"), []byte(\"SSToken\")})\n\n\ttestInvalidate := stub.MockInvoke(\"4\", [][]byte{[]byte(\"invalidateToken\"), []byte(\"SSToken\")});\n\tif testInvalidate.Status != shim.OK {\n\t\tfmt.Println(\"Query failed\", string(testInvalidate.Message))\n\t\tt.FailNow()\n\t}\n\n\tcheckQueryInfo(t, stub, [][]byte{[]byte(\"getTokenInfo\"), []byte(\"SSToken\")})\n}", "func TestClient_AddSAtoRole(t *testing.T) {\n\ttype testvalue = struct {\n\t\tsaname string\n\t\tproject string\n\t\troles []string\n\t\tpolicy *cloudresourcemanager.Policy\n\t\tcondition *cloudresourcemanager.Expr\n\t}\n\tt.Run(\"AddSAtoRole should fail because of missing mandatory arguments\", func(t *testing.T) {\n\t\t//test with empty saname, projectname, roles slice and roles members.\n\t\tvalues := []testvalue{\n\t\t\t{saname: \"\", project: \"test_project_01\", roles: []string{\"test_role_01\"}, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"\", roles: []string{\"test_role_01\"}, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"test_project_01\", roles: nil, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"test_project_01\", roles: []string{\"\"}, policy: nil, condition: nil},\n\t\t}\n\t\tfor _, value := range values {\n\t\t\tt.Logf(\"\\n\\tTesting with values:\\n\\tsaname: 
%s\\n\\tnproejct: %s\\n\\troles: %v\", value.saname, value.project, value.roles)\n\t\t\tmockCRM := &mocks.CRM{}\n\t\t\tclient, _ := New(mockCRM)\n\t\t\t//mockCRM.On(\"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(nil, nil)\n\t\t\t//mockCRM.On(\"SetPolicy\", value.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: nil,}).Return(nil, nil)\n\t\t\tdefer mockCRM.AssertExpectations(t)\n\t\t\tpolicy, err := client.AddSAtoRole(value.saname, value.roles, value.project, nil)\n\t\t\t//should return error\n\t\t\tif test := assert.EqualErrorf(t, err, fmt.Sprintf(\"One of mandatory method arguments saname, projectname ,role can not be empty. Got values. saname: %s projectname: %s roles: %v.\", value.saname, value.project, value.roles), \"\\tnot expected: AddSAtoRole returned unexpected error or nil.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error.\")\n\t\t\t}\n\t\t\t//should return nil policy\n\t\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t\t}\n\t\t\t//should not call crmservice.GetPolicy\n\t\t\tif test := mockCRM.AssertNotCalled(t, \"GetPolicy\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t\t}\n\t\t\t//should not call crmservice.SetPolicy\n\t\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t\t}\n\t\t\t//client.policies should not contain project policy\n\t\t\tif test := assert.Nilf(t, client.policies[value.project], \"\\tnot expected: Client object holds policy for project: %s.\", value.project); test 
{\n\t\t\t\tt.Logf(\"\\texpected: Client object do not contain policy for project: %s.\", value.project)\n\t\t\t}\n\t\t}\n\t})\n\tt.Run(\"AddSAtoRole should fail because safqdn passed as saname argument\", func(t *testing.T) {\n\t\t//test with safqdn passed as saname\n\t\tvar (\n\t\t\tsaname = \"test_sa_01@test_project_01.iam.gserviceaccount.com\"\n\t\t\tproject = \"test_project_01\"\n\t\t\troles = []string{\"test_role_01\"}\n\t\t)\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\t\t//mockCRM.On(\"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(nil, nil)\n\t\t//mockCRM.On(\"SetPolicy\", value.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: nil,}).Return(nil, nil)\n\t\tdefer mockCRM.AssertExpectations(t)\n\t\tpolicy, err := client.AddSAtoRole(saname, roles, project, nil)\n\t\t//should return error\n\t\tif test := assert.EqualErrorf(t, err, fmt.Sprintf(\"saname argument can not be serviceaccount fqdn. Provide only name, without domain part. 
Got value: %s.\", saname), \"\\tnot expected: AddSAtoRole returned unexpected error or nil.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error.\")\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should not call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"GetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t}\n\t\t//client.policies should not contain project policy\n\t\tif test := assert.Nilf(t, client.policies[project], \"\\tnot expected: Client object holds policy for project: %s.\", project); test {\n\t\t\tt.Logf(\"\\texpected: Client object do not contain policy for project: %s.\", project)\n\t\t}\n\t})\n\tt.Run(\"AddSAtoRole should fail because got error when getting policy from GCP\", func(t *testing.T) {\n\t\t//test with correct arguments\n\t\tvalue := testvalue{saname: \"test_sa_01\", project: \"test_project_01\", roles: []string{\"test_role_01\"}, policy: &cloudresourcemanager.Policy{\n\t\t\tAuditConfigs: nil,\n\t\t\tBindings: []*cloudresourcemanager.Binding{&cloudresourcemanager.Binding{\n\t\t\t\tCondition: nil,\n\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\tRole: \"roles/owner\",\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\tCondition: nil,\n\t\t\t\tMembers: 
[]string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t}},\n\t\t\tEtag: \"\",\n\t\t\tVersion: 0,\n\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\tForceSendFields: nil,\n\t\t\tNullFields: nil,\n\t\t}, condition: nil}\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\t\tmockCRM.On(\"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(nil, errors.New(\"GetPolicy() error.\"))\n\t\tdefer mockCRM.AssertExpectations(t)\n\t\tpolicy, err := client.AddSAtoRole(value.saname, value.roles, value.project, value.condition)\n\t\t//should return error\n\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole do not returned error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\tif test := assert.Containsf(t, err.Error(), \"When downloading policy for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t}\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 1); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called 
crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t}\n\t\t//client.policies should not contain project policy\n\t\tif test := assert.Nilf(t, client.policies[value.project], \"\\tnot expected: Client object holds policy for project: %s.\", value.project); test {\n\t\t\tt.Logf(\"\\texpected: Client object do not contain policy for project: %s.\", value.project)\n\t\t}\n\t})\n\t/*\n\t\t// TODO: Can't test this as different return values are needed for different cals to crmservice.GetPolicy method. Need research how to do it.\n\t\tt.Run(\"AddSAtoRole should fail because got PolicyModifiedError when setting policy in GCP\", func(t *testing.T) {\n\t\t\t//test with correct values\n\t\t\tvalue := testvalue{saname: \"test_sa_01\", project: \"test_project_01\", roles: []string{\"test_role_01\"}, policy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\t\tEtag: \"\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t}, condition: nil}\n\t\t\tmockCRM := &mocks.CRM{}\n\t\t\tclient, 
_ := New(mockCRM)\n\t\t\tmockCRM.On(\"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(nil, &PolicyModifiedError{msg:\"test PolicyModifiedError\"})\n\t\t\t//mockCRM.On(\"SetPolicy\", value.project, &cloudresourcemanager.SetIamPolicyRequest{\n\t\t\t//\tPolicy: testvalues[1].policy,\n\t\t\t//}).Return(testvalues[1].policy, nil)\n\t\t\tdefer mockCRM.AssertExpectations(t)\n\t\t\tpolicy, err := client.AddSAtoRole(value.saname, value.roles, value.project, value.condition)\n\t\t\t//should return error\n\t\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole do not returned error.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\t\tif test := assert.Containsf(t, err.Error(), \"When checking if policy was modified for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t\t}\n\t\t\t}\n\t\t\t//should return nil policy\n\t\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t\t}\n\t\t\t//should call crmservice.GetPolicy\n\t\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t\t}\n\t\t\t//should call crmservice.GetPolicy once\n\t\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t\t}\n\t\t\t//should not call crmservice.SetPolicy\n\t\t\tif test := 
mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t\t}\n\t\t\t//client.policies should not contain project policy\n\t\t\tif test := assert.Nilf(t, client.policies[value.project], \"\\tnot expected: Client object holds policy for project: %s.\", value.project); test {\n\t\t\t\tt.Logf(\"\\texpected: Client object do not contain policy for project: %s.\", value.project)\n\t\t\t}\n\t\t\t//test with correct values\n\t\t\t//should return error\n\t\t\t//should return nil policy\n\t\t\t//should call crmservice.GetPolicy\n\t\t\t//should call crmservice.GetPolicy twice\n\t\t\t//should not call crmservice.SetPolicy\n\t\t\t//client.policies should not contain project policy\n\t\t})\n\n\t\tt.Run(\"AddSAtoRole should fail because got error when setting policy in GCP\", func(t *testing.T) {\n\t\t\t//test with correct values\n\t\t\tvalue := testvalue{saname: \"test_sa_01\", project: \"test_project_01\", roles: []string{\"test_role_01\"}, policy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\t\tEtag: \"\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t}, condition: nil}\n\t\t\tmockCRM := 
&mocks.CRM{}\n\t\t\tclient, _ := New(mockCRM)\n\t\t\tmockCRM.On(\"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(, nil)\n\t\t\t//mockCRM.On(\"SetPolicy\", value.project, &cloudresourcemanager.SetIamPolicyRequest{\n\t\t\t//\tPolicy: testvalues[1].policy,\n\t\t\t//}).Return(testvalues[1].policy, nil)\n\t\t\tdefer mockCRM.AssertExpectations(t)\n\t\t\tpolicy, err := client.AddSAtoRole(value.saname, value.roles, value.project, value.condition)\n\t\t\t//should return error\n\t\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole do not returned error.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\t\tif test := assert.Containsf(t, err.Error(), \"When checking if policy was modified for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t\t}\n\t\t\t}\n\t\t\t//should return nil policy\n\t\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t\t}\n\t\t\t//should call crmservice.GetPolicy\n\t\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t\t}\n\t\t\t//should call crmservice.GetPolicy once\n\t\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t\t}\n\t\t\t//should not call crmservice.SetPolicy\n\t\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test 
{\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t\t}\n\t\t\t//client.policies should not contain project policy\n\t\t\tif test := assert.Nilf(t, client.policies[value.project], \"\\tnot expected: Client object holds policy for project: %s.\", value.project); test {\n\t\t\t\tt.Logf(\"\\texpected: Client object do not contain policy for project: %s.\", value.project)\n\t\t\t}\n\t\t\t//test with correct values\n\t\t\t//should return error\n\t\t\t//should return nil policy\n\t\t\t//should call crmservice.GetPolicy\n\t\t\t//should call crmservice.GetPolicy twice\n\t\t\t//should call crmservice.SetPolicy\n\t\t\t//should call crmservice.SetPolicy once\n\t\t\t//client.policies should not contain project policy\n\t\t})\n\t\tt.Run(\"AddSAtoRole should add serviceaccount to role without errors.\", func(t *testing.T) {\n\t\t\t//test with correct values and multiple roles\n\t\t\t//should return nil error\n\t\t\t//should return *cloudresourcemanager.Policy\n\t\t\t//should call crmservice.GetPolicy\n\t\t\t//should call crmservice.GetPolicy twice\n\t\t\t//should call crmservice.SetPolicy\n\t\t\t//should call crmservice.SetPolicy once\n\t\t\t//client.policies should contain project policy with correct binding\n\t\t\t//client.policies should contain project policy with provided member\n\t\t\t//Returned policy should be equal to the client.policies project policy\n\t\t})\n\t*/\n}", "func (m *MockBuilder) Generic() resource.ClusterSnapshot {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Generic\")\n\tret0, _ := ret[0].(resource.ClusterSnapshot)\n\treturn ret0\n}", "func TestSetupReplaceMock(t *testing.T) {\n\tt.SkipNow()\n\tstudent, mocks, err := MockCluster(false, nil, t)\n\tif err != nil {\n\t\tt.Error(\"Couldn't set up mock cluster\", err)\n\t}\n\n\t// Create a new impl for an rpc function\n\tdenyVote := func(ctx context.Context, req 
*RequestVoteRequest) (*RequestVoteReply, error) {\n\t\treturn &RequestVoteReply{Term: req.Term, VoteGranted: false}, nil\n\t}\n\n\t// replace the existing impl\n\tmocks[0].RequestVote = denyVote\n\tmocks[1].RequestVote = denyVote\n\n\tmocks[0].JoinCluster()\n\tmocks[1].JoinCluster()\n\n\ttime.Sleep(DefaultConfig().ElectionTimeout * 4)\n\n\tt.Log(\"Student node is:\", student.State)\n\n\tif student.State != CANDIDATE_STATE {\n\t\tt.Error(\"student state was not candidate, was:\", student.State)\n\t}\n\n\t// test as part of an rpc function\n\tmocks[0].RequestVote = func(ctx context.Context, req *RequestVoteRequest) (*RequestVoteReply, error) {\n\t\tt.Logf(\"Mock 0 recieved request vote: last_idx: %v term: %v\", req.GetLastLogIndex(), req.GetLastLogTerm())\n\t\tif req.GetLastLogIndex() != 0 || req.GetLastLogTerm() != 0 {\n\t\t\tt.Errorf(\"Student node failed to request vote correctly: last_idx: %v term: %v\", req.GetLastLogIndex(), req.GetLastLogTerm())\n\t\t}\n\n\t\tif term := student.GetCurrentTerm(); req.GetTerm() != term {\n\t\t\tt.Errorf(\"Student node sent the wrong term: (sent %v, expecting %v)\", req.GetTerm(), term)\n\t\t}\n\t\treturn denyVote(ctx, req)\n\t}\n\n\ttime.Sleep(DefaultConfig().ElectionTimeout * 5)\n}", "func (m *MockAll) SharedRules() SharedRules {\n\tret := m.ctrl.Call(m, \"SharedRules\")\n\tret0, _ := ret[0].(SharedRules)\n\treturn ret0\n}", "func (_m *MockOStream) Reset(buffer checked.Bytes) {\n\t_m.ctrl.Call(_m, \"Reset\", buffer)\n}", "func (_m *MockSeriesIteratorPool) Init() {\n\t_m.ctrl.Call(_m, \"Init\")\n}", "func (_m *mockSuite) setup(_a0 testing.T) {\n\t_m.Called(_a0)\n}", "func (m *MockSessionProvider) TS() error {\n\tret := m.ctrl.Call(m, \"TS\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockisListenResponse_Content) isListenResponse_Content() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"isListenResponse_Content\")\n}", "func (m *MockCommandScaffold) Use() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, 
\"Use\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockClient) ForSubcomponent(subcomponent string) github.Client {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ForSubcomponent\", subcomponent)\n\tret0, _ := ret[0].(github.Client)\n\treturn ret0\n}", "func (m *MockManager) SerializeContentSHA(arg0 string) error {\n\tret := m.ctrl.Call(m, \"SerializeContentSHA\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestCallFunc_function(t *testing.T) {\n\n}", "func TestCallFunc_arguments(t *testing.T) {\n\n}", "func (m *MockisKey_KeyInfo) isKey_KeyInfo() {\n\tm.ctrl.Call(m, \"isKey_KeyInfo\")\n}", "func (m *MockSystemContract) Exec() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Exec\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestAssetSysCC_RegisterToken(t *testing.T) {\n\n\tfmt.Println(\"-----------------------------------\")\n\tfmt.Println(\"Test1: registerToken\")\n\n\tascc := new(AssetSysCC)\n\tstub := shim.NewMockStub(\"ascc\", ascc)\n\tcheckInit(t, stub, [][]byte{[]byte(\"\")})\n\n\tres_test3 := stub.MockInvoke(\"1\", [][]byte{[]byte(\"registerToken\"), []byte(\"SSToken\"), []byte(\"250\"), []byte(\"18\"), []byte(MAddress[:])})\n\n\tif res_test3.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test3.Message))\n\t\tt.FailNow()\n\t}\n\n\tfmt.Println(\"Test registerToken Success!\")\n\n}", "func (m *MockSessionRunner) Retire(arg0 protocol.ConnectionID) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Retire\", arg0)\n}", "func (s AlwaysPanicStakingMock) Slash(sdk.Context, sdk.ConsAddress, int64, int64, sdk.Dec) {\n\tpanic(\"unexpected call\")\n}", "func (m *MockLogic) SysKeeper() core.SystemKeeper {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"SysKeeper\")\n\tret0, _ := ret[0].(core.SystemKeeper)\n\treturn ret0\n}", "func TestSinglePrewrite4A(t *testing.T) {\n}", "func Test_sampe002(t *testing.T) {\n\n}", "func (m *MockStreamConnection) Dispatch(buffer buffer.IoBuffer) 
{\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Dispatch\", buffer)\n}", "func (m *MockRepositoryService) Search(arg0 http.ResponseWriter, arg1 *http.Request) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Search\", arg0, arg1)\n}", "func (_m *MockAggregate) incrementVersion() {\n\t_m.Called()\n}", "func (m *MockGQUICAEAD) Open(arg0, arg1 []byte, arg2 protocol.PacketNumber, arg3 []byte) ([]byte, protocol.EncryptionLevel, error) {\n\tret := m.ctrl.Call(m, \"Open\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(protocol.EncryptionLevel)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func _getMock(url string) (content []byte, err error) {\n\tvar idnum = crc32.ChecksumIEEE([]byte(url))%uint32(5) + 1\n\tvar response = fmt.Sprintf(mockResponseTemplate, idnum, idnum, \"no message\", 200)\n\treturn []byte(response), nil\n}", "func (_m *MockAggregate) setVersion(_a0 int) {\n\t_m.Called(_a0)\n}", "func (m *MockisIpsecSAAction_SaHandle) Size() int {\n\tret := m.ctrl.Call(m, \"Size\")\n\tret0, _ := ret[0].(int)\n\treturn ret0\n}", "func (m *MockServerStreamConnection) Dispatch(buffer buffer.IoBuffer) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Dispatch\", buffer)\n}", "func TestMockIATBatchHeaderFF(t *testing.T) {\n\ttestMockIATBatchHeaderFF(t)\n}", "func (_m *MockIStream) Reset(r io.Reader) {\n\t_m.ctrl.Call(_m, \"Reset\", r)\n}", "func testInMemoryDataStore() IDataStore {\n return NewInMemoryDataStore();\n}", "func (mock *Serf) Reset() {\n\tlockSerfBootstrap.Lock()\n\tmock.calls.Bootstrap = nil\n\tlockSerfBootstrap.Unlock()\n\tlockSerfCluster.Lock()\n\tmock.calls.Cluster = nil\n\tlockSerfCluster.Unlock()\n\tlockSerfID.Lock()\n\tmock.calls.ID = nil\n\tlockSerfID.Unlock()\n\tlockSerfJoin.Lock()\n\tmock.calls.Join = nil\n\tlockSerfJoin.Unlock()\n\tlockSerfMember.Lock()\n\tmock.calls.Member = nil\n\tlockSerfMember.Unlock()\n\tlockSerfShutdown.Lock()\n\tmock.calls.Shutdown = nil\n\tlockSerfShutdown.Unlock()\n}", "func MockOnSetSystem(ctx 
context.Context, mockAPI *redfishMocks.RedfishAPI, systemID string,\n\tcomputerSystem redfishClient.ComputerSystem, httpResponse *http.Response, err error) {\n\trequest := redfishClient.ApiSetSystemRequest{}.ComputerSystem(computerSystem)\n\tmockAPI.On(\"SetSystem\", ctx, systemID).Return(request).Times(1)\n\tmockAPI.On(\"SetSystemExecute\", mock.Anything).Return(computerSystem, httpResponse, err).Times(1)\n}", "func (m *MockServiceRepositoryInterface) Request(id string) (model.Service, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Request\", id)\n\tret0, _ := ret[0].(model.Service)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClientStreamConnection) Dispatch(buffer buffer.IoBuffer) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Dispatch\", buffer)\n}", "func TestRestOfInternalCode(t *testing.T) {\n\n\t// In this case unit testing will not help as we need to actually corever\n\t// this package with test. Because real functions hide under internal structures\n\t// which we do not expose, so our previous approach will no longer works.\n\t// Well it works but coverage does not detect that we are testing actual\n\t// implementation\n\n\t// In order to cover this part we will need to either pretend that we are\n\t// testing something or create real integration tests and ensure that mongod\n\t// process is running. In my case I will just fake my testing and do not use\n\t// assert. This way my test will pass either way\n\n\t// Create database context. 
I use real database, but it is possible to mock\n\t// database and configuration through interfaces.\n\tconf := config.GetConfig()\n\tclient, _ := databases.NewClient(conf)\n\tclient.StartSession()\n\n\tdb := databases.NewDatabase(conf, client)\n\tclient.Connect()\n\tdb.Client()\n\tvar result interface{}\n\t// because we do not care for actual results, we just quickly timeout the\n\t// call and we use incorrect call method\n\ttimeoutCtx, _ := context.WithTimeout(context.Background(), 1*time.Microsecond)\n\tdb.Collection(\"non-fake-existing-collection\").FindOne(timeoutCtx, \"incorrect-value\").Decode(&result)\n\n\t// insert and delete functions seems to panic instead of returning and error.\n\t// I did not investigate anything in this case as this is not our main goal.\n\t// Just define assert panic function and use this panicing function in it.\n\tvar mongoPanics assert.PanicTestFunc\n\n\tmongoPanics = func() {\n\t\tdb.Collection(\"non-fake-existing-collection\").InsertOne(timeoutCtx, result)\n\t}\n\tassert.Panics(t, mongoPanics)\n\n\tmongoPanics = func() {\n\t\tdb.Collection(\"non-fake-existing-collection\").DeleteOne(timeoutCtx, result)\n\t}\n\tassert.Panics(t, mongoPanics)\n\n\t// And it is done. We do not need to have mongo running and our code is\n\t// covered 100%. 
Well the actual implementation is faked, but it should be\n\t// tested via integration tests, not unit tests.\n\n}", "func TestInternalFritzAPI(t *testing.T) {\n\ttestCases := []struct {\n\t\ttc func(t *testing.T, internal Internal)\n\t}{\n\t\t{testListLanDevices},\n\t\t{testListLogs},\n\t\t{testInetStats},\n\t\t{testBoxInfo},\n\t}\n\tfor _, testCase := range testCases {\n\t\tt.Run(fmt.Sprintf(\"Test aha api %s\", runtime.FuncForPC(reflect.ValueOf(testCase.tc).Pointer()).Name()), func(t *testing.T) {\n\t\t\tsrv := mock.New().Start()\n\t\t\tdefer srv.Close()\n\t\t\tinternal := setUpClient(t, srv)\n\t\t\tassert.NotNil(t, internal)\n\t\t\ttestCase.tc(t, internal)\n\t\t})\n\t}\n}", "func TestInvokePricerateAsset(t *testing.T) {\n\tlogger.Infof(\"Entering TestInvokePricerateAsset\")\n\n\t// Instantiate mockStub using this chaincode as the target chaincode to unit test\n\tstub := shim.NewMockStub(\"mockStub\", new(Dfarmsc))\n\tif stub == nil {\n\t\tt.Fatalf(\"MockStub creation failed\")\n\t}\n\n\tvar qualityinfos []QualityPerInfo\n\n\t// Here we perform a \"mock invoke\" to invoke the function method with associated parameters\n\t// The first parameter is the function we are invoking\n\t/*type QualityPerInfo struct {\n\t\tType string `json:\"TYPEmitempty\"`\n\t\tOpMode bool `json:\"OPMODEmitempty\"` //0 means add/more 1 means less/subscract\n\t\tPerValue float32 `json:\"PERVALUEmitempty\"`\n\t}*/\n\t/*type TableVarietyPerRateAsset struct {\n\t\tDocType string `json:\"docTypemitempty\"`\n\t\tProduceName string `json:\"PRODUCEmitempty\"`\n\t\tState string `json:\"STATEmitempty\"`\n\t\tCountry string `json:\"COUNTRYmitempty\"`\n\t\tVariety string `json:\"VARIETYmitempty\"`\n\t\tQualityPerInfos []QualityPerInfo `json:\"QUALITYPERINFOSmitempty\"`\n\t}*/\n\tqualityinfo := QualityPerInfo{\"Grade A\", false, .25}\n\tqualityinfos = append(qualityinfos, qualityinfo)\n\tperrate := TableVarietyPerRateAsset{\"\", \"Tomato\", \"NC\", \"USA\", \"Dankal\", qualityinfos}\n\tdata, _ := 
TableVarietyPerRateAssettoJson(perrate)\n\tlogger.Infof(\"TestInvokePricerateAsset: Data is %s\", string(data))\n\tresult := stub.MockInvoke(\"001\",\n\t\t[][]byte{[]byte(\"CreateTableVarietyPerRateAsset\"),\n\t\t\t[]byte(data)})\n\n\t// We expect a shim.ok if all goes well\n\tif result.Status != shim.OK {\n\t\tt.Fatalf(\"Expected unauthorized user error to be returned\")\n\t}\n\tlogger.Infof(\"TestInvokePricerateAsset: Create result is %s\", result)\n\tkey := TestKey{\"Dankal\", \"Tomato\", \"USA\", \"NC\"}\n\tdata, _ = json.Marshal(key)\n\tlogger.Infof(\"TestInvokePricerateAsset: Data is %s\", data)\n\tresult = stub.MockInvoke(\"001\",\n\t\t[][]byte{[]byte(\"QueryTableVarietyPerRateAsset\"),\n\t\t\t[]byte(data)})\n\n\t// We expect a shim.ok if all goes well\n\tif result.Status != shim.OK {\n\t\tt.Fatalf(\"Expected unauthorized user error to be returned\")\n\t}\n\tlogger.Infof(\"TestInvokePricerateAsset: Query Result is %s\", result)\n\n}", "func (m *MockMessageHandler) Sub(arg0 context.Context, arg1 *proto.SubRequest, arg2 *proto.Response) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Sub\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockisCryptoApiResponse_CryptoApiResp) isCryptoApiResponse_CryptoApiResp() {\n\tm.ctrl.Call(m, \"isCryptoApiResponse_CryptoApiResp\")\n}", "func newMockSubscriber() mockSubscriber {\n\treturn mockSubscriber{}\n}", "func (m *MockSaaSSystemConfigManager) Get(system, name string) (sdao.SaaSSystemConfig, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Get\", system, name)\n\tret0, _ := ret[0].(sdao.SaaSSystemConfig)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockUsecases) Call(n string) interface{} {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Call\", n)\n\tret0, _ := ret[0].(interface{})\n\treturn ret0\n}", "func (m *MockClient) Config() *sarama.Config {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Config\")\n\tret0, _ := ret[0].(*sarama.Config)\n\treturn 
ret0\n}", "func (m *MockAtomicLogic) SysKeeper() core.SystemKeeper {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"SysKeeper\")\n\tret0, _ := ret[0].(core.SystemKeeper)\n\treturn ret0\n}", "func mockTest0106(w http.ResponseWriter, r *http.Request) {\n\tmimetypeTable := make(map[string]string)\n\tmimetypeTable[\"txt\"] = \"text/plain\"\n\tmimetypeTable[\"jpg\"] = \"image/jpeg\"\n\tmimetypeTable[\"bin\"] = \"application/octet-stream\"\n\n\t// get query args\n\tmimetype, err := common.GetStringArgFromQuery(r, \"type\")\n\tif err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t\treturn\n\t}\n\tisErrLength, err := common.GetBoolArgFromQuery(r, \"errlen\")\n\tif err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t\treturn\n\t}\n\n\t// set mimetype\n\tif len(mimetype) == 0 {\n\t\tmimetype = \"txt\"\n\t}\n\tb, err := ioutil.ReadFile(fmt.Sprintf(\"testfile.%s\", mimetype))\n\tif err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t\treturn\n\t}\n\n\t// set mismatch body length\n\tcontentLen := len(b)\n\tif isErrLength {\n\t\tcontentLen += 10\n\t}\n\n\tw.Header().Set(common.TextContentType, mimetypeTable[mimetype])\n\tw.Header().Set(common.TextContentLength, strconv.Itoa(contentLen))\n\tw.WriteHeader(http.StatusOK)\n\tw.(http.Flusher).Flush() // write response headers\n\n\ttime.Sleep(time.Second)\n\tif _, err := io.Copy(w, bufio.NewReader(bytes.NewReader(b))); err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t}\n}", "func (m *MockManager) UpdateVersion() {\n\tm.ctrl.Call(m, \"UpdateVersion\")\n}", "func MockOnGetSystem(ctx context.Context, mockAPI *redfishMocks.RedfishAPI,\n\tsystemID string, computerSystem redfishClient.ComputerSystem,\n\thttpResponse *http.Response, err error, times int) {\n\ttestSystemRequest := redfishClient.ApiGetSystemRequest{}\n\tcall := mockAPI.On(\"GetSystem\", ctx, systemID).Return(testSystemRequest)\n\tif times > 0 {\n\t\tcall.Times(times)\n\t}\n\tcall = mockAPI.On(\"GetSystemExecute\", testSystemRequest).Return(computerSystem, httpResponse, err)\n\tif times > 0 
{\n\t\tcall.Times(times)\n\t}\n}", "func (m *MockSession) String(arg0 string) string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"String\", arg0)\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func TestGetVersions4A(t *testing.T) {\n}", "func TestSingleCommit4A(t *testing.T) {\n}", "func (_m *MockEncoder) Stream() xio.SegmentReader {\n\tret := _m.ctrl.Call(_m, \"Stream\")\n\tret0, _ := ret[0].(xio.SegmentReader)\n\treturn ret0\n}", "func (m *MockCerebroker) Resample() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Resample\")\n}", "func TestA(t *testing.T) {}", "func (n *mockAgent) init(ctx context.Context, sandbox *Sandbox, config KataAgentConfig) (bool, error) {\n\treturn false, nil\n}", "func (m *MockManager) String() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"String\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockConn) Set(path string, data []byte, version int32) (*zk.Stat, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Set\", path, data, version)\n\tret0, _ := ret[0].(*zk.Stat)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockPostForkBlock) setStatus(arg0 choices.Status) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"setStatus\", arg0)\n}", "func (m *MockOnSuccess) AuditSuccess() {\n\tm.ctrl.Call(m, \"AuditSuccess\")\n}", "func (m *MockServiceDependencySet) Generic() sets.ResourceSet {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Generic\")\n\tret0, _ := ret[0].(sets.ResourceSet)\n\treturn ret0\n}", "func (m *MockDriver) Open() error { return nil }", "func (m *MockResolver) Start() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Start\")\n}", "func mockAuthenticationComponent(log *zap.Logger, serviceName string) *AuthenticationComponent {\n\thttpTimeout := 300 * time.Millisecond\n\n\treturn NewAuthenticationComponent(&AuthenticationParams{\n\t\tAuthConfig: &core_auth_sdk.Config{\n\t\t\tIssuer: issuer,\n\t\t\tPrivateBaseURL: privateBaseUrl,\n\t\t\tAudience: audience,\n\t\t\tUsername: 
username,\n\t\t\tPassword: password,\n\t\t\tKeychainTTL: 0,\n\t\t},\n\t\tAuthConnectionConfig: &core_auth_sdk.RetryConfig{\n\t\t\tMaxRetries: 1,\n\t\t\tMinRetryWaitTime: 200 * time.Millisecond,\n\t\t\tMaxRetryWaitTime: 300 * time.Millisecond,\n\t\t\tRequestTimeout: 500 * time.Millisecond,\n\t\t},\n\t\tLogger: log,\n\t\tOrigin: origin,\n\t}, serviceName, httpTimeout)\n}", "func MockOnResetSystem(ctx context.Context, mockAPI *redfishMocks.RedfishAPI,\n\tsystemID string, requestBody *redfishClient.ResetRequestBody, redfishErr redfishClient.RedfishError,\n\thttpResponse *http.Response, err error) {\n\trequest := redfishClient.ApiResetSystemRequest{}.ResetRequestBody(*requestBody)\n\tmockAPI.On(\"ResetSystem\", ctx, systemID).Return(request).Times(1)\n\tmockAPI.On(\"ResetSystemExecute\", mock.Anything).Return(redfishErr, httpResponse, err).Times(1)\n}", "func (m *MockResponseHandler) RedirectTo(arg0 string) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RedirectTo\", arg0)\n}", "func (_m *MockMutableSeriesIteratorsPool) Init() {\n\t_m.ctrl.Call(_m, \"Init\")\n}", "func (m *MockRoutingRuleClient) BaseClient() clients.ResourceClient {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"BaseClient\")\n\tret0, _ := ret[0].(clients.ResourceClient)\n\treturn ret0\n}", "func (m *MockMappedResource) Aws() aws.Resource {\n\tret := m.ctrl.Call(m, \"Aws\")\n\tret0, _ := ret[0].(aws.Resource)\n\treturn ret0\n}", "func (m *MockManager) SerializeShipMetadata(arg0 api.ShipAppMetadata, arg1 string) error {\n\tret := m.ctrl.Call(m, \"SerializeShipMetadata\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func Test_Client_MapByAlpha2(t *testing.T) {\n\tret := mockClient.MapByAlpha2(\"SG\")\n\tassert.Equal(t, ret.Name, \"Singapore\")\n}", "func MockSystemCall(std_out, std_err string, err error) {\n\tcontext.SystemCall = func(dir string, cmd string, args []string, local_out, local_err io.Writer) error 
{\n\t\tlocal_out.Write([]byte(std_out))\n\t\tlocal_err.Write([]byte(std_err))\n\t\treturn err\n\t}\n}", "func (m *MockKeepers) SysKeeper() core.SystemKeeper {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"SysKeeper\")\n\tret0, _ := ret[0].(core.SystemKeeper)\n\treturn ret0\n}", "func (m *MockisJobSpec_Source) isJobSpec_Source() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"isJobSpec_Source\")\n}", "func (cc *InteropCC) Invoke(stub shim.ChaincodeStubInterface) pb.Response {\n fmt.Println(\"Invoking Mock Fabric Interop CC\")\n function, args := stub.GetFunctionAndParameters()\n if function == \"GetTotalFungibleLockedAssets\" {\n return shim.Success([]byte(strconv.Itoa(cc.fungibleAssetLockedCount[args[0]])))\n }\n caller, _ := stub.GetCreator()\n if function == \"LockAsset\" {\n assetAgreement := &common.AssetExchangeAgreement{}\n arg0, _ := base64.StdEncoding.DecodeString(args[0])\n _ = proto.Unmarshal([]byte(arg0), assetAgreement)\n key := assetAgreement.AssetType + \":\" + assetAgreement.Id\n contractId := generateSHA256HashInBase64Form(key)\n val := key + \":\" + string(caller) + \":\" + assetAgreement.Recipient\n if cc.assetLockMap[contractId] != \"\" {\n return shim.Error(fmt.Sprintf(\"Asset of type %s and ID %s is already locked\", assetAgreement.AssetType, assetAgreement.Id))\n }\n cc.assetLockMap[contractId] = val\n return shim.Success([]byte(contractId))\n }\n if function == \"LockFungibleAsset\" { // We are only going to lock once or twice in each unit test function, so bookkeeping doesn't need to be thorough\n assetAgreement := &common.FungibleAssetExchangeAgreement{}\n arg0, _ := base64.StdEncoding.DecodeString(args[0])\n _ = proto.Unmarshal([]byte(arg0), assetAgreement)\n val := assetAgreement.AssetType + \":\" + strconv.Itoa(int(assetAgreement.NumUnits)) + \":\" + string(caller) + \":\" + assetAgreement.Recipient\n contractId := generateSHA256HashInBase64Form(val)\n cc.fungibleAssetLockMap[contractId] = val\n\tif 
cc.fungibleAssetLockedCount[assetAgreement.AssetType] == 0 {\n\t\tcc.fungibleAssetLockedCount[assetAgreement.AssetType] = int(assetAgreement.NumUnits)\n\t} else {\n\t\tcc.fungibleAssetLockedCount[assetAgreement.AssetType] += int(assetAgreement.NumUnits)\n\t}\n return shim.Success([]byte(contractId))\n }\n if function == \"IsAssetLocked\" {\n assetAgreement := &common.AssetExchangeAgreement{}\n arg0, _ := base64.StdEncoding.DecodeString(args[0])\n _ = proto.Unmarshal([]byte(arg0), assetAgreement)\n expectedKey := assetAgreement.AssetType + \":\" + assetAgreement.Id\n contractId := generateSHA256HashInBase64Form(expectedKey)\n expectedVal := expectedKey + \":\" + assetAgreement.Locker + \":\" + assetAgreement.Recipient\n if cc.assetLockMap[contractId] == expectedVal {\n return shim.Success([]byte(\"true\"))\n } else {\n return shim.Success([]byte(\"false\"))\n }\n }\n if function == \"IsFungibleAssetLocked\" {\n contractId := args[0]\n\tif _, contractExists := cc.fungibleAssetLockMap[contractId]; contractExists {\n\t\tassetLockValSplit := strings.Split(cc.fungibleAssetLockMap[contractId], \":\")\n\t\t// caller need to be either the locker or the recipient\n\t\tif assetLockValSplit[2] != string(caller) && assetLockValSplit[3] != string(caller) {\n\t\t\treturn shim.Success([]byte(\"false\"))\n\t\t}\n\t\treturn shim.Success([]byte(\"true\"))\n\t} else {\n return shim.Success([]byte(\"false\"))\n\t}\n }\n if function == \"IsAssetLockedQueryUsingContractId\" {\n contractId := args[0]\n\tif _, contractExists := cc.assetLockMap[contractId]; contractExists {\n\t\t// caller not necessarily need to be one of either locker or recipient\n\t\treturn shim.Success([]byte(\"true\"))\n\t} else {\n return shim.Success([]byte(\"false\"))\n\t}\n }\n if function == \"UnlockAsset\" {\n assetAgreement := &common.AssetExchangeAgreement{}\n arg0, _ := base64.StdEncoding.DecodeString(args[0])\n _ = proto.Unmarshal([]byte(arg0), assetAgreement)\n expectedKey := assetAgreement.AssetType + \":\" 
+ assetAgreement.Id\n contractId := generateSHA256HashInBase64Form(expectedKey)\n expectedVal := expectedKey + \":\" + string(caller) + \":\" + assetAgreement.Recipient\n if cc.assetLockMap[contractId] == \"\" {\n return shim.Error(fmt.Sprintf(\"No asset of type %s and ID %s is locked\", assetAgreement.AssetType, assetAgreement.Id))\n } else if cc.assetLockMap[contractId] != expectedVal {\n return shim.Error(fmt.Sprintf(\"Cannot unlock asset of type %s and ID %s as it is locked by %s for %s\", assetAgreement.AssetType, assetAgreement.Id, string(caller), assetAgreement.Recipient))\n } else {\n delete(cc.assetLockMap, contractId)\n return shim.Success(nil)\n }\n }\n if function == \"UnlockFungibleAsset\" {\n contractId := args[0]\n\tif _, contractExists := cc.fungibleAssetLockMap[contractId]; contractExists {\n\t\tassetLockValSplit := strings.Split(cc.fungibleAssetLockMap[contractId], \":\")\n\t\t// caller need to be the locker\n\t\tif assetLockValSplit[2] != string(caller) {\n\t\t\treturn shim.Error(fmt.Sprintf(\"cannot unlock fungible asset using contractId %s as caller is different from locker\", contractId))\n\t\t}\n\t\tdelete(cc.fungibleAssetLockMap, contractId)\n\t\treturn shim.Success(nil)\n\t} else {\n return shim.Error(fmt.Sprintf(\"No fungible asset is locked associated with contractId %s\", contractId))\n\t}\n }\n if function == \"UnlockAssetUsingContractId\" {\n contractId := args[0]\n\tif _, contractExists := cc.assetLockMap[contractId]; contractExists {\n\t\tassetLockValSplit := strings.Split(cc.assetLockMap[contractId], \":\")\n\t\t// caller need to be the locker\n\t\tif assetLockValSplit[2] != string(caller) {\n\t\t\treturn shim.Error(fmt.Sprintf(\"cannot unlock asset using contractId %s as caller is different from locker\", contractId))\n\t\t}\n\t\tdelete(cc.assetLockMap, contractId)\n\t\treturn shim.Success(nil)\n\t} else {\n return shim.Error(fmt.Sprintf(\"No asset is locked associated with contractId %s\", contractId))\n\t}\n }\n if function == 
\"ClaimAsset\" {\n assetAgreement := &common.AssetExchangeAgreement{}\n arg0, _ := base64.StdEncoding.DecodeString(args[0])\n _ = proto.Unmarshal([]byte(arg0), assetAgreement)\n expectedKey := assetAgreement.AssetType + \":\" + assetAgreement.Id\n contractId := generateSHA256HashInBase64Form(expectedKey)\n expectedVal := expectedKey + \":\" + assetAgreement.Locker + \":\" + string(caller)\n if cc.assetLockMap[contractId] == \"\" {\n return shim.Error(fmt.Sprintf(\"No asset of type %s and ID %s is locked\", assetAgreement.AssetType, assetAgreement.Id))\n } else if cc.assetLockMap[contractId] != expectedVal {\n return shim.Error(fmt.Sprintf(\"Cannot unlock asset of type %s and ID %s as it is locked by %s for %s\", assetAgreement.AssetType, assetAgreement.Id, assetAgreement.Locker, string(caller)))\n } else {\n delete(cc.assetLockMap, contractId)\n return shim.Success(nil)\n }\n }\n if function == \"ClaimFungibleAsset\" {\n contractId := args[0]\n\tif _, contractExists := cc.fungibleAssetLockMap[contractId]; contractExists {\n\t\tassetLockValSplit := strings.Split(cc.fungibleAssetLockMap[contractId], \":\")\n\t\t// caller need to be the recipient\n\t\tif assetLockValSplit[3] != string(caller) {\n\t\t\treturn shim.Error(fmt.Sprintf(\"cannot claim fungible asset using contractId %s as caller is different from recipient\", contractId))\n\t\t}\n\t\tdelete(cc.fungibleAssetLockMap, contractId)\n\t\treturn shim.Success(nil)\n\t} else {\n return shim.Error(fmt.Sprintf(\"No fungible asset is locked associated with contractId %s\", contractId))\n\t}\n }\n if function == \"ClaimAssetUsingContractId\" {\n contractId := args[0]\n\tif _, contractExists := cc.assetLockMap[contractId]; contractExists {\n\t\tassetLockValSplit := strings.Split(cc.assetLockMap[contractId], \":\")\n\t\t// caller need to be the recipient\n\t\tif assetLockValSplit[3] != string(caller) {\n\t\t\treturn shim.Error(fmt.Sprintf(\"cannot claim asset using contractId %s as caller is different from recipient\", 
contractId))\n\t\t}\n\t\tdelete(cc.assetLockMap, contractId)\n\t\treturn shim.Success(nil)\n\t} else {\n return shim.Error(fmt.Sprintf(\"No asset is locked associated with contractId %s\", contractId))\n\t}\n }\n if function == \"GetAllLockedAssets\" || function == \"GetAllAssetsLockedUntil\" {\n assets := []string{}\n for key, val := range cc.assetLockMap {\n assets = append(assets, key + \":\" + val)\n }\n for key, val := range cc.fungibleAssetLockMap {\n assets = append(assets, key + \":\" + val)\n }\n assetsBytes, _ := json.Marshal(assets)\n return shim.Success(assetsBytes)\n }\n if function == \"GetAllNonFungibleLockedAssets\" {\n assets := []string{}\n for key, val := range cc.assetLockMap {\n assets = append(assets, key + \":\" + val)\n }\n assetsBytes, _ := json.Marshal(assets)\n return shim.Success(assetsBytes)\n }\n if function == \"GetAllFungibleLockedAssets\" {\n assets := []string{}\n for key, val := range cc.fungibleAssetLockMap {\n assets = append(assets, key + \":\" + val)\n }\n assetsBytes, _ := json.Marshal(assets)\n return shim.Success(assetsBytes)\n }\n if function == \"GetAssetTimeToRelease\" {\n return shim.Success([]byte(strconv.Itoa(len(cc.assetLockMap))))\n }\n if function == \"GetFungibleAssetTimeToRelease\" {\n return shim.Success([]byte(strconv.Itoa(len(cc.fungibleAssetLockMap))))\n }\n if function == \"GetHTLCHash\" {\n return shim.Success([]byte(defaultHash))\n }\n if function == \"GetHTLCHashByContractId\" {\n return shim.Success([]byte(defaultHash))\n }\n if function == \"GetHTLCHashPreImage\" {\n return shim.Success([]byte(defaultPreimage))\n }\n if function == \"GetHTLCHashPreImageByContractId\" {\n return shim.Success([]byte(defaultPreimage))\n }\n return shim.Error(fmt.Sprintf(\"Invalid invoke function name: %s\", function))\n}", "func (_m *MockIteratorArrayPool) Init() {\n\t_m.ctrl.Call(_m, \"Init\")\n}", "func TestSomethingElse(t *testing.T) {\n\tmock.AddResponse(&mock.Client, \"GET\", \"/job/mycustomjobpath/api/json\", 
mock.Response{\n\t\tStatusCode: 200,\n\t\tBytes: nil,\n\t})\n\n\t// now do something with mock.Client\n}", "func (m *MockisCryptoApiRequest_CryptoApiReq) isCryptoApiRequest_CryptoApiReq() {\n\tm.ctrl.Call(m, \"isCryptoApiRequest_CryptoApiReq\")\n}" ]
[ "0.58445853", "0.5814883", "0.5812371", "0.5799235", "0.56438655", "0.55894667", "0.55893236", "0.558202", "0.5561989", "0.5530028", "0.5476223", "0.5469369", "0.54614514", "0.5447861", "0.54312384", "0.5419639", "0.54094994", "0.5400664", "0.5399391", "0.53771955", "0.5362037", "0.5355757", "0.5354372", "0.5307972", "0.5295227", "0.5281013", "0.525337", "0.5242458", "0.5228252", "0.52256465", "0.52111113", "0.52103287", "0.5206871", "0.51965547", "0.5189719", "0.51846516", "0.5179393", "0.5177046", "0.5174723", "0.51658213", "0.51590925", "0.5156776", "0.51553494", "0.5155303", "0.51504064", "0.51462823", "0.51455086", "0.5135826", "0.5135341", "0.51303995", "0.5127268", "0.5122263", "0.51203036", "0.51194793", "0.51188904", "0.5111346", "0.5109919", "0.5107541", "0.5104241", "0.50960475", "0.5090775", "0.5088443", "0.50867575", "0.50863427", "0.50852746", "0.50839937", "0.50828296", "0.50813645", "0.5081091", "0.5080439", "0.50794494", "0.50759584", "0.50759476", "0.5068043", "0.506605", "0.50605774", "0.505176", "0.5051517", "0.50515056", "0.50511116", "0.5050825", "0.5048797", "0.50484145", "0.50482225", "0.5047967", "0.50466734", "0.50398594", "0.50358486", "0.5032467", "0.50308585", "0.50293714", "0.5028748", "0.50211465", "0.50159925", "0.5015361", "0.500785", "0.50054586", "0.5005156", "0.5004556", "0.5003375" ]
0.5983789
0
Sacla indicates an expected call of Sacla
func (mr *MockmonitorInterfaceMockRecorder) Sacla(name, num interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sacla", reflect.TypeOf((*MockmonitorInterface)(nil).Sacla), name, num) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockmonitorInterface) Sacla(name string, num float64) (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Sacla\", name, num)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (cb callBacker) cautionCall(fn_name string) error {\n\tif called, err := cb.checkCall(fn_name); called {\n\t\treturn err\n\t}\n\treturn cb.printCaution(fn_name)\n}", "func TestA(t *testing.T) {\n\tif got := A(1, 2); got != 3 {\n\t\tt.Errorf(\"want: %d; got: %d\", 3, got)\n\t}\n}", "func IndirectlyTested() string {\n\treturn \"This function is tested via a function reference rather than a direct call\"\n}", "func TestALSACommand(ctx context.Context, s *testing.State, name string) {\n\tout, err := testexec.CommandContext(ctx, name, \"-l\").CombinedOutput(testexec.DumpLogOnError)\n\tif err != nil {\n\t\ts.Fatalf(\"%s failed: %v\", name, err)\n\t}\n\tif strings.Contains(string(out), \"no soundcards found\") {\n\t\ts.Errorf(\"%s recognized no sound cards\", name)\n\t}\n}", "func TestClient_AddSAtoRole(t *testing.T) {\n\ttype testvalue = struct {\n\t\tsaname string\n\t\tproject string\n\t\troles []string\n\t\tpolicy *cloudresourcemanager.Policy\n\t\tcondition *cloudresourcemanager.Expr\n\t}\n\n\tt.Run(\"AddSAtoRole should fail because of missing mandatory arguments\", func(t *testing.T) {\n\t\t//test with empty saname, projectname, roles slice and roles members.\n\t\ttestvalues := []testvalue{\n\t\t\t{saname: \"\", project: \"test_project_01\", roles: []string{\"test_role_01\"}, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"\", roles: []string{\"test_role_01\"}, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"test_project_01\", roles: nil, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"test_project_01\", roles: []string{\"\"}, policy: nil, condition: nil},\n\t\t}\n\t\tfor _, tv := range testvalues {\n\t\t\tmockCRM := &mocks.CRM{}\n\t\t\tclient, _ := 
New(mockCRM)\n\n\t\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t\t//should return error\n\t\t\tif test := assert.EqualErrorf(t, err, fmt.Sprintf(\"One of mandatory method arguments saname, projectname ,role can not be empty. Got values. saname: %s projectname: %s roles: %v.\", tv.saname, tv.project, tv.roles), \"\\tnot expected: AddSAtoRole returned unexpected error or nil.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error.\")\n\t\t\t}\n\t\t\t//should return nil policy\n\t\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t\t}\n\t\t\t//should not call crmservice.GetPolicy\n\t\t\tif test := mockCRM.AssertNotCalled(t, \"GetPolicy\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t\t}\n\t\t\t//should not call crmservice.SetPolicy\n\t\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should fail because safqdn passed as saname argument\", func(t *testing.T) {\n\t\t//test with safqdn passed as saname\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01@test_project_01.iam.gserviceaccount.com\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: nil,\n\t\t\tcondition: nil,\n\t\t}\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.EqualErrorf(t, err, fmt.Sprintf(\"saname argument can not be serviceaccount fqdn. 
Provide only name, without domain part. Got value: %s.\", tv.saname), \"\\tnot expected: AddSAtoRole returned unexpected error or nil.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error.\")\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should not call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"GetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should fail because got error when getting policy from GCP\", func(t *testing.T) {\n\t\t//test with correct arguments\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\t\tEtag: 
\"\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t},\n\t\t\tcondition: nil}\n\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\n\t\tmockCRM.On(\"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(nil, errors.New(\"GetPolicy() error.\"))\n\t\tdefer mockCRM.AssertExpectations(t)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole do not returned error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\tif test := assert.Containsf(t, err.Error(), \"When downloading policy for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t}\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 1); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() 
did not call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should fail because got PolicyModifiedError when setting policy in GCP\", func(t *testing.T) {\n\t\t//test with correct values\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\t\tEtag: \"initial-Etag\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t},\n\t\t\tcondition: nil}\n\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\n\t\tmockCRM.On(\"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(func(string, *cloudresourcemanager.GetIamPolicyRequest) *cloudresourcemanager.Policy {\n\t\t\tif len(mockCRM.Calls) == 1 {\n\t\t\t\treturn tv.policy\n\t\t\t} else if len(mockCRM.Calls) == 2 {\n\t\t\t\treturn &cloudresourcemanager.Policy{Etag: \"different-Etag\"}\n\t\t\t}\n\t\t\treturn nil\n\t\t}, nil)\n\t\tdefer mockCRM.AssertExpectations(t)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole 
do not returned error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\tif test := assert.Containsf(t, err.Error(), \"When checking if policy was modified for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t}\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should fail because got error when setting policy in GCP\", func(t *testing.T) {\n\t\t//test with correct values\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: 
[]*cloudresourcemanager.Binding{\n\t\t\t\t\t&cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}},\n\t\t\t\tEtag: \"test-Etag\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t},\n\t\t\tcondition: nil,\n\t\t}\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\t\tmockCRM.On(\"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(tv.policy, nil)\n\t\tmockCRM.On(\"SetPolicy\", tv.project, mock.AnythingOfType(\"*cloudresourcemanager.SetIamPolicyRequest\")).Return(nil, errors.New(\"crmservice.SetPolicy-error\"))\n\t\tdefer mockCRM.AssertExpectations(t)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole did not returned error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\tif test := assert.Containsf(t, err.Error(), \"When setting new policy for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t}\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should call 
crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"SetPolicy\", tv.project, mockCRM.Calls[2].Arguments.Get(1).(*cloudresourcemanager.SetIamPolicyRequest)); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t}\n\t\t//should call crmservice.SetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"SetPolicy\", 1); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.SetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.SetPolicy() unexpected number of times.\")\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should add role without errors.\", func(t *testing.T) {\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{\n\t\t\t\t\t&cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\t\tRole: 
\"roles/owner\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}},\n\t\t\t\tEtag: \"test-Etag\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t},\n\t\t\tcondition: nil,\n\t\t}\n\t\treturnpolicy := &cloudresourcemanager.Policy{\n\t\t\tAuditConfigs: nil,\n\t\t\tBindings: []*cloudresourcemanager.Binding{\n\t\t\t\t&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:test_sa_01@test_project_01.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/test_role_01\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\tEtag: \"test-Etag\",\n\t\t\tVersion: 0,\n\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\tForceSendFields: nil,\n\t\t\tNullFields: nil,\n\t\t}\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\t\tmockCRM.On(\"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(tv.policy, nil)\n\t\tmockCRM.On(\"SetPolicy\", tv.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: 
returnpolicy}).Return(returnpolicy, nil)\n\t\tdefer mockCRM.AssertExpectations(t)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.Nil(t, err, \"\\tnot expected: AddSAtoRole returned not nil error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned nil error.\")\n\t\t}\n\t\t//should return not nil policy\n\t\tif test := assert.NotNil(t, policy, \"\\tnot expected: AddSAtoRole returned nil policy object.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned not nil policy object.\")\n\t\t}\n\t\t//should return policy of type *cloudresourcemanager.Policy\n\t\tif test := assert.IsType(t, &cloudresourcemanager.Policy{}, policy, \"\\tnotexpected: AddSAtoRole returned object not type of *cloudresourcemanager.Policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned object of expected type.\")\n\t\t}\n\t\t//should return same policy as returned by GCP API call\n\t\tif test := assert.Equal(t, returnpolicy, policy, \"\\tnot expected: AddSAtoRole returned different policy than returned by GCP API call.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned same policy as GCP API call.\")\n\t\t}\n\t\t//should call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy twice\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertCalled(t, 
\"SetPolicy\", tv.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: returnpolicy}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t}\n\t\t//should call crmservice.SetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"SetPolicy\", 1); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.SetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.SetPolicy() unexpected number of times.\")\n\t\t}\n\t})\n\n\tt.Run(\"AddSAtoRole should add serviceaccount to role without errors.\", func(t *testing.T) {\n\t\ttv := testvalue{\n\t\t\tsaname: \"test_sa_01\",\n\t\t\tproject: \"test_project_01\",\n\t\t\troles: []string{\"test_role_01\"},\n\t\t\tpolicy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{\n\t\t\t\t\t&cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\t\tCondition: nil,\n\t\t\t\t\t\tMembers: []string{\"serviceAccount:some_sa@test_project_01.iam.gserviceaccount.com\"},\n\t\t\t\t\t\tRole: \"roles/test_role_01\",\n\t\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\t\tNullFields: nil,\n\t\t\t\t\t}},\n\t\t\t\tEtag: \"test-Etag\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: 
nil,\n\t\t\t\tNullFields: nil,\n\t\t\t},\n\t\t\tcondition: nil,\n\t\t}\n\t\treturnpolicy := &cloudresourcemanager.Policy{\n\t\t\tAuditConfigs: nil,\n\t\t\tBindings: []*cloudresourcemanager.Binding{\n\t\t\t\t&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:some_sa@test_project_01.iam.gserviceaccount.com\", \"serviceAccount:test_sa_01@test_project_01.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/test_role_01\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\tEtag: \"test-Etag\",\n\t\t\tVersion: 0,\n\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\tForceSendFields: nil,\n\t\t\tNullFields: nil,\n\t\t}\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\t\tmockCRM.On(\"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(tv.policy, nil)\n\t\tmockCRM.On(\"SetPolicy\", tv.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: returnpolicy}).Return(returnpolicy, nil)\n\t\tdefer mockCRM.AssertExpectations(t)\n\n\t\tpolicy, err := client.AddSAtoRole(tv.saname, tv.roles, tv.project, tv.condition)\n\t\t//should return error\n\t\tif test := assert.Nil(t, err, \"\\tnot expected: AddSAtoRole returned not nil error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned nil error.\")\n\t\t}\n\t\t//should return not nil policy\n\t\tif test := assert.NotNil(t, policy, \"\\tnot expected: 
AddSAtoRole returned nil policy object.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned not nil policy object.\")\n\t\t}\n\t\t//should return policy of type *cloudresourcemanager.Policy\n\t\tif test := assert.IsType(t, &cloudresourcemanager.Policy{}, policy, \"\\tnotexpected: AddSAtoRole returned object not type of *cloudresourcemanager.Policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned object of expected type.\")\n\t\t}\n\t\t//should return same policy as returned by GCP API call\n\t\tif test := assert.Equal(t, returnpolicy, policy, \"\\tnot expected: AddSAtoRole returned different policy than returned by GCP API call.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned same policy as GCP API call.\")\n\t\t}\n\t\t//should call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", tv.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy twice\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"SetPolicy\", tv.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: returnpolicy}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t}\n\t\t//should call crmservice.SetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"SetPolicy\", 1); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called 
crmservice.SetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.SetPolicy() unexpected number of times.\")\n\t\t}\n\t})\n\n}", "func TestSignContractSuccess(t *testing.T) {\n\tsignatureHelper(t, false)\n}", "func Succeeded(expected proto.Message) Check {\n\treturn func(t *testing.T, stdout string, err error) {\n\t\tt.Helper()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"expected no error but actual %v\", err)\n\t\t}\n\n\t\ts, err := printer.ProtoString(expected)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif diff := cmp.Diff(s+\"\\n\", stdout); len(diff) != 0 {\n\t\t\tt.Error(diff)\n\t\t}\n\t}\n}", "func callAndVerify(msg string, client pb.GreeterClient, shouldFail bool) error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\t_, err := client.SayHello(ctx, &pb.HelloRequest{Name: msg})\n\tif want, got := shouldFail == true, err != nil; got != want {\n\t\treturn fmt.Errorf(\"want and got mismatch, want shouldFail=%v, got fail=%v, rpc error: %v\", want, got, err)\n\t}\n\treturn nil\n}", "func TestClient_AddSAtoRole(t *testing.T) {\n\ttype testvalue = struct {\n\t\tsaname string\n\t\tproject string\n\t\troles []string\n\t\tpolicy *cloudresourcemanager.Policy\n\t\tcondition *cloudresourcemanager.Expr\n\t}\n\tt.Run(\"AddSAtoRole should fail because of missing mandatory arguments\", func(t *testing.T) {\n\t\t//test with empty saname, projectname, roles slice and roles members.\n\t\tvalues := []testvalue{\n\t\t\t{saname: \"\", project: \"test_project_01\", roles: []string{\"test_role_01\"}, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"\", roles: []string{\"test_role_01\"}, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"test_project_01\", roles: nil, policy: nil, condition: nil},\n\t\t\t{saname: \"test_sa_01\", project: \"test_project_01\", roles: []string{\"\"}, policy: nil, condition: 
nil},\n\t\t}\n\t\tfor _, value := range values {\n\t\t\tt.Logf(\"\\n\\tTesting with values:\\n\\tsaname: %s\\n\\tnproejct: %s\\n\\troles: %v\", value.saname, value.project, value.roles)\n\t\t\tmockCRM := &mocks.CRM{}\n\t\t\tclient, _ := New(mockCRM)\n\t\t\t//mockCRM.On(\"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(nil, nil)\n\t\t\t//mockCRM.On(\"SetPolicy\", value.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: nil,}).Return(nil, nil)\n\t\t\tdefer mockCRM.AssertExpectations(t)\n\t\t\tpolicy, err := client.AddSAtoRole(value.saname, value.roles, value.project, nil)\n\t\t\t//should return error\n\t\t\tif test := assert.EqualErrorf(t, err, fmt.Sprintf(\"One of mandatory method arguments saname, projectname ,role can not be empty. Got values. saname: %s projectname: %s roles: %v.\", value.saname, value.project, value.roles), \"\\tnot expected: AddSAtoRole returned unexpected error or nil.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error.\")\n\t\t\t}\n\t\t\t//should return nil policy\n\t\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t\t}\n\t\t\t//should not call crmservice.GetPolicy\n\t\t\tif test := mockCRM.AssertNotCalled(t, \"GetPolicy\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t\t}\n\t\t\t//should not call crmservice.SetPolicy\n\t\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t\t}\n\t\t\t//client.policies should not contain project policy\n\t\t\tif test := assert.Nilf(t, client.policies[value.project], \"\\tnot 
expected: Client object holds policy for project: %s.\", value.project); test {\n\t\t\t\tt.Logf(\"\\texpected: Client object do not contain policy for project: %s.\", value.project)\n\t\t\t}\n\t\t}\n\t})\n\tt.Run(\"AddSAtoRole should fail because safqdn passed as saname argument\", func(t *testing.T) {\n\t\t//test with safqdn passed as saname\n\t\tvar (\n\t\t\tsaname = \"test_sa_01@test_project_01.iam.gserviceaccount.com\"\n\t\t\tproject = \"test_project_01\"\n\t\t\troles = []string{\"test_role_01\"}\n\t\t)\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\t\t//mockCRM.On(\"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(nil, nil)\n\t\t//mockCRM.On(\"SetPolicy\", value.project, &cloudresourcemanager.SetIamPolicyRequest{Policy: nil,}).Return(nil, nil)\n\t\tdefer mockCRM.AssertExpectations(t)\n\t\tpolicy, err := client.AddSAtoRole(saname, roles, project, nil)\n\t\t//should return error\n\t\tif test := assert.EqualErrorf(t, err, fmt.Sprintf(\"saname argument can not be serviceaccount fqdn. Provide only name, without domain part. 
Got value: %s.\", saname), \"\\tnot expected: AddSAtoRole returned unexpected error or nil.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error.\")\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should not call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"GetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t}\n\t\t//client.policies should not contain project policy\n\t\tif test := assert.Nilf(t, client.policies[project], \"\\tnot expected: Client object holds policy for project: %s.\", project); test {\n\t\t\tt.Logf(\"\\texpected: Client object do not contain policy for project: %s.\", project)\n\t\t}\n\t})\n\tt.Run(\"AddSAtoRole should fail because got error when getting policy from GCP\", func(t *testing.T) {\n\t\t//test with correct arguments\n\t\tvalue := testvalue{saname: \"test_sa_01\", project: \"test_project_01\", roles: []string{\"test_role_01\"}, policy: &cloudresourcemanager.Policy{\n\t\t\tAuditConfigs: nil,\n\t\t\tBindings: []*cloudresourcemanager.Binding{&cloudresourcemanager.Binding{\n\t\t\t\tCondition: nil,\n\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\tRole: \"roles/owner\",\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\tCondition: nil,\n\t\t\t\tMembers: 
[]string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t}},\n\t\t\tEtag: \"\",\n\t\t\tVersion: 0,\n\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\tForceSendFields: nil,\n\t\t\tNullFields: nil,\n\t\t}, condition: nil}\n\t\tmockCRM := &mocks.CRM{}\n\t\tclient, _ := New(mockCRM)\n\t\tmockCRM.On(\"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(nil, errors.New(\"GetPolicy() error.\"))\n\t\tdefer mockCRM.AssertExpectations(t)\n\t\tpolicy, err := client.AddSAtoRole(value.saname, value.roles, value.project, value.condition)\n\t\t//should return error\n\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole do not returned error.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\tif test := assert.Containsf(t, err.Error(), \"When downloading policy for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t}\n\t\t}\n\t\t//should return nil policy\n\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t}\n\t\t//should call crmservice.GetPolicy\n\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t}\n\t\t//should call crmservice.GetPolicy once\n\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 1); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called 
crmservice.GetPolicy() unexpected number of times.\")\n\t\t}\n\t\t//should not call crmservice.SetPolicy\n\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t} else {\n\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t}\n\t\t//client.policies should not contain project policy\n\t\tif test := assert.Nilf(t, client.policies[value.project], \"\\tnot expected: Client object holds policy for project: %s.\", value.project); test {\n\t\t\tt.Logf(\"\\texpected: Client object do not contain policy for project: %s.\", value.project)\n\t\t}\n\t})\n\t/*\n\t\t// TODO: Can't test this as different return values are needed for different cals to crmservice.GetPolicy method. Need research how to do it.\n\t\tt.Run(\"AddSAtoRole should fail because got PolicyModifiedError when setting policy in GCP\", func(t *testing.T) {\n\t\t\t//test with correct values\n\t\t\tvalue := testvalue{saname: \"test_sa_01\", project: \"test_project_01\", roles: []string{\"test_role_01\"}, policy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\t\tEtag: \"\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t}, condition: nil}\n\t\t\tmockCRM := &mocks.CRM{}\n\t\t\tclient, 
_ := New(mockCRM)\n\t\t\tmockCRM.On(\"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(nil, &PolicyModifiedError{msg:\"test PolicyModifiedError\"})\n\t\t\t//mockCRM.On(\"SetPolicy\", value.project, &cloudresourcemanager.SetIamPolicyRequest{\n\t\t\t//\tPolicy: testvalues[1].policy,\n\t\t\t//}).Return(testvalues[1].policy, nil)\n\t\t\tdefer mockCRM.AssertExpectations(t)\n\t\t\tpolicy, err := client.AddSAtoRole(value.saname, value.roles, value.project, value.condition)\n\t\t\t//should return error\n\t\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole do not returned error.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\t\tif test := assert.Containsf(t, err.Error(), \"When checking if policy was modified for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t\t}\n\t\t\t}\n\t\t\t//should return nil policy\n\t\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t\t}\n\t\t\t//should call crmservice.GetPolicy\n\t\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t\t}\n\t\t\t//should call crmservice.GetPolicy once\n\t\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t\t}\n\t\t\t//should not call crmservice.SetPolicy\n\t\t\tif test := 
mockCRM.AssertNotCalled(t, \"SetPolicy\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t\t}\n\t\t\t//client.policies should not contain project policy\n\t\t\tif test := assert.Nilf(t, client.policies[value.project], \"\\tnot expected: Client object holds policy for project: %s.\", value.project); test {\n\t\t\t\tt.Logf(\"\\texpected: Client object do not contain policy for project: %s.\", value.project)\n\t\t\t}\n\t\t\t//test with correct values\n\t\t\t//should return error\n\t\t\t//should return nil policy\n\t\t\t//should call crmservice.GetPolicy\n\t\t\t//should call crmservice.GetPolicy twice\n\t\t\t//should not call crmservice.SetPolicy\n\t\t\t//client.policies should not contain project policy\n\t\t})\n\n\t\tt.Run(\"AddSAtoRole should fail because got error when setting policy in GCP\", func(t *testing.T) {\n\t\t\t//test with correct values\n\t\t\tvalue := testvalue{saname: \"test_sa_01\", project: \"test_project_01\", roles: []string{\"test_role_01\"}, policy: &cloudresourcemanager.Policy{\n\t\t\t\tAuditConfigs: nil,\n\t\t\t\tBindings: []*cloudresourcemanager.Binding{&cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"group:[email protected]\", \"serviceAccount:some_sa@test_project.iam.gserviceaccount.com\"},\n\t\t\t\t\tRole: \"roles/owner\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}, &cloudresourcemanager.Binding{\n\t\t\t\t\tCondition: nil,\n\t\t\t\t\tMembers: []string{\"serviceAccount:[email protected]\", \"user:[email protected]\"},\n\t\t\t\t\tRole: \"roles/compute.admin\",\n\t\t\t\t\tForceSendFields: nil,\n\t\t\t\t\tNullFields: nil,\n\t\t\t\t}},\n\t\t\t\tEtag: \"\",\n\t\t\t\tVersion: 0,\n\t\t\t\tServerResponse: googleapi.ServerResponse{},\n\t\t\t\tForceSendFields: nil,\n\t\t\t\tNullFields: nil,\n\t\t\t}, condition: nil}\n\t\t\tmockCRM := 
&mocks.CRM{}\n\t\t\tclient, _ := New(mockCRM)\n\t\t\tmockCRM.On(\"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}).Return(, nil)\n\t\t\t//mockCRM.On(\"SetPolicy\", value.project, &cloudresourcemanager.SetIamPolicyRequest{\n\t\t\t//\tPolicy: testvalues[1].policy,\n\t\t\t//}).Return(testvalues[1].policy, nil)\n\t\t\tdefer mockCRM.AssertExpectations(t)\n\t\t\tpolicy, err := client.AddSAtoRole(value.saname, value.roles, value.project, value.condition)\n\t\t\t//should return error\n\t\t\tif test := assert.Errorf(t, err, \"\\tnot expected: AddSAtoRole do not returned error.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned error.\")\n\t\t\t\tif test := assert.Containsf(t, err.Error(), \"When checking if policy was modified for\", \"\\tnot expected: AddSAtoRole() returned unexpected error message.\"); test {\n\t\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() returned expected error message.\")\n\t\t\t\t}\n\t\t\t}\n\t\t\t//should return nil policy\n\t\t\tif test := assert.Nil(t, policy, \"\\tnotexpected: AddSAtoRole returned not nil policy.\"); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole returned nil policy.\")\n\t\t\t}\n\t\t\t//should call crmservice.GetPolicy\n\t\t\tif test := mockCRM.AssertCalled(t, \"GetPolicy\", value.project, &cloudresourcemanager.GetIamPolicyRequest{}); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did call crmservice.GetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did not call crmservice.GetPolicy().\")\n\t\t\t}\n\t\t\t//should call crmservice.GetPolicy once\n\t\t\tif test := mockCRM.AssertNumberOfCalls(t, \"GetPolicy\", 2); test {\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() called crmservice.GetPolicy() expected number of times.\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() called crmservice.GetPolicy() unexpected number of times.\")\n\t\t\t}\n\t\t\t//should not call crmservice.SetPolicy\n\t\t\tif test := mockCRM.AssertNotCalled(t, \"SetPolicy\"); test 
{\n\t\t\t\tt.Log(\"\\texpected: AddSAtoRole() did not call crmservice.SetPolicy().\")\n\t\t\t} else {\n\t\t\t\tt.Log(\"\\tnot expected: AddSAtoRole() did call crmservice.SetPolicy().\")\n\t\t\t}\n\t\t\t//client.policies should not contain project policy\n\t\t\tif test := assert.Nilf(t, client.policies[value.project], \"\\tnot expected: Client object holds policy for project: %s.\", value.project); test {\n\t\t\t\tt.Logf(\"\\texpected: Client object do not contain policy for project: %s.\", value.project)\n\t\t\t}\n\t\t\t//test with correct values\n\t\t\t//should return error\n\t\t\t//should return nil policy\n\t\t\t//should call crmservice.GetPolicy\n\t\t\t//should call crmservice.GetPolicy twice\n\t\t\t//should call crmservice.SetPolicy\n\t\t\t//should call crmservice.SetPolicy once\n\t\t\t//client.policies should not contain project policy\n\t\t})\n\t\tt.Run(\"AddSAtoRole should add serviceaccount to role without errors.\", func(t *testing.T) {\n\t\t\t//test with correct values and multiple roles\n\t\t\t//should return nil error\n\t\t\t//should return *cloudresourcemanager.Policy\n\t\t\t//should call crmservice.GetPolicy\n\t\t\t//should call crmservice.GetPolicy twice\n\t\t\t//should call crmservice.SetPolicy\n\t\t\t//should call crmservice.SetPolicy once\n\t\t\t//client.policies should contain project policy with correct binding\n\t\t\t//client.policies should contain project policy with provided member\n\t\t\t//Returned policy should be equal to the client.policies project policy\n\t\t})\n\t*/\n}", "func TestGetActionsPassing(t *testing.T) {\n\tif 1 == 2 {\n\t\tt.Error(\"Something is seriously wrong\")\n\t}\n}", "func TestSingleStageSuccess(t *testing.T) {\n\thtlcOutpoint := wire.OutPoint{Index: 3}\n\n\tsweepTx := &wire.MsgTx{\n\t\tTxIn: []*wire.TxIn{{}},\n\t\tTxOut: []*wire.TxOut{{}},\n\t}\n\n\t// singleStageResolution is a resolution for a htlc on the remote\n\t// party's commitment.\n\tsingleStageResolution := 
lnwallet.IncomingHtlcResolution{\n\t\tSweepSignDesc: testSignDesc,\n\t\tClaimOutpoint: htlcOutpoint,\n\t}\n\n\t// We send a confirmation for our sweep tx to indicate that our sweep\n\t// succeeded.\n\tresolve := func(ctx *htlcSuccessResolverTestContext) {\n\t\tctx.notifier.ConfChan <- &chainntnfs.TxConfirmation{\n\t\t\tTx: ctx.resolver.sweepTx,\n\t\t\tBlockHeight: testInitialBlockHeight - 1,\n\t\t}\n\t}\n\n\tsweepTxid := sweepTx.TxHash()\n\tclaim := &channeldb.ResolverReport{\n\t\tOutPoint: htlcOutpoint,\n\t\tAmount: btcutil.Amount(testSignDesc.Output.Value),\n\t\tResolverType: channeldb.ResolverTypeIncomingHtlc,\n\t\tResolverOutcome: channeldb.ResolverOutcomeClaimed,\n\t\tSpendTxID: &sweepTxid,\n\t}\n\ttestHtlcSuccess(\n\t\tt, singleStageResolution, resolve, sweepTx, claim,\n\t)\n}", "func (_obj *LacService) Test(_opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\tctx := context.Background()\n\terr = _obj.s.Tars_invoke(ctx, 0, \"test\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_obj.setMap(len(_opt), _resp, _context, _status)\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func VUCOMISS_SAE(x, x1 operand.Op) { ctx.VUCOMISS_SAE(x, x1) }", "func TestAckInstalledApplicationListDuplicateRegression(t *testing.T) {\n\n}", "func Expected(s string) {\n\tError(s + \" Expected\")\n}", "func TestSignContractFailure(t *testing.T) {\n\tsignatureHelper(t, true)\n}", "func VCOMISS_SAE(x, x1 operand.Op) { ctx.VCOMISS_SAE(x, x1) }", "func 
sequenceCallerShouldRetry(err error) bool {\n\tvar kind *errMaybeCensorship\n\tbelongs := errors.As(err, &kind)\n\treturn belongs\n}", "func test(i Instruction, ls *LuaState) {\n\ta, _, c := i.ABC()\n\ta += 1\n\n\tif luaToBoolean(ls, a) != (c != 0) {\n\t\tls.addPC(1)\n\t}\n}", "func checkMisbehaviour(cdc codec.BinaryMarshaler, clientState ClientState, soloMisbehaviour *Misbehaviour) error {\n\tpubKey := clientState.ConsensusState.GetPubKey()\n\n\tdata, err := MisbehaviourSignBytes(\n\t\tcdc,\n\t\tsoloMisbehaviour.Sequence, clientState.ConsensusState.Timestamp,\n\t\tclientState.ConsensusState.Diversifier,\n\t\tsoloMisbehaviour.SignatureOne.Data,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsigData, err := UnmarshalSignatureData(cdc, soloMisbehaviour.SignatureOne.Signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check first signature\n\tif err := VerifySignature(pubKey, data, sigData); err != nil {\n\t\treturn sdkerrors.Wrap(err, \"misbehaviour signature one failed to be verified\")\n\t}\n\n\tdata, err = MisbehaviourSignBytes(\n\t\tcdc,\n\t\tsoloMisbehaviour.Sequence, clientState.ConsensusState.Timestamp,\n\t\tclientState.ConsensusState.Diversifier,\n\t\tsoloMisbehaviour.SignatureTwo.Data,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsigData, err = UnmarshalSignatureData(cdc, soloMisbehaviour.SignatureTwo.Signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check second signature\n\tif err := VerifySignature(pubKey, data, sigData); err != nil {\n\t\treturn sdkerrors.Wrap(err, \"misbehaviour signature two failed to be verified\")\n\t}\n\n\treturn nil\n}", "func (err *RuleNotSatisfied) RuleNotSatisfied() {}", "func OK(t *testing.T, ok bool, messages ...string) {\n\t_ok(t, ok, callerLine(1), messages...)\n}", "func (s AlwaysPanicStakingMock) Slash(sdk.Context, sdk.ConsAddress, int64, int64, sdk.Dec) {\n\tpanic(\"unexpected call\")\n}", "func Expected(s string) {\n\tAbort(s + \" Expected\")\n}", "func TestSagaError(t *testing.T) {\n\t// create 
logger for tracing to help debug\n\tloggerForTx := trace.NewSimpleLogger()\n\n\t// create storage (\n\tstorageForTx := memory.NewLogStorage()\n\t// kafka storage could look like this:\n\t//storageForTx, err := kafka.New(\"2.7.0\", []string{\"10.9.2.7\"})\n\t//if err != nil {\n\t//\tt.Fatal(err)\n\t//}\n\n\t// create saga and register subTx definitions with it\n\tsagaForTx := NewWithLogger(loggerForTx)\n\tif err := sagaForTx.AddSubTx(\"debit\", debit, debitCompensate); err != nil {\n\t\tlog.Println(\"Failed to add Debit SubTx to Saga\")\n\t\tt.Fatal(err)\n\t}\n\n\tif err := sagaForTx.AddSubTx(\"credit\", creditError, creditCompensate); err != nil {\n\t\tlog.Println(\"Failed to add Credit SubTx to Saga\")\n\t\tt.Fatal(err)\n\t}\n\n\t// create a new transaction\n\treadyTx := tx.NewWithLogger(context.Background(), sagaForTx, storageForTx, \"transfer-100-from-sam-to-pam\", loggerForTx)\n\tdefer func() {\n\t\tif err := readyTx.End(); err != nil {\n\t\t\tlog.Println(\"Failed to close transaction.\", err.Error())\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\tif err := readyTx.ExecSubTx(\"debit\", 100, \"sam\"); err != nil {\n\t\treadyTx.RollbackWithInfiniteTries()\n\t\tt.Fatal(err)\n\t}\n\tif err := readyTx.ExecSubTx(\"credit\", 100, \"pam\"); err != nil {\n\t\treadyTx.RollbackWithInfiniteTries()\n\t\tt.Log(\"This is expected to rollback \" + err.Error())\n\t}\n}", "func TestCartoonUse(t *testing.T) {\n\tConvey(\"TestCartoonUse \", t, func() {\n\t\tvar (\n\t\t\terr error\n\t\t\tmid int64 = 1\n\t\t\tct int8 = 2\n\t\t\tcount int64 = 3\n\t\t\torderNo = \"test0059\"\n\t\t\tremake = \"我是传奇5\"\n\t\t\ttips = \"共2话\"\n\t\t\tver int64 = 1\n\t\t\ttoken string\n\t\t\tret int8\n\t\t)\n\t\tret, token, err = s.CartoonUse(c, mid, orderNo, ct, ver, remake, tips, count)\n\t\tt.Logf(\"token(%s)\", token)\n\t\tt.Logf(\"ret(%d)\", ret)\n\t\tSo(err, ShouldBeNil)\n\t})\n}", "func (s *HelpersS) TestAssertSucceedWithExpected(c *gocheck.C) {\n checker := &MyChecker{}\n testHelperSuccess(c, \"Assert(1, 
checker, 2)\", nil, func() interface{} {\n c.Assert(1, checker, 2)\n return nil\n })\n if checker.obtained != 1 || checker.expected != 2 {\n c.Fatalf(\"Bad (obtained, expected) values for check (%d, %d)\",\n checker.obtained, checker.expected)\n }\n}", "func RequireSuccess(rt runtime.Runtime, e exitcode.ExitCode, msg string, args ...interface{}) {\n\tif !e.IsSuccess() {\n\t\trt.Abortf(e, msg, args...)\n\t}\n}", "func (a *assertEncapsulation) Success(err error, statusCode int, message string, args ...interface{}) {\n\tif e := ok_err(err, statusCode, message, args...); e != nil {\n\t\ta.throw(e)\n\t}\n}", "func TestGetNone4A(t *testing.T) {\n}", "func TestSetWrongArgsNoAgentInfo(t *testing.T) {\n\tfmt.Println(\"Entering the test method for SetWrongArgsNoAgentInfo\")\n\tprovcc := new(SimpleAsset)\n\tstub := shim.NewMockStub(\"ANY_PARAM\", provcc)\n\n\t// Testing the init. It always return true. No parameters in init. \n\t\n\tcheckInit(t, stub, [][]byte{[]byte(\"init\")})\n\n\tres := stub.MockInvoke(\"1\", [][]byte{[]byte(\"set\"), []byte(\"S52fkpF2rCEArSuwqyDA9tVjawUdrkGzbNQLaa7xJfA=\"),\n\t\n\t[]byte(\"action\"),[]byte(\"ex:CREATE\"),\n\t[]byte(\"date\"),[]byte(\"2018-11-10T12:15:55.028Z\")})\n\n\tif res.Status != shim.ERROR {\n\t\tfmt.Println(\"Invoke failed\", string(res.Message))\n\t\tt.FailNow()\n\t}\n\t\n}", "func (cb callBacker) mustCall(fn_name string) error {\n\treturn cb.Scripter.EraCall(fn_name)\n}", "func (t *SuiteSuite) VerifyMethodsWrongSignatureSkipped1(x int) {\n\tt.Fatalf(\"This should never run.\")\n}", "func TestIsSolved(t *testing.T) {\n\tlevel := NewLevel(5, 3, NewPlayer(NewPosition(1, 1), North), &[][]Tile{}, &[]*Position{})\n\n\tgot := level.IsSolved()\n\tif got == false {\n\t\tt.Errorf(\"solved %t; wanted: true\", got)\n\t}\n}", "func desiredAssertionStatus0(frame *rtda.Frame) {\n\tframe.OperandStack().PushBoolean(false)\n}", "func ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := 
runtime.Caller(1)\n\t\t//fmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\tlog.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func TestMyCheat(t *testing.T) {\n\ta, b := // YOUR CODE HERE\n\n\n\t//Try to cheat verifier:\n\tproofForth := // YOUR CODE HERE\n\n\tv := gt.VerifyForth(a, b, proofForth)\n\n\tif !v {\n\t\tt.Fatal(\"My cheat does not work\")\n\t}\n}", "func TestAMIHasSymmetry(t *testing.T) {\n\tforwardseq := utils.RandSeq(rand.Intn(math.MaxInt16))\n\treverseseq := utils.ReverseSeq(forwardseq)\n\tforwardprofile := NewProfile(&forwardseq)\n\treverseprofile := NewProfile(&reverseseq)\n\n\tif !checkMatchingLengths(forwardprofile, reverseprofile) {\n\t\tt.Error(\"Forward and reverse profiles had non-matching lengths.\")\n\t}\n\tif !checkMatchingKs(forwardprofile, reverseprofile) {\n\t\tt.Error(\"Forward and reverse profiles had non-matching K values.\")\n\t}\n\tif !checkMatchingIks(forwardprofile, reverseprofile) {\n\t\tt.Error(\"Forward and reverse profiles had non-matching Ik values.\")\n\t}\n}", "func Test_sampe002(t *testing.T) {\n\n}", "func MapperErrorSuit(t *testing.T, mapper productinfo.NetworkPerfMapper) {\n\t// todo design meaningful suite(s) for testing mapper implementations\n\t// todo eg: testing all the categories for a given impl, errors, etc ...\n}", "func checkIsUnitTestScio(args ...interface{}) (bool, error) {\n\treturn false, nil\n\t//TODO BEAM-13702\n}", "func TestSyntaxErr(t *testing.T) {\n\tqc := start_ds()\n\n\tr, _, err := Run(qc, \"this is a bad query\", Namespace_FS)\n\tif err == nil || len(r) != 0 {\n\t\tt.Errorf(\"expected err\")\n\t}\n\tr, _, err = Run(qc, \"\", Namespace_FS) // empty string query\n\tif err == nil || len(r) != 0 {\n\t\tt.Errorf(\"expected err\")\n\t}\n\n\tqccs := start_cs()\n\n\trcs, _, errcs := Run(qccs, \"this is a bad query\", Namespace_CBS)\n\tif errcs == nil || 
len(rcs) != 0 {\n\t\tt.Errorf(\"expected err\")\n\t}\n\trcs, _, errcs = Run(qccs, \"\", Namespace_CBS) // empty string query\n\tif errcs == nil || len(rcs) != 0 {\n\t\tt.Errorf(\"expected err\")\n\t}\n\n}", "func TestCallFunc_arguments(t *testing.T) {\n\n}", "func TestTransitionSystem_IsActionDeterministic1(t *testing.T) {\n\tts1 := GetSimpleTS2()\n\n\tif ts1.IsActionDeterministic() {\n\t\tt.Errorf(\"Test is given a transition system that is not action deterministic, but function claims that it is!\")\n\t}\n}", "func runSuccess(ctx context.Context, d *rpcdut.RPCDUT, param soundCardInitTestParams) error {\n\t// Poll for sound_card_init run time file being updated, which means sound_card_init completes running.\n\tif err := verifySoundCardInitFinished(ctx, d, param.SoundCardID); err != nil {\n\t\treturn errors.Wrap(err, \"failed to wait for sound_card_init completion\")\n\t}\n\treturn nil\n}", "func (t *SimpleChaincode) myCaSecurity(stub shim.ChaincodeStubInterface,args []string,name string) pb.Response {\n\tvar a string // Entities\n\t//var x int // Transaction value\n\tvar err error\n\t\n\tif len(args) !=1 {\n\t\treturn shim.Error(\"Incorrect number of hjuh args. 
Expecting 1\")\n\t}\n\ta = name//args[0]\n\taSub := args[0]\n\t// Get the state from the ledger\n\taBytes, err := stub.GetState(a+\"-ca-\"+aSub)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tif aBytes == nil {\n\t\treturn shim.Error(\"you are not subscribed to this security\")\n\t}\n\t\n\treturn shim.Success(aBytes)\n}", "func TestTransitionSystem_IsAPDeterministic1(t *testing.T) {\n\tts1 := GetSimpleTS2()\n\n\tif !ts1.IsAPDeterministic() {\n\t\tt.Errorf(\"Test is given a transition system that is AP-deterministic, but function claims that it is not!\")\n\t}\n}", "func TestSBSConflicts(t *testing.T) {\n\ttest(t,\n\t\tusers(\"alice\", \"bob\", \"charlie\"),\n\t\tinPrivateTlf(\"alice,bob,charlie@twitter\"),\n\t\tas(alice,\n\t\t\tmkfile(\"alice1.txt\", \"hello bob & charlie\"),\n\t\t),\n\t\tas(bob,\n\t\t\tread(\"alice1.txt\", \"hello bob & charlie\"),\n\t\t),\n\t\tas(charlie,\n\t\t\texpectError(initRoot(), \"charlie does not have read access to directory /keybase/private/alice,bob,charlie@twitter\"),\n\t\t\tnoSync(),\n\t\t),\n\n\t\tinPrivateTlf(\"alice,bob@twitter,charlie@twitter\"),\n\t\tas(alice,\n\t\t\tmkfile(\"alice2.txt\", \"hello bob & charlie\"),\n\t\t),\n\t\tas(bob,\n\t\t\texpectError(initRoot(), \"bob does not have read access to directory /keybase/private/alice,bob@twitter,charlie@twitter\"),\n\t\t\tnoSync(),\n\t\t),\n\t\tas(charlie,\n\t\t\texpectError(initRoot(), \"charlie does not have read access to directory /keybase/private/alice,bob@twitter,charlie@twitter\"),\n\t\t\tnoSync(),\n\t\t),\n\n\t\tinPrivateTlf(\"alice,bob,charlie\"),\n\t\tas(alice,\n\t\t\tmkfile(\"alice3.txt\", \"hello bob & charlie\"),\n\t\t),\n\t\tas(bob,\n\t\t\tread(\"alice3.txt\", \"hello bob & charlie\"),\n\t\t),\n\t\tas(charlie,\n\t\t\tread(\"alice3.txt\", \"hello bob & charlie\"),\n\t\t),\n\n\t\taddNewAssertion(\"bob\", \"bob@twitter\"),\n\t\taddNewAssertion(\"charlie\", \"charlie@twitter\"),\n\t\tas(alice,\n\t\t\t// TODO: Ideally, we wouldn't have to do 
this,\n\t\t\t// and we'd just wait for a rekey.\n\t\t\trekey(),\n\t\t),\n\n\t\t// TODO: Test that alice's favorites are updated.\n\n\t\t// TODO: Test that the three folders are resolved with\n\t\t// conflict markers. This will require changes to\n\t\t// MDServerLocal.\n\t)\n}", "func ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func ok(tb testing.TB, err error) 
{\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func TestAssetSysCC_IssueToken(t *testing.T) {\n\n\tfmt.Println(\"-----------------------------------\")\n\tfmt.Println(\"Test2: issueToken\")\n\n\tascc := new(AssetSysCC)\n\tstub := shim.NewMockStub(\"ascc\", ascc)\n\tcheckInit(t, stub, [][]byte{[]byte(\"\")})\n\n\n\tres_test2 := stub.MockInvoke(\"1\", [][]byte{[]byte(\"registerToken\"), []byte(\"SSToken\"), []byte(\"250\"), []byte(\"18\"), []byte(MAddress[:])})\n\n\tif res_test2.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test2.Message))\n\t\tt.FailNow()\n\t}\n\n\tres_test3 := stub.MockInvoke(\"1\", [][]byte{[]byte(\"issueToken\"), []byte(\"SSToken\"), []byte(\"250\"), []byte(\"18\"), []byte(MAddress[:])})\n\n\tif res_test3.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test3.Message))\n\t\tt.FailNow()\n\t}\n\tcheckQueryInfo(t, stub, [][]byte{[]byte(\"getTokenInfo\"), []byte(\"SSToken\")})\n\n\t////query token quantity\n\t//\tres1 := stub.MockInvoke(\"2\", [][]byte{[]byte(\"getBalance\"), []byte(MAddress[:]), []byte(\"SSToken\")});\n\t//\tif res1.Status != shim.OK {\n\t//\t\tfmt.Println(\"Query failed\", string(res1.Message))\n\t//\t\tt.FailNow()\n\t//\t}\n\t//\tamount,_ := strconv.Atoi(string(res1.Payload))\n\t//\tif amount != 250 {\n\t//\t\tfmt.Printf(\"Query result error! 
%v\", amount )\n\t//\t\tt.FailNow()\n\t//\t}\n\n\tfmt.Println(\"Test issueToken for a registered one Success!\")\n\n\tres_test4 := stub.MockInvoke(\"2\", [][]byte{[]byte(\"issueToken\"), []byte(\"MToken\"), []byte(\"888\"), []byte(\"20\"), []byte(testAddress[:])})\n\tif res_test4.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test3.Message))\n\t\tt.FailNow()\n\t}\n\tcheckQueryInfo(t, stub, [][]byte{[]byte(\"getTokenInfo\"), []byte(\"MToken\")})\n\n\t////query token quantity\n\t//res2 := stub.MockInvoke(\"2\", [][]byte{[]byte(\"getBalance\"), []byte(testAddress[:]), []byte(\"CMBToken\")});\n\t//if res1.Status != shim.OK {\n\t//\tfmt.Println(\"Query failed\", string(res2.Message))\n\t//\tt.FailNow()\n\t//}\n\t//amount2,_ := strconv.Atoi(string(res2.Payload))\n\t//if amount2 != 888 {\n\t//\tfmt.Printf(\"Query result error! %v\", amount2 )\n\t//\tt.FailNow()\n\t//}\n\n\tfmt.Println(\"Test issueToken for an un registered one Success!\")\n}", "func Test_step2a(t *testing.T) {\n\ttestCases := []romance.StepTestCase{\n\t\t{\"épanoUit\", 2, 4, 3, true, \"épanoU\", 2, 4, 3},\n\t\t{\"faillirent\", 4, 7, 2, true, \"faill\", 4, 5, 2},\n\t\t{\"acabit\", 2, 4, 3, true, \"acab\", 2, 4, 3},\n\t\t{\"établissait\", 2, 4, 3, true, \"établ\", 2, 4, 3},\n\t\t{\"découvrir\", 3, 6, 2, true, \"découvr\", 3, 6, 2},\n\t\t{\"réjoUissait\", 3, 5, 2, true, \"réjoU\", 3, 5, 2},\n\t\t{\"trahiront\", 4, 6, 3, true, \"trah\", 4, 4, 3},\n\t\t{\"maintenir\", 4, 7, 2, true, \"mainten\", 4, 7, 2},\n\t\t{\"vendit\", 3, 6, 2, true, \"vend\", 3, 4, 2},\n\t\t{\"repartit\", 3, 5, 2, true, \"repart\", 3, 5, 2},\n\t\t{\"giletti\", 3, 5, 2, true, \"gilett\", 3, 5, 2},\n\t\t{\"rienzi\", 4, 6, 2, true, \"rienz\", 4, 5, 2},\n\t\t{\"punie\", 3, 5, 2, true, \"pun\", 3, 3, 2},\n\t\t{\"accueillir\", 2, 7, 4, true, \"accueill\", 2, 7, 4},\n\t\t{\"rétablit\", 3, 5, 2, true, \"rétabl\", 3, 5, 2},\n\t\t{\"ravis\", 3, 5, 2, true, \"rav\", 3, 3, 2},\n\t\t{\"xviIi\", 4, 5, 3, true, \"xviI\", 4, 
4, 3},\n\t\t{\"blottie\", 4, 7, 3, true, \"blott\", 4, 5, 3},\n\t\t{\"approfondie\", 2, 6, 5, true, \"approfond\", 2, 6, 5},\n\t\t{\"infirmerie\", 2, 5, 4, true, \"infirmer\", 2, 5, 4},\n\t\t{\"scotti\", 4, 6, 3, true, \"scott\", 4, 5, 3},\n\t\t{\"adoucissait\", 2, 5, 3, true, \"adouc\", 2, 5, 3},\n\t\t{\"finissait\", 3, 5, 2, true, \"fin\", 3, 3, 2},\n\t\t{\"promit\", 4, 6, 3, true, \"prom\", 4, 4, 3},\n\t\t{\"franchies\", 4, 9, 3, true, \"franch\", 4, 6, 3},\n\t\t{\"franchissant\", 4, 8, 3, true, \"franch\", 4, 6, 3},\n\t\t{\"micheli\", 3, 6, 2, true, \"michel\", 3, 6, 2},\n\t\t{\"éteignit\", 2, 5, 3, true, \"éteign\", 2, 5, 3},\n\t\t{\"puni\", 3, 4, 2, true, \"pun\", 3, 3, 2},\n\t\t{\"apoplexie\", 2, 4, 3, true, \"apoplex\", 2, 4, 3},\n\t\t{\"désira\", 3, 5, 2, true, \"dés\", 3, 3, 2},\n\t\t{\"étourdi\", 2, 5, 3, true, \"étourd\", 2, 5, 3},\n\t\t{\"giovanni\", 4, 6, 2, true, \"giovann\", 4, 6, 2},\n\t\t{\"apprécie\", 2, 6, 5, true, \"appréc\", 2, 6, 5},\n\t\t{\"poésies\", 4, 7, 2, true, \"poés\", 4, 4, 2},\n\t\t{\"pairie\", 4, 6, 2, true, \"pair\", 4, 4, 2},\n\t\t{\"sortit\", 3, 6, 2, true, \"sort\", 3, 4, 2},\n\t\t{\"subi\", 3, 4, 2, true, \"sub\", 3, 3, 2},\n\t\t{\"aigrirait\", 3, 6, 3, true, \"aigr\", 3, 4, 3},\n\t\t{\"assailli\", 2, 6, 4, true, \"assaill\", 2, 6, 4},\n\t\t{\"bertolotti\", 3, 6, 2, true, \"bertolott\", 3, 6, 2},\n\t\t{\"recouvrir\", 3, 6, 2, true, \"recouvr\", 3, 6, 2},\n\t\t{\"visconti\", 3, 6, 2, true, \"viscont\", 3, 6, 2},\n\t\t{\"surgir\", 3, 6, 2, true, \"surg\", 3, 4, 2},\n\t\t{\"remercie\", 3, 5, 2, true, \"remerc\", 3, 5, 2},\n\t\t{\"joUissaIent\", 3, 5, 2, true, \"joU\", 3, 3, 2},\n\t\t{\"bondissant\", 3, 6, 2, true, \"bond\", 3, 4, 2},\n\t\t{\"saisi\", 4, 5, 2, true, \"sais\", 4, 4, 2},\n\t\t{\"missouri\", 3, 7, 2, true, \"missour\", 3, 7, 2},\n\t\t{\"remplirent\", 3, 7, 2, true, \"rempl\", 3, 5, 2},\n\t\t{\"envahi\", 2, 5, 4, true, \"envah\", 2, 5, 4},\n\t\t{\"tandis\", 3, 6, 2, true, \"tand\", 3, 4, 2},\n\t\t{\"trahit\", 4, 6, 3, 
true, \"trah\", 4, 4, 3},\n\t\t{\"trahissaIent\", 4, 6, 3, true, \"trah\", 4, 4, 3},\n\t\t{\"réunie\", 4, 6, 2, true, \"réun\", 4, 4, 2},\n\t\t{\"avarie\", 2, 4, 3, true, \"avar\", 2, 4, 3},\n\t\t{\"dilettanti\", 3, 5, 2, true, \"dilettant\", 3, 5, 2},\n\t\t{\"raidie\", 4, 6, 2, true, \"raid\", 4, 4, 2},\n\t\t{\"écuries\", 2, 4, 3, true, \"écur\", 2, 4, 3},\n\t\t{\"recouvrît\", 3, 6, 2, true, \"recouvr\", 3, 6, 2},\n\t\t{\"parsis\", 3, 6, 3, true, \"pars\", 3, 4, 3},\n\t\t{\"monti\", 3, 5, 2, true, \"mont\", 3, 4, 2},\n\t\t{\"reproduisit\", 3, 6, 2, true, \"reproduis\", 3, 6, 2},\n\t\t{\"étendit\", 2, 4, 3, true, \"étend\", 2, 4, 3},\n\t\t{\"suffi\", 3, 5, 2, true, \"suff\", 3, 4, 2},\n\t\t{\"pillaji\", 3, 6, 2, true, \"pillaj\", 3, 6, 2},\n\t\t{\"rougir\", 4, 6, 2, true, \"roug\", 4, 4, 2},\n\t\t{\"désirez\", 3, 5, 2, true, \"dés\", 3, 3, 2},\n\t\t{\"subit\", 3, 5, 2, true, \"sub\", 3, 3, 2},\n\t\t{\"fondirent\", 3, 6, 2, true, \"fond\", 3, 4, 2},\n\t\t{\"coqUineries\", 3, 6, 2, true, \"coqUiner\", 3, 6, 2},\n\t\t{\"venir\", 3, 5, 2, true, \"ven\", 3, 3, 2},\n\t\t{\"plaidoirie\", 5, 8, 3, true, \"plaidoir\", 5, 8, 3},\n\t\t{\"fournissant\", 4, 7, 2, true, \"fourn\", 4, 5, 2},\n\t\t{\"bonzeries\", 3, 6, 2, true, \"bonzer\", 3, 6, 2},\n\t\t{\"flétri\", 4, 6, 3, true, \"flétr\", 4, 5, 3},\n\t\t{\"faillit\", 4, 7, 2, true, \"faill\", 4, 5, 2},\n\t\t{\"hardie\", 3, 6, 2, true, \"hard\", 3, 4, 2},\n\t\t{\"compagnie\", 3, 6, 2, true, \"compagn\", 3, 6, 2},\n\t\t{\"vernis\", 3, 6, 2, true, \"vern\", 3, 4, 2},\n\t\t{\"attendit\", 2, 5, 4, true, \"attend\", 2, 5, 4},\n\t\t{\"blanchies\", 4, 9, 3, true, \"blanch\", 4, 6, 3},\n\t\t{\"choisie\", 5, 7, 3, true, \"chois\", 5, 5, 3},\n\t\t{\"rafraîchir\", 3, 7, 2, true, \"rafraîch\", 3, 7, 2},\n\t\t{\"choisir\", 5, 7, 3, true, \"chois\", 5, 5, 3},\n\t\t{\"nourrisse\", 4, 7, 2, true, \"nourr\", 4, 5, 2},\n\t\t{\"chancellerie\", 4, 7, 3, true, \"chanceller\", 4, 7, 3},\n\t\t{\"repartie\", 3, 5, 2, true, \"repart\", 3, 5, 
2},\n\t\t{\"redira\", 3, 5, 2, true, \"red\", 3, 3, 2},\n\t\t{\"sentira\", 3, 6, 2, true, \"sent\", 3, 4, 2},\n\t\t{\"surgirait\", 3, 6, 2, true, \"surg\", 3, 4, 2},\n\t\t{\"cani\", 3, 4, 2, true, \"can\", 3, 3, 2},\n\t\t{\"gratis\", 4, 6, 3, true, \"grat\", 4, 4, 3},\n\t\t{\"médît\", 3, 5, 2, true, \"méd\", 3, 3, 2},\n\t\t{\"avertis\", 2, 4, 3, true, \"avert\", 2, 4, 3},\n\t\t{\"chirurgie\", 4, 6, 3, true, \"chirurg\", 4, 6, 3},\n\t\t{\"ironie\", 2, 4, 3, true, \"iron\", 2, 4, 3},\n\t\t{\"punîtes\", 3, 5, 2, true, \"pun\", 3, 3, 2},\n\t\t{\"compromis\", 3, 7, 2, true, \"comprom\", 3, 7, 2},\n\t\t{\"simonie\", 3, 5, 2, true, \"simon\", 3, 5, 2},\n\t}\n\tromance.RunStepTest(t, step2a, testCases)\n}", "func ok(tb testing.TB, err error) {\n\ttb.Helper()\n\tif err != nil {\n\t\ttb.Fatalf(\"\\033[31m unexpected error: %s\\033[39m\\n\\n\", err.Error())\n\t}\n}", "func testValidateIATBHStandardEntryClassCode(t testing.TB) {\n\tbh := mockIATBatchHeaderFF()\n\tbh.StandardEntryClassCode = \"ABC\"\n\terr := bh.Validate()\n\tif !base.Match(err, ErrSECCode) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func testHtlcSuccess(t *testing.T, resolution lnwallet.IncomingHtlcResolution,\n\tresolve func(*htlcSuccessResolverTestContext),\n\tsweepTx *wire.MsgTx, reports ...*channeldb.ResolverReport) {\n\n\tdefer timeout(t)()\n\n\tctx := newHtlcSuccessResolverTextContext(t)\n\n\t// Replace our checkpoint with one which will push reports into a\n\t// channel for us to consume. 
We replace this function on the resolver\n\t// itself because it is created by the test context.\n\treportChan := make(chan *channeldb.ResolverReport)\n\tctx.resolver.Checkpoint = func(_ ContractResolver,\n\t\treports ...*channeldb.ResolverReport) er.R {\n\n\t\t// Send all of our reports into the channel.\n\t\tfor _, report := range reports {\n\t\t\treportChan <- report\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tctx.resolver.htlcResolution = resolution\n\n\t// We set the sweepTx to be non-nil and mark the output as already\n\t// incubating so that we do not need to set test values for crafting\n\t// our own sweep transaction.\n\tctx.resolver.sweepTx = sweepTx\n\tctx.resolver.outputIncubating = true\n\n\t// Start the htlc success resolver.\n\tctx.resolve()\n\n\t// Trigger and event that will resolve our test context.\n\tresolve(ctx)\n\n\tfor _, report := range reports {\n\t\tassertResolverReport(t, reportChan, report)\n\t}\n\n\t// Wait for the resolver to fully complete.\n\tctx.waitForResult()\n}", "func (i *InvariantsChecker) assertInitWasCalled() bool {\n\tif i.initStatus != colexecop.OperatorInitialized {\n\t\tif c, ok := i.Input.(*Columnarizer); ok {\n\t\t\tif c.removedFromFlow {\n\t\t\t\t// This is a special case in which we allow for the operator to\n\t\t\t\t// not be initialized. 
Next and DrainMeta calls are noops in\n\t\t\t\t// this case, so the caller should short-circuit.\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tcolexecerror.InternalError(errors.AssertionFailedf(\"Init hasn't been called, input is %T\", i.Input))\n\t}\n\treturn false\n}", "func (ma *FakeActor) GoodCall(ctx exec.VMContext) (uint8, error) {\n\tfastore := &FakeActorStorage{}\n\t_, err := WithState(ctx, fastore, func() (interface{}, error) {\n\t\tfastore.Changed = true\n\t\treturn nil, nil\n\t})\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn 0, nil\n}", "func TestMacroSubsErrCases(t *testing.T) {\n\tbid1, bid2 := openrtb.Bid{Id: \"bid1\"}, openrtb.Bid{Id: \"bid2\"}\n\tseatBidWith2Bids := openrtb.SeatBid{\n\t\tSeat: \"seatbidWith2Bids\",\n\t\tBids: []*openrtb.Bid{&bid1, &bid2},\n\t}\n\tbidResWith2Bids := &openrtb.BidResponse{\n\t\tId: \"bidResWith2Bids\",\n\t\tSeatBids: []*openrtb.SeatBid{&seatBidWith2Bids},\n\t}\n\tseatBid1 := openrtb.SeatBid{\n\t\tSeat: \"seatbid1\",\n\t\tBids: []*openrtb.Bid{&bid1},\n\t}\n\tseatBid2 := openrtb.SeatBid{\n\t\tSeat: \"seatbid2\",\n\t\tBids: []*openrtb.Bid{&bid2},\n\t}\n\tbidResWith2SeatBids := &openrtb.BidResponse{\n\t\tId: \"bidResWith2Seatbids\",\n\t\tSeatBids: []*openrtb.SeatBid{&seatBid1, &seatBid2},\n\t}\n\tresult, err := openrtb.MacroSubs(\"${AUCTION_ID}${AUCTION_BID_ID}\", bidResWith2Bids, testAuctionResult)\n\tif err != openrtb.ErrIncorrectBidCount {\n\t\tt.Error(\"bidResWith2Bids should give ErrIncorrectBidCount\")\n\t}\n\tif result != \"\" {\n\t\tt.Error(\"MacroSubs should empty string when there is an error\")\n\t}\n\tresult, err = openrtb.MacroSubs(\"${AUCTION_ID}${AUCTION_SEAT_ID}\", bidResWith2SeatBids, testAuctionResult)\n\tif err != openrtb.ErrIncorrectSeatCount {\n\t\tt.Error(\"bidResWith2SeatBids should give ErrIncorrectSeatCount\")\n\t}\n\tif result != \"\" {\n\t\tt.Error(\"MacroSubs should empty string when there is an error\")\n\t}\n}", "func Ok(tb testing.TB, err error) {\r\n\tif err != nil {\r\n\t\t_, file, 
line, _ := runtime.Caller(1)\r\n\t\tfmt.Printf(\"%s:%d: unexpected error: %s\\n\\n\", filepath.Base(file), line, err.Error())\r\n\t\ttb.FailNow()\r\n\t}\r\n}", "func TestEmptyPrewrite4A(t *testing.T) {\n}", "func testHelperSuccess(c *gocheck.C, name string,\n expectedResult interface{},\n closure func() interface{}) {\n var result interface{}\n defer (func() {\n if err := recover(); err != nil {\n panic(err)\n }\n checkState(c, result,\n &expectedState{\n name: name,\n result: expectedResult,\n failed: false,\n log: \"\",\n })\n })()\n result = closure()\n}", "func VGETMANTPS_SAE(ops ...operand.Op) { ctx.VGETMANTPS_SAE(ops...) }", "func (this *commonResult) Fail() {\n\tthis.was_successful = false\n}", "func TestSinglePrewrite4A(t *testing.T) {\n}", "func TestTransitionSystem_IsAPDeterministic3(t *testing.T) {\n\tts1 := TransitionSystem_GetSimpleTS4()\n\n\tif ts1.IsAPDeterministic() {\n\t\tt.Errorf(\"Test is given a transition system that is NOT AP-deterministic, but function claims that it is!\")\n\t}\n}", "func Ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}", "func (bldr *stackBuilder) Succeeded() bool {\n\treturn !bldr.failed\n}", "func TestSSOFlow(t *testing.T) {\n\ttests := []testCase{\n\t\t{\n\t\t\t\"fetch; sso\",\n\t\t\t[]string{\"fetch\", \"--type\", \"sso\", \"--email\", \"integration/fixtures/fake-ssocli.sh\", \"--scope\", \"pubsub\", \"--ssocli\", \"sh\"},\n\t\t\t\"fetch-sso.golden\",\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t\"fetch; sso; old interface\",\n\t\t\t[]string{\"fetch\", \"--sso\", \"--ssocli\", \"sh\", \"integration/fixtures/fake-ssocli.sh\", \"pubsub\"},\n\t\t\t\"fetch-sso.golden\",\n\t\t\tfalse,\n\t\t},\n\t}\n\trunTestScenarios(t, tests)\n}", "func SALQ(ci, mr operand.Op) { ctx.SALQ(ci, mr) }", "func testACC(t *testing.T) {\n\tskip := os.Getenv(\"TF_ACC\") == \"\" 
&& os.Getenv(\"TF_SWIFT_TEST\") == \"\"\n\tif skip {\n\t\tt.Log(\"swift backend tests require setting TF_ACC or TF_SWIFT_TEST\")\n\t\tt.Skip()\n\t}\n\tt.Log(\"swift backend acceptance tests enabled\")\n}", "func (sv *StubbedValidator) StubSuccessValidateRestart() {\n\tsv.revalidationError = nil\n}", "func TestCommitConflictRollback4A(t *testing.T) {\n}", "func (s *HelpersS) TestCheckSucceedWithExpected(c *gocheck.C) {\n checker := &MyChecker{}\n testHelperSuccess(c, \"Check(1, checker, 2)\", true, func() interface{} {\n return c.Check(1, checker, 2)\n })\n if checker.obtained != 1 || checker.expected != 2 {\n c.Fatalf(\"Bad (obtained, expected) values for check (%d, %d)\",\n checker.obtained, checker.expected)\n }\n}", "func Deflake(t *testing.T, test func(kv.Txn)) {\n\tsuccessful := NewFlaky(t, 0)\n\tt.Run(\"success\", func(*testing.T) {\n\t\ttest(successful)\n\t})\n\tfor want := 1; want < successful.ErrCheckCount(); want++ {\n\t\tt.Run(Flake(want).Error(), func(t *testing.T) {\n\t\t\tflaky := NewFlaky(t, want)\n\t\t\tdefer func() {\n\t\t\t\tif r := recover(); r == nil {\n\t\t\t\t\tt.Error(\"error did not cause test failure: \" + flaky.StackTrace())\n\t\t\t\t}\n\t\t\t}()\n\t\t\ttest(flaky)\n\t\t})\n\t}\n}", "func Test_step1(t *testing.T) {\n\ttestCases := []romance.StepTestCase{\n\t\t{\"rapidement\", 3, 5, 2, true, \"rapid\", 3, 5, 2},\n\t\t{\"paresseuse\", 3, 5, 3, true, \"paress\", 3, 5, 3},\n\t\t{\"prosaïqUement\", 4, 7, 3, true, \"prosaïqU\", 4, 7, 3},\n\t\t{\"nonchalance\", 3, 7, 2, true, \"nonchal\", 3, 7, 2},\n\t\t{\"apostoliqUes\", 2, 4, 3, true, \"apostol\", 2, 4, 3},\n\t\t{\"assiduités\", 2, 5, 4, true, \"assidu\", 2, 5, 4},\n\t\t{\"philosophiqUement\", 4, 6, 3, true, \"philosoph\", 4, 6, 3},\n\t\t{\"despotiqUement\", 3, 6, 2, true, \"despot\", 3, 6, 2},\n\t\t{\"incontestablement\", 2, 5, 4, true, \"incontest\", 2, 5, 4},\n\t\t{\"diminution\", 3, 5, 2, true, \"diminu\", 3, 5, 2},\n\t\t{\"séditieuse\", 3, 5, 2, true, \"séditi\", 3, 5, 
2},\n\t\t{\"anonymement\", 2, 4, 3, true, \"anonym\", 2, 4, 3},\n\t\t{\"conservation\", 3, 6, 2, true, \"conserv\", 3, 6, 2},\n\t\t{\"fâcheuses\", 3, 7, 2, true, \"fâcheux\", 3, 7, 2},\n\t\t{\"houleuse\", 4, 7, 2, true, \"houleux\", 4, 7, 2},\n\t\t{\"historiqUes\", 3, 6, 2, true, \"histor\", 3, 6, 2},\n\t\t{\"impérieusement\", 2, 5, 4, true, \"impéri\", 2, 5, 4},\n\t\t{\"complaisances\", 3, 8, 2, true, \"complais\", 3, 8, 2},\n\t\t{\"confessionnaux\", 3, 6, 2, true, \"confessionnal\", 3, 6, 2},\n\t\t{\"grandement\", 4, 7, 3, true, \"grand\", 4, 5, 3},\n\t\t{\"passablement\", 3, 6, 2, true, \"passabl\", 3, 6, 2},\n\t\t{\"strictement\", 5, 8, 4, true, \"strict\", 5, 6, 4},\n\t\t{\"physiqUement\", 4, 6, 3, true, \"physiqU\", 4, 6, 3},\n\t\t{\"serieusement\", 3, 7, 2, true, \"serieux\", 3, 7, 2},\n\t\t{\"roulement\", 4, 6, 2, true, \"roul\", 4, 4, 2},\n\t\t{\"appartement\", 2, 5, 4, true, \"appart\", 2, 5, 4},\n\t\t{\"reconnaissance\", 3, 5, 2, true, \"reconnaiss\", 3, 5, 2},\n\t\t{\"aigrement\", 3, 6, 3, true, \"aigr\", 3, 4, 3},\n\t\t{\"impertinences\", 2, 5, 4, true, \"impertinent\", 2, 5, 4},\n\t\t{\"parlement\", 3, 6, 3, true, \"parl\", 3, 4, 3},\n\t\t{\"malicieux\", 3, 5, 2, true, \"malici\", 3, 5, 2},\n\t\t{\"suffisance\", 3, 6, 2, true, \"suffis\", 3, 6, 2},\n\t\t{\"prémédité\", 4, 6, 3, true, \"préméd\", 4, 6, 3},\n\t\t{\"métalliqUes\", 3, 5, 2, true, \"métall\", 3, 5, 2},\n\t\t{\"météorologiste\", 3, 6, 2, true, \"météorolog\", 3, 6, 2},\n\t\t{\"prononciation\", 4, 6, 3, true, \"prononci\", 4, 6, 3},\n\t\t{\"nombreuse\", 3, 8, 2, true, \"nombreux\", 3, 8, 2},\n\t\t{\"extatiqUe\", 2, 5, 4, true, \"extat\", 2, 5, 4},\n\t\t{\"magnifiqUement\", 3, 6, 2, true, \"magnif\", 3, 6, 2},\n\t\t{\"gymnastiqUe\", 3, 6, 2, true, \"gymnast\", 3, 6, 2},\n\t\t{\"dramatiqUe\", 4, 6, 3, true, \"dramat\", 4, 6, 3},\n\t\t{\"simplicité\", 3, 7, 2, true, \"simpliqU\", 3, 7, 2},\n\t\t{\"roYalistes\", 3, 5, 2, true, \"roYal\", 3, 5, 2},\n\t\t{\"fortifications\", 3, 6, 2, true, 
\"fortif\", 3, 6, 2},\n\t\t{\"attendrissement\", 2, 5, 4, true, \"attendr\", 2, 5, 4},\n\t\t{\"respectueusement\", 3, 6, 2, true, \"respectu\", 3, 6, 2},\n\t\t{\"patriotisme\", 3, 7, 2, true, \"patriot\", 3, 7, 2},\n\t\t{\"curieuse\", 3, 7, 2, true, \"curieux\", 3, 7, 2},\n\t\t{\"fascination\", 3, 6, 2, true, \"fascin\", 3, 6, 2},\n\t\t{\"effectivement\", 2, 5, 4, true, \"effect\", 2, 5, 4},\n\t\t{\"condoléance\", 3, 6, 2, true, \"condolé\", 3, 6, 2},\n\t\t{\"malignité\", 3, 5, 2, true, \"malign\", 3, 5, 2},\n\t\t{\"capricieuse\", 3, 6, 2, true, \"caprici\", 3, 6, 2},\n\t\t{\"applaudissements\", 2, 7, 5, true, \"applaud\", 2, 7, 5},\n\t\t{\"praticable\", 4, 6, 3, true, \"pratic\", 4, 6, 3},\n\t\t{\"rivaux\", 3, 6, 2, true, \"rival\", 3, 5, 2},\n\t\t{\"augmentation\", 3, 6, 3, true, \"augment\", 3, 6, 3},\n\t\t{\"ameublement\", 2, 5, 3, true, \"ameubl\", 2, 5, 3},\n\t\t{\"honorables\", 3, 5, 2, true, \"honor\", 3, 5, 2},\n\t\t{\"effervescence\", 2, 5, 4, true, \"effervescent\", 2, 5, 4},\n\t\t{\"excentricité\", 2, 5, 4, true, \"excentr\", 2, 5, 4},\n\t\t{\"misérable\", 3, 5, 2, true, \"misér\", 3, 5, 2},\n\t\t{\"capitulation\", 3, 5, 2, true, \"capitul\", 3, 5, 2},\n\t\t{\"enjoUement\", 2, 5, 4, true, \"enjoU\", 2, 5, 4},\n\t\t{\"sévérité\", 3, 5, 2, true, \"sévér\", 3, 5, 2},\n\t\t{\"perplexités\", 3, 7, 2, true, \"perplex\", 3, 7, 2},\n\t\t{\"consentement\", 3, 6, 2, true, \"consent\", 3, 6, 2},\n\t\t{\"convocation\", 3, 6, 2, true, \"convoc\", 3, 6, 2},\n\t\t{\"assurances\", 2, 5, 4, true, \"assur\", 2, 5, 4},\n\t\t{\"ébloUissement\", 2, 5, 4, true, \"ébloU\", 2, 5, 4},\n\t\t{\"méridionaux\", 3, 5, 2, true, \"méridional\", 3, 5, 2},\n\t\t{\"dérangements\", 3, 5, 2, true, \"dérang\", 3, 5, 2},\n\t\t{\"domination\", 3, 5, 2, true, \"domin\", 3, 5, 2},\n\t\t{\"incroYable\", 2, 6, 5, true, \"incroY\", 2, 6, 5},\n\t\t{\"réjoUissances\", 3, 5, 2, true, \"réjoUiss\", 3, 5, 2},\n\t\t{\"décadence\", 3, 5, 2, true, \"décadent\", 3, 5, 2},\n\t\t{\"bâillement\", 4, 7, 2, 
true, \"bâill\", 4, 5, 2},\n\t\t{\"habillement\", 3, 5, 2, true, \"habill\", 3, 5, 2},\n\t\t{\"irréparablement\", 2, 5, 4, true, \"irrépar\", 2, 5, 4},\n\t\t{\"diplomatiqUes\", 3, 6, 2, true, \"diplomat\", 3, 6, 2},\n\t\t{\"distribution\", 3, 7, 2, true, \"distribu\", 3, 7, 2},\n\t\t{\"pétulance\", 3, 5, 2, true, \"pétul\", 3, 5, 2},\n\t\t{\"considérable\", 3, 6, 2, true, \"considér\", 3, 6, 2},\n\t\t{\"éducation\", 2, 4, 3, true, \"éduc\", 2, 4, 3},\n\t\t{\"indications\", 2, 5, 4, true, \"indiqU\", 2, 5, 4},\n\t\t{\"cupidité\", 3, 5, 2, true, \"cupid\", 3, 5, 2},\n\t\t{\"traîtreusement\", 5, 9, 3, true, \"traîtreux\", 5, 9, 3},\n\t\t{\"silencieuse\", 3, 5, 2, true, \"silenci\", 3, 5, 2},\n\t\t{\"pessimisme\", 3, 6, 2, true, \"pessim\", 3, 6, 2},\n\t\t{\"préoccupation\", 5, 8, 3, true, \"préoccup\", 5, 8, 3},\n\t\t// Special cases that should return false despite\n\t\t// being changed. They \"don't count\".\n\t\t{\"compliment\", 3, 7, 2, false, \"compli\", 3, 6, 2},\n\t\t{\"vraiment\", 5, 7, 3, false, \"vrai\", 4, 4, 3},\n\t\t{\"remercîment\", 3, 5, 2, false, \"remercî\", 3, 5, 2},\n\t\t{\"puissamment\", 4, 7, 2, false, \"puissant\", 4, 7, 2},\n\t\t{\"absolument\", 2, 5, 4, false, \"absolu\", 2, 5, 4},\n\t\t{\"décidément\", 3, 5, 2, false, \"décidé\", 3, 5, 2},\n\t\t{\"condiments\", 3, 6, 2, false, \"condi\", 3, 5, 2},\n\t}\n\tromance.RunStepTest(t, step1, testCases)\n\n}", "func TestShortCircuitAnd(t *testing.T) {\n\tisCalled := false\n\tok := false\n\tok = ok && isCalledFn(&isCalled)\n\tif isCalled != false {\n\t\tt.Fail()\n\t}\n}", "func (o *SearchAclsNotFound) IsSuccess() bool {\n\treturn false\n}", "func cgoCheckResult(val interface{}) {\n\tif debug.cgocheck == 0 {\n\t\treturn\n\t}\n\n\tep := efaceOf(&val)\n\tt := ep._type\n\tcgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, false, cgoResultFail)\n}", "func (cea *CEA) sanityCheck() error {\n\tif cea.ResultCode == 0 {\n\t\treturn ErrMissingResultCode\n\t}\n\tif len(cea.OriginHost) == 0 {\n\t\treturn 
ErrMissingOriginHost\n\t}\n\tif len(cea.OriginRealm) == 0 {\n\t\treturn ErrMissingOriginRealm\n\t}\n\treturn nil\n}", "func TestAcceptanceTests(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tat func(selina.Worker) error\n\t\tw selina.Worker\n\t\twantErr error\n\t}{\n\t\t{\n\t\t\tname: \"Cancel Process OK\",\n\t\t\tat: workers.ATProcessCancel,\n\t\t\tw: &idealWorker{},\n\t\t\twantErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"Close input OK\",\n\t\t\tat: workers.ATProcessCloseInput,\n\t\t\tw: &idealWorker{},\n\t\t\twantErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"Close output OK\",\n\t\t\tat: workers.ATProcessCloseOutput,\n\t\t\tw: &idealWorker{},\n\t\t\twantErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"Cancel ignore context\",\n\t\t\tat: workers.ATProcessCancel,\n\t\t\tw: &badContextWorker{},\n\t\t\twantErr: workers.ErrProcessIgnoreCtx,\n\t\t},\n\t\t{\n\t\t\tname: \"Cancel bad propagation\",\n\t\t\tat: workers.ATProcessCancel,\n\t\t\tw: &badCtxPropagation{},\n\t\t\twantErr: errBadCtxError,\n\t\t},\n\t\t{\n\t\t\tname: \"Close input is ignored\",\n\t\t\tat: workers.ATProcessCloseInput,\n\t\t\tw: &badInputWorker{},\n\t\t\twantErr: workers.ErrNotTerminatedOnCloseInput,\n\t\t},\n\t\t{\n\t\t\tname: \"output is not closed\",\n\t\t\tat: workers.ATProcessCloseOutput,\n\t\t\tw: &badOutputWorker{},\n\t\t\twantErr: workers.ErrOutputNotClosed,\n\t\t},\n\t\t{\n\t\t\tname: \"No lock test on output\",\n\t\t\tat: workers.ATProcessCloseOutput,\n\t\t\tw: &workerN{N: 10, Msg: []byte(\"message\")},\n\t\t\twantErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"No lock test on input\",\n\t\t\tat: workers.ATProcessCloseInput,\n\t\t\tw: &workerN{N: 10, Msg: []byte(\"message\")},\n\t\t\twantErr: nil,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif err := tt.at(tt.w); err != tt.wantErr {\n\t\t\t\tt.Fatalf(\"Acceptance test is broken: got unexpected err = %v\", err)\n\t\t\t}\n\t\t})\n\t}\n}", "func CallRcaProgram(addr uint16) { // 0NNN\n\tlog.Fatal(\"Call RCA 
Program called\")\n}", "func TestAPIFailureSet(t *testing.T) {\n\ttests.ResetLog()\n\tdefer tests.DisplayLog()\n\n\tqsName := prefix + \"_unknown\"\n\n\tconst fixture = \"basic.json\"\n\tset1, err := qfix.Get(fixture)\n\tif err != nil {\n\t\tt.Fatalf(\"\\t%s\\tShould load query record from file : %v\", tests.Failed, err)\n\t}\n\tt.Logf(\"\\t%s\\tShould load query record from file.\", tests.Success)\n\n\tt.Log(\"Given the need to validate failure of API with bad session.\")\n\t{\n\t\tt.Log(\"When giving a nil session\")\n\t\t{\n\t\t\terr := query.EnsureIndexes(tests.Context, nil, set1)\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be refused create by api with bad session\", tests.Failed)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be refused create by api with bad session: %s\", tests.Success, err)\n\n\t\t\terr = query.Upsert(tests.Context, nil, set1)\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be refused create by api with bad session\", tests.Failed)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be refused create by api with bad session: %s\", tests.Success, err)\n\n\t\t\t_, err = query.GetNames(tests.Context, nil)\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be refused get request by api with bad session\", tests.Failed)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be refused get request by api with bad session: %s\", tests.Success, err)\n\n\t\t\t_, err = query.GetAll(tests.Context, nil, nil)\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be refused get request by api with bad session\", tests.Failed)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be refused get request by api with bad session: %s\", tests.Success, err)\n\n\t\t\t_, err = query.GetByName(tests.Context, nil, qsName)\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be refused get request by api with bad session\", tests.Failed)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be refused get request by api with bad session: %s\", tests.Success, err)\n\n\t\t\t_, err = 
query.GetLastHistoryByName(tests.Context, nil, qsName)\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be refused get request by api with bad session\", tests.Failed)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be refused get request by api with bad session: %s\", tests.Success, err)\n\n\t\t\terr = query.Delete(tests.Context, nil, qsName)\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be refused delete by api with bad session\", tests.Failed)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be refused delete by api with bad session: %s\", tests.Success, err)\n\t\t}\n\t}\n}", "func VMULSS_RZ_SAE(ops ...operand.Op) { ctx.VMULSS_RZ_SAE(ops...) }", "func TestApcg(t *testing.T) {\n\tconst fname = \"Apcg\"\n\n\tvar date1, date2 float64\n\tvar ebpv [2][3]float64\n\tvar ehp [3]float64\n\tvar astr ASTROM\n\n\tdate1 = 2456165.5\n\tdate2 = 0.401182685\n\tebpv[0][0] = 0.901310875\n\tebpv[0][1] = -0.417402664\n\tebpv[0][2] = -0.180982288\n\tebpv[1][0] = 0.00742727954\n\tebpv[1][1] = 0.0140507459\n\tebpv[1][2] = 0.00609045792\n\tehp[0] = 0.903358544\n\tehp[1] = -0.415395237\n\tehp[2] = -0.180084014\n\n\ttests := []struct {\n\t\tref string\n\t\tfn func(a, b float64, c [2][3]float64,\n\t\t\td [3]float64, e ASTROM) ASTROM\n\t}{\n\t\t{\"cgo\", CgoApcg},\n\t\t{\"go\", GoApcg},\n\t}\n\n\tfor _, test := range tests {\n\t\ttname := fname + \" \" + test.ref\n\t\tastrom := test.fn(date1, date2, ebpv, ehp, astr)\n\n\t\tvvd(t, astrom.pmt, 12.65133794027378508, 1e-11,\n\t\t\ttname, \"pmt\")\n\n\t\tvvd(t, astrom.eb[0], 0.901310875, 1e-12,\n\t\t\ttname, \"eb(1)\")\n\t\tvvd(t, astrom.eb[1], -0.417402664, 1e-12,\n\t\t\ttname, \"eb(2)\")\n\t\tvvd(t, astrom.eb[2], -0.180982288, 1e-12,\n\t\t\ttname, \"eb(3)\")\n\n\t\tvvd(t, astrom.eh[0], 0.8940025429324143045, 1e-12,\n\t\t\ttname, \"eh(1)\")\n\t\tvvd(t, astrom.eh[1], -0.4110930268679817955, 1e-12,\n\t\t\ttname, \"eh(2)\")\n\t\tvvd(t, astrom.eh[2], -0.1782189004872870264, 1e-12,\n\t\t\ttname, \"eh(3)\")\n\n\t\tvvd(t, astrom.em, 
1.010465295811013146, 1e-12,\n\t\t\ttname, \"em\")\n\n\t\tvvd(t, astrom.v[0], 0.4289638913597693554e-4, 1e-16,\n\t\t\ttname, \"v(1)\")\n\t\tvvd(t, astrom.v[1], 0.8115034051581320575e-4, 1e-16,\n\t\t\ttname, \"v(2)\")\n\t\tvvd(t, astrom.v[2], 0.3517555136380563427e-4, 1e-16,\n\t\t\ttname, \"v(3)\")\n\n\t\tvvd(t, astrom.bm1, 0.9999999951686012981, 1e-12,\n\t\t\ttname, \"bm1\")\n\n\t\tvvd(t, astrom.bpn[0][0], 1.0, 0.0, tname, \"bpn(1,1)\")\n\t\tvvd(t, astrom.bpn[1][0], 0.0, 0.0, tname, \"bpn(2,1)\")\n\t\tvvd(t, astrom.bpn[2][0], 0.0, 0.0, tname, \"bpn(3,1)\")\n\n\t\tvvd(t, astrom.bpn[0][1], 0.0, 0.0, tname, \"bpn(1,2)\")\n\t\tvvd(t, astrom.bpn[1][1], 1.0, 0.0, tname, \"bpn(2,2)\")\n\t\tvvd(t, astrom.bpn[2][1], 0.0, 0.0, tname, \"bpn(3,2)\")\n\n\t\tvvd(t, astrom.bpn[0][2], 0.0, 0.0, tname, \"bpn(1,3)\")\n\t\tvvd(t, astrom.bpn[1][2], 0.0, 0.0, tname, \"bpn(2,3)\")\n\t\tvvd(t, astrom.bpn[2][2], 1.0, 0.0, tname, \"bpn(3,3)\")\n\t}\n}", "func (c *B) FailNow()", "func TestTransitionSystem_IsActionDeterministic2(t *testing.T) {\n\tts1 := TransitionSystem_GetSimpleTS3()\n\n\tif !ts1.IsActionDeterministic() {\n\t\tt.Errorf(\"Test is given a transition system that is action deterministic, but function claims that it is not!\")\n\t}\n}", "func TestMissedBlockAndRankStreakCounter(t *testing.T) {\n\tapp := simapp.Setup(false)\n\tctx := app.BaseApp.NewContext(false, tmproto.Header{})\n\n\taddrDels := simapp.AddTestAddrsIncremental(app, ctx, 1, sdk.TokensFromConsensusPower(200, sdk.DefaultPowerReduction))\n\tvalAddrs := simapp.ConvertAddrsToValAddrs(addrDels)\n\tpks := simapp.CreateTestPubKeys(1)\n\taddr, val := valAddrs[0], pks[0]\n\tvalAddr := sdk.ValAddress(addr)\n\ttstaking := teststaking.NewHelper(t, ctx, app.CustomStakingKeeper, app.CustomGovKeeper)\n\tctx = ctx.WithBlockHeight(1)\n\n\t// Validator created\n\ttstaking.CreateValidator(addr, val, true)\n\n\tstaking.EndBlocker(ctx, app.CustomStakingKeeper)\n\n\t// Now a validator, for two 
blocks\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, true)\n\tctx = ctx.WithBlockHeight(2)\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\n\tv := tstaking.CheckValidator(valAddr, stakingtypes.Active)\n\trequire.Equal(t, v.Rank, int64(1))\n\trequire.Equal(t, v.Streak, int64(1))\n\n\tinfo, found := app.CustomSlashingKeeper.GetValidatorSigningInfo(ctx, sdk.ConsAddress(val.Address()))\n\trequire.True(t, found)\n\trequire.Equal(t, int64(1), info.MischanceConfidence)\n\trequire.Equal(t, int64(0), info.Mischance)\n\trequire.Equal(t, int64(1), info.MissedBlocksCounter)\n\trequire.Equal(t, int64(1), info.ProducedBlocksCounter)\n\n\theight := ctx.BlockHeight() + 1\n\tfor i := int64(0); i < 10; i++ {\n\t\tctx = ctx.WithBlockHeight(height + i)\n\t\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\t}\n\tctx = ctx.WithBlockHeight(height + 10)\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, true)\n\tinfo, found = app.CustomSlashingKeeper.GetValidatorSigningInfo(ctx, sdk.ConsAddress(val.Address()))\n\trequire.True(t, found)\n\trequire.Equal(t, int64(0), info.MischanceConfidence)\n\trequire.Equal(t, int64(0), info.Mischance)\n\trequire.Equal(t, int64(11), info.MissedBlocksCounter)\n\trequire.Equal(t, int64(2), info.ProducedBlocksCounter)\n\n\tv = tstaking.CheckValidator(valAddr, stakingtypes.Active)\n\trequire.Equal(t, v.Rank, int64(1))\n\trequire.Equal(t, v.Streak, int64(1))\n\n\t// sign 100 blocks successfully\n\theight = ctx.BlockHeight() + 1\n\tfor i := int64(0); i < 100; i++ {\n\t\tctx = ctx.WithBlockHeight(height + i)\n\t\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, true)\n\t}\n\n\tv = tstaking.CheckValidator(valAddr, stakingtypes.Active)\n\trequire.Equal(t, v.Rank, int64(101))\n\trequire.Equal(t, v.Streak, int64(101))\n\n\t// miss one block\n\tctx = ctx.WithBlockHeight(height + 
100)\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\tv = tstaking.CheckValidator(valAddr, stakingtypes.Active)\n\trequire.Equal(t, v.Rank, int64(101))\n\trequire.Equal(t, v.Streak, int64(101))\n\n\tapp.CustomSlashingKeeper.Inactivate(ctx, sdk.ConsAddress(val.Address()))\n\tv = tstaking.CheckValidator(valAddr, stakingtypes.Inactive)\n\trequire.Equal(t, v.Rank, int64(50))\n\trequire.Equal(t, v.Streak, int64(0))\n\n\tapp.CustomSlashingKeeper.Activate(ctx, valAddr)\n\t// miss 5 blocks\n\theight = ctx.BlockHeight() + 1\n\tfor i := int64(0); i < 5; i++ {\n\t\tctx = ctx.WithBlockHeight(height + i)\n\t\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\t}\n\tv = tstaking.CheckValidator(valAddr, stakingtypes.Active)\n\trequire.Equal(t, v.Rank, int64(50))\n\trequire.Equal(t, v.Streak, int64(0))\n}", "func iscall(prog *obj.Prog, name *obj.LSym) bool {\n\tif prog == nil {\n\t\tFatal(\"iscall: prog is nil\")\n\t}\n\tif name == nil {\n\t\tFatal(\"iscall: function name is nil\")\n\t}\n\tif prog.As != obj.ACALL {\n\t\treturn false\n\t}\n\treturn name == prog.To.Sym\n}", "func TestAnySecurityRequirementMet(t *testing.T) {\n\t// Create of a map of scheme names and whether they are valid\n\tschemes := map[string]bool{\n\t\t\"a\": true,\n\t\t\"b\": true,\n\t\t\"c\": false,\n\t\t\"d\": false,\n\t}\n\n\t// Create the test cases\n\ttc := []struct {\n\t\tname string\n\t\tschemes []string\n\t\terror bool\n\t}{\n\t\t{\n\t\t\tname: \"/ok1\",\n\t\t\tschemes: []string{\"a\", \"b\"},\n\t\t\terror: false,\n\t\t},\n\t\t{\n\t\t\tname: \"/ok2\",\n\t\t\tschemes: []string{\"a\", \"c\"},\n\t\t\terror: false,\n\t\t},\n\t\t{\n\t\t\tname: \"/error\",\n\t\t\tschemes: []string{\"c\", \"d\"},\n\t\t\terror: true,\n\t\t},\n\t}\n\n\tdoc := openapi3.T{\n\t\tOpenAPI: \"3.0.0\",\n\t\tInfo: &openapi3.Info{\n\t\t\tTitle: \"MyAPI\",\n\t\t\tVersion: \"0.1\",\n\t\t},\n\t\tPaths: map[string]*openapi3.PathItem{},\n\t\tComponents: 
&openapi3.Components{\n\t\t\tSecuritySchemes: map[string]*openapi3.SecuritySchemeRef{},\n\t\t},\n\t}\n\n\t// Add the security schemes to the spec's components\n\tfor schemeName := range schemes {\n\t\tdoc.Components.SecuritySchemes[schemeName] = &openapi3.SecuritySchemeRef{\n\t\t\tValue: &openapi3.SecurityScheme{\n\t\t\t\tType: \"http\",\n\t\t\t\tScheme: \"basic\",\n\t\t\t},\n\t\t}\n\t}\n\n\t// Add the paths to the spec\n\tfor _, tc := range tc {\n\t\t// Create the security requirements from the test cases's schemes\n\t\tsecurityRequirements := openapi3.NewSecurityRequirements()\n\t\tfor _, scheme := range tc.schemes {\n\t\t\tsecurityRequirements.With(openapi3.SecurityRequirement{scheme: {}})\n\t\t}\n\n\t\t// Create the path with the security requirements\n\t\tdoc.Paths[tc.name] = &openapi3.PathItem{\n\t\t\tGet: &openapi3.Operation{\n\t\t\t\tSecurity: securityRequirements,\n\t\t\t\tResponses: openapi3.NewResponses(),\n\t\t\t},\n\t\t}\n\t}\n\n\terr := doc.Validate(context.Background())\n\trequire.NoError(t, err)\n\trouter, err := legacyrouter.NewRouter(&doc)\n\trequire.NoError(t, err)\n\n\t// Create the authentication function\n\tauthFunc := makeAuthFunc(schemes)\n\n\tfor _, tc := range tc {\n\t\t// Create the request input for the path\n\t\ttcURL, err := url.Parse(tc.name)\n\t\trequire.NoError(t, err)\n\t\thttpReq := httptest.NewRequest(http.MethodGet, tcURL.String(), nil)\n\t\troute, _, err := router.FindRoute(httpReq)\n\t\trequire.NoError(t, err)\n\t\treq := RequestValidationInput{\n\t\t\tRoute: route,\n\t\t\tOptions: &Options{\n\t\t\t\tAuthenticationFunc: authFunc,\n\t\t\t},\n\t\t}\n\n\t\t// Validate the security requirements\n\t\terr = ValidateSecurityRequirements(context.Background(), &req, *route.Operation.Security)\n\n\t\t// If there should have been an error\n\t\tif tc.error {\n\t\t\trequire.Errorf(t, err, \"an error is expected for path %q\", tc.name)\n\t\t} else {\n\t\t\trequire.NoErrorf(t, err, \"an error wasn't expected for path %q\", 
tc.name)\n\t\t}\n\t}\n}", "func TestSetWrongArgs(t *testing.T) {\n\tfmt.Println(\"Entering the test method for SetWrongArgs\")\n\tprovcc := new(SimpleAsset)\n\tstub := shim.NewMockStub(\"ANY_PARAM\", provcc)\n\t\n\tcheckInit(t, stub, [][]byte{[]byte(\"init\")})\n\n\tres := stub.MockInvoke(\"1\", [][]byte{[]byte(\"set\"), []byte(\"S52fkpF2rCEArSuwqyDA9tVjawUdrkGzbNQLaa7xJfA=\")})\n\n\tif res.Status != shim.ERROR {\n\t\tfmt.Println(\"Invoke failed\", string(res.Message))\n\t\tt.FailNow()\n\t}\n\t\n}", "func VMULSD_RZ_SAE(ops ...operand.Op) { ctx.VMULSD_RZ_SAE(ops...) }" ]
[ "0.61774474", "0.5446622", "0.5380431", "0.5366304", "0.53519195", "0.53207326", "0.5293268", "0.5205501", "0.5153583", "0.51294535", "0.51273733", "0.5125896", "0.51242864", "0.5117339", "0.51127476", "0.51060134", "0.5094488", "0.508832", "0.5074884", "0.5063945", "0.50372845", "0.50318253", "0.5026373", "0.5023231", "0.50191194", "0.50091416", "0.5001559", "0.49883068", "0.49606284", "0.49289876", "0.4919669", "0.4917023", "0.49071103", "0.48985788", "0.48851785", "0.48848277", "0.48842734", "0.48561403", "0.48552775", "0.48491153", "0.48489052", "0.48469213", "0.48440734", "0.48290026", "0.48247913", "0.48170176", "0.48151356", "0.48151213", "0.48082638", "0.48080575", "0.48080575", "0.48080575", "0.48080575", "0.48080575", "0.48080575", "0.48080575", "0.48080575", "0.48080575", "0.48080575", "0.48009914", "0.47988972", "0.47925588", "0.478861", "0.47802925", "0.47756732", "0.47748882", "0.47719806", "0.47719356", "0.47718713", "0.47670946", "0.47633356", "0.47464123", "0.47462738", "0.47380525", "0.47377467", "0.4734039", "0.47331977", "0.47320846", "0.47319743", "0.47316402", "0.472825", "0.4724695", "0.47230482", "0.47136673", "0.471157", "0.47084054", "0.4705642", "0.4705631", "0.47043988", "0.46961004", "0.46949673", "0.46926767", "0.46893856", "0.46893403", "0.46862635", "0.46860728", "0.46859226", "0.4683726", "0.46815494", "0.4680978" ]
0.6863956
0
WaitScala mocks base method
func (m *MockmonitorInterface) WaitScala(name string) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WaitScala", name) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockImporter) Wait() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Wait\")\n}", "func (_m *MockWaiter) Wait() {\n\t_m.Called()\n}", "func (m *MockOperation) Wait() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Wait\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *TesterMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *OutboundMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *ShifterMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *MockProcess) Wait() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Wait\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockRateLimiter) Wait(ctx context.Context) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Wait\", ctx)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *ModifierMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *StateSwitcherMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *HeavySyncMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *ParcelMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.AllowedSenderObjectAndRoleFinished()\n\t\tok = ok && m.ContextFinished()\n\t\tok = ok && m.DefaultRoleFinished()\n\t\tok = ok && m.DefaultTargetFinished()\n\t\tok = ok && m.DelegationTokenFinished()\n\t\tok = ok && m.GetCallerFinished()\n\t\tok = ok && m.GetSenderFinished()\n\t\tok = ok && m.GetSignFinished()\n\t\tok = ok && m.MessageFinished()\n\t\tok = ok && m.PulseFinished()\n\t\tok = ok && m.SetSenderFinished()\n\t\tok = ok && m.TypeFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.AllowedSenderObjectAndRoleFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ParcelMock.AllowedSenderObjectAndRole\")\n\t\t\t}\n\n\t\t\tif !m.ContextFinished() {\n\t\t\t\tm.t.Error(\"Expected call to 
ParcelMock.Context\")\n\t\t\t}\n\n\t\t\tif !m.DefaultRoleFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ParcelMock.DefaultRole\")\n\t\t\t}\n\n\t\t\tif !m.DefaultTargetFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ParcelMock.DefaultTarget\")\n\t\t\t}\n\n\t\t\tif !m.DelegationTokenFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ParcelMock.DelegationToken\")\n\t\t\t}\n\n\t\t\tif !m.GetCallerFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ParcelMock.GetCaller\")\n\t\t\t}\n\n\t\t\tif !m.GetSenderFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ParcelMock.GetSender\")\n\t\t\t}\n\n\t\t\tif !m.GetSignFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ParcelMock.GetSign\")\n\t\t\t}\n\n\t\t\tif !m.MessageFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ParcelMock.Message\")\n\t\t\t}\n\n\t\t\tif !m.PulseFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ParcelMock.Pulse\")\n\t\t\t}\n\n\t\t\tif !m.SetSenderFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ParcelMock.SetSender\")\n\t\t\t}\n\n\t\t\tif !m.TypeFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ParcelMock.Type\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *SignatureKeyHolderMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *MocktimeClient) Sleep(arg0 time.Duration) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Sleep\", arg0)\n}", "func (m *CryptographyServiceMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *MockOSProcess) Wait() (*os.ProcessState, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Wait\")\n\tret0, _ := ret[0].(*os.ProcessState)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *ActiveNodeMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (_m *SubProcessCmd) Wait() error {\n\tret := _m.Called()\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func() error); ok 
{\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *ParcelMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *HostNetworkMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && (m.BuildResponseFunc == nil || atomic.LoadUint64(&m.BuildResponseCounter) > 0)\n\t\tok = ok && (m.GetNodeIDFunc == nil || atomic.LoadUint64(&m.GetNodeIDCounter) > 0)\n\t\tok = ok && (m.NewRequestBuilderFunc == nil || atomic.LoadUint64(&m.NewRequestBuilderCounter) > 0)\n\t\tok = ok && (m.PublicAddressFunc == nil || atomic.LoadUint64(&m.PublicAddressCounter) > 0)\n\t\tok = ok && (m.RegisterRequestHandlerFunc == nil || atomic.LoadUint64(&m.RegisterRequestHandlerCounter) > 0)\n\t\tok = ok && (m.SendRequestFunc == nil || atomic.LoadUint64(&m.SendRequestCounter) > 0)\n\t\tok = ok && (m.StartFunc == nil || atomic.LoadUint64(&m.StartCounter) > 0)\n\t\tok = ok && (m.StopFunc == nil || atomic.LoadUint64(&m.StopCounter) > 0)\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif m.BuildResponseFunc != nil && atomic.LoadUint64(&m.BuildResponseCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.BuildResponse\")\n\t\t\t}\n\n\t\t\tif m.GetNodeIDFunc != nil && atomic.LoadUint64(&m.GetNodeIDCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.GetNodeID\")\n\t\t\t}\n\n\t\t\tif m.NewRequestBuilderFunc != nil && atomic.LoadUint64(&m.NewRequestBuilderCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.NewRequestBuilder\")\n\t\t\t}\n\n\t\t\tif m.PublicAddressFunc != nil && atomic.LoadUint64(&m.PublicAddressCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.PublicAddress\")\n\t\t\t}\n\n\t\t\tif m.RegisterRequestHandlerFunc != nil && atomic.LoadUint64(&m.RegisterRequestHandlerCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to 
HostNetworkMock.RegisterRequestHandler\")\n\t\t\t}\n\n\t\t\tif m.SendRequestFunc != nil && atomic.LoadUint64(&m.SendRequestCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.SendRequest\")\n\t\t\t}\n\n\t\t\tif m.StartFunc != nil && atomic.LoadUint64(&m.StartCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.Start\")\n\t\t\t}\n\n\t\t\tif m.StopFunc != nil && atomic.LoadUint64(&m.StopCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.Stop\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *ActiveNodeMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.GetDeclaredPowerFinished()\n\t\tok = ok && m.GetIndexFinished()\n\t\tok = ok && m.GetNodeIDFinished()\n\t\tok = ok && m.GetOpModeFinished()\n\t\tok = ok && m.GetSignatureVerifierFinished()\n\t\tok = ok && m.GetStaticFinished()\n\t\tok = ok && m.IsJoinerFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.GetDeclaredPowerFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ActiveNodeMock.GetDeclaredPower\")\n\t\t\t}\n\n\t\t\tif !m.GetIndexFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ActiveNodeMock.GetIndex\")\n\t\t\t}\n\n\t\t\tif !m.GetNodeIDFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ActiveNodeMock.GetNodeID\")\n\t\t\t}\n\n\t\t\tif !m.GetOpModeFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ActiveNodeMock.GetOpMode\")\n\t\t\t}\n\n\t\t\tif !m.GetSignatureVerifierFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ActiveNodeMock.GetSignatureVerifier\")\n\t\t\t}\n\n\t\t\tif !m.GetStaticFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ActiveNodeMock.GetStatic\")\n\t\t\t}\n\n\t\t\tif !m.IsJoinerFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ActiveNodeMock.IsJoiner\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were 
not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *HostNetworkMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *UnsyncListMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *StateSwitcherMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && (m.GetStateFunc == nil || atomic.LoadUint64(&m.GetStateCounter) > 0)\n\t\tok = ok && (m.SetPulsarFunc == nil || atomic.LoadUint64(&m.SetPulsarCounter) > 0)\n\t\tok = ok && (m.SwitchToStateFunc == nil || atomic.LoadUint64(&m.SwitchToStateCounter) > 0)\n\t\tok = ok && (m.setStateFunc == nil || atomic.LoadUint64(&m.setStateCounter) > 0)\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif m.GetStateFunc != nil && atomic.LoadUint64(&m.GetStateCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to StateSwitcherMock.GetState\")\n\t\t\t}\n\n\t\t\tif m.SetPulsarFunc != nil && atomic.LoadUint64(&m.SetPulsarCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to StateSwitcherMock.SetPulsar\")\n\t\t\t}\n\n\t\t\tif m.SwitchToStateFunc != nil && atomic.LoadUint64(&m.SwitchToStateCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to StateSwitcherMock.SwitchToState\")\n\t\t\t}\n\n\t\t\tif m.setStateFunc != nil && atomic.LoadUint64(&m.setStateCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to StateSwitcherMock.setState\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *OutboundMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.AsByteStringFinished()\n\t\tok = ok && m.CanAcceptFinished()\n\t\tok = ok && m.GetEndpointTypeFinished()\n\t\tok = ok && m.GetIPAddressFinished()\n\t\tok = ok && m.GetNameAddressFinished()\n\t\tok = ok 
&& m.GetRelayIDFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.AsByteStringFinished() {\n\t\t\t\tm.t.Error(\"Expected call to OutboundMock.AsByteString\")\n\t\t\t}\n\n\t\t\tif !m.CanAcceptFinished() {\n\t\t\t\tm.t.Error(\"Expected call to OutboundMock.CanAccept\")\n\t\t\t}\n\n\t\t\tif !m.GetEndpointTypeFinished() {\n\t\t\t\tm.t.Error(\"Expected call to OutboundMock.GetEndpointType\")\n\t\t\t}\n\n\t\t\tif !m.GetIPAddressFinished() {\n\t\t\t\tm.t.Error(\"Expected call to OutboundMock.GetIPAddress\")\n\t\t\t}\n\n\t\t\tif !m.GetNameAddressFinished() {\n\t\t\t\tm.t.Error(\"Expected call to OutboundMock.GetNameAddress\")\n\t\t\t}\n\n\t\t\tif !m.GetRelayIDFinished() {\n\t\t\t\tm.t.Error(\"Expected call to OutboundMock.GetRelayID\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *MockWorker) Wait() (interface{}, error) {\n\targs := m.MethodCalled(\"Wait\")\n\n\treturn args.Get(0), args.Error(1)\n}", "func (f *FakeCmdRunner) Wait() error {\n\treturn f.Err\n}", "func (m *MockCEImpl) Wait(id string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Wait\", id, condition)\n\tret0, _ := ret[0].(<-chan container.ContainerWaitOKBody)\n\tret1, _ := ret[1].(<-chan error)\n\treturn ret0, ret1\n}", "func (m *ConsensusNetworkMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *MocktimeClient) Until(arg0 time.Time) time.Duration {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Until\", arg0)\n\tret0, _ := ret[0].(time.Duration)\n\treturn ret0\n}", "func (m *IndexBucketModifierMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *HeavySyncMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && 
m.ResetFinished()\n\t\tok = ok && m.StartFinished()\n\t\tok = ok && m.StopFinished()\n\t\tok = ok && m.StoreBlobsFinished()\n\t\tok = ok && m.StoreDropFinished()\n\t\tok = ok && m.StoreIndicesFinished()\n\t\tok = ok && m.StoreRecordsFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.ResetFinished() {\n\t\t\t\tm.t.Error(\"Expected call to HeavySyncMock.Reset\")\n\t\t\t}\n\n\t\t\tif !m.StartFinished() {\n\t\t\t\tm.t.Error(\"Expected call to HeavySyncMock.Start\")\n\t\t\t}\n\n\t\t\tif !m.StopFinished() {\n\t\t\t\tm.t.Error(\"Expected call to HeavySyncMock.Stop\")\n\t\t\t}\n\n\t\t\tif !m.StoreBlobsFinished() {\n\t\t\t\tm.t.Error(\"Expected call to HeavySyncMock.StoreBlobs\")\n\t\t\t}\n\n\t\t\tif !m.StoreDropFinished() {\n\t\t\t\tm.t.Error(\"Expected call to HeavySyncMock.StoreDrop\")\n\t\t\t}\n\n\t\t\tif !m.StoreIndicesFinished() {\n\t\t\t\tm.t.Error(\"Expected call to HeavySyncMock.StoreIndices\")\n\t\t\t}\n\n\t\t\tif !m.StoreRecordsFinished() {\n\t\t\t\tm.t.Error(\"Expected call to HeavySyncMock.StoreRecords\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *ModifierMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.SetFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.SetFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ModifierMock.Set\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *RecentIndexStorageMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.AddObjectFinished()\n\t\tok = ok && m.AddObjectWithTLLFinished()\n\t\tok = ok && m.DecreaseIndexTTLFinished()\n\t\tok = ok && 
m.FilterNotExistWithLockFinished()\n\t\tok = ok && m.GetObjectsFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.AddObjectFinished() {\n\t\t\t\tm.t.Error(\"Expected call to RecentIndexStorageMock.AddObject\")\n\t\t\t}\n\n\t\t\tif !m.AddObjectWithTLLFinished() {\n\t\t\t\tm.t.Error(\"Expected call to RecentIndexStorageMock.AddObjectWithTLL\")\n\t\t\t}\n\n\t\t\tif !m.DecreaseIndexTTLFinished() {\n\t\t\t\tm.t.Error(\"Expected call to RecentIndexStorageMock.DecreaseIndexTTL\")\n\t\t\t}\n\n\t\t\tif !m.FilterNotExistWithLockFinished() {\n\t\t\t\tm.t.Error(\"Expected call to RecentIndexStorageMock.FilterNotExistWithLock\")\n\t\t\t}\n\n\t\t\tif !m.GetObjectsFinished() {\n\t\t\t\tm.t.Error(\"Expected call to RecentIndexStorageMock.GetObjects\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func wait() {\n\twaitImpl()\n}", "func (m *ConsensusNetworkMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && (m.GetNodeIDFunc == nil || atomic.LoadUint64(&m.GetNodeIDCounter) > 0)\n\t\tok = ok && (m.NewRequestBuilderFunc == nil || atomic.LoadUint64(&m.NewRequestBuilderCounter) > 0)\n\t\tok = ok && (m.PublicAddressFunc == nil || atomic.LoadUint64(&m.PublicAddressCounter) > 0)\n\t\tok = ok && (m.RegisterRequestHandlerFunc == nil || atomic.LoadUint64(&m.RegisterRequestHandlerCounter) > 0)\n\t\tok = ok && (m.SendRequestFunc == nil || atomic.LoadUint64(&m.SendRequestCounter) > 0)\n\t\tok = ok && (m.StartFunc == nil || atomic.LoadUint64(&m.StartCounter) > 0)\n\t\tok = ok && (m.StopFunc == nil || atomic.LoadUint64(&m.StopCounter) > 0)\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif m.GetNodeIDFunc != nil && atomic.LoadUint64(&m.GetNodeIDCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to 
ConsensusNetworkMock.GetNodeID\")\n\t\t\t}\n\n\t\t\tif m.NewRequestBuilderFunc != nil && atomic.LoadUint64(&m.NewRequestBuilderCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to ConsensusNetworkMock.NewRequestBuilder\")\n\t\t\t}\n\n\t\t\tif m.PublicAddressFunc != nil && atomic.LoadUint64(&m.PublicAddressCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to ConsensusNetworkMock.PublicAddress\")\n\t\t\t}\n\n\t\t\tif m.RegisterRequestHandlerFunc != nil && atomic.LoadUint64(&m.RegisterRequestHandlerCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to ConsensusNetworkMock.RegisterRequestHandler\")\n\t\t\t}\n\n\t\t\tif m.SendRequestFunc != nil && atomic.LoadUint64(&m.SendRequestCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to ConsensusNetworkMock.SendRequest\")\n\t\t\t}\n\n\t\t\tif m.StartFunc != nil && atomic.LoadUint64(&m.StartCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to ConsensusNetworkMock.Start\")\n\t\t\t}\n\n\t\t\tif m.StopFunc != nil && atomic.LoadUint64(&m.StopCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to ConsensusNetworkMock.Stop\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *MockCallResult) Wait() *Result {\n\targs := m.MethodCalled(\"Wait\")\n\n\tif result := args.Get(0); result != nil {\n\t\treturn result.(*Result)\n\t}\n\n\treturn nil\n}", "func (_m *MockTimelineProvider) Sleep(duration time.Duration) {\n\t_m.ctrl.Call(_m, \"Sleep\", duration)\n}", "func (m *SignatureKeyHolderMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.AsByteStringFinished()\n\t\tok = ok && m.AsBytesFinished()\n\t\tok = ok && m.EqualsFinished()\n\t\tok = ok && m.FixedByteSizeFinished()\n\t\tok = ok && m.FoldToUint64Finished()\n\t\tok = ok && m.GetSignMethodFinished()\n\t\tok = ok && m.GetSignatureKeyMethodFinished()\n\t\tok = ok && 
m.GetSignatureKeyTypeFinished()\n\t\tok = ok && m.ReadFinished()\n\t\tok = ok && m.WriteToFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.AsByteStringFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.AsByteString\")\n\t\t\t}\n\n\t\t\tif !m.AsBytesFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.AsBytes\")\n\t\t\t}\n\n\t\t\tif !m.EqualsFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.Equals\")\n\t\t\t}\n\n\t\t\tif !m.FixedByteSizeFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.FixedByteSize\")\n\t\t\t}\n\n\t\t\tif !m.FoldToUint64Finished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.FoldToUint64\")\n\t\t\t}\n\n\t\t\tif !m.GetSignMethodFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.GetSignMethod\")\n\t\t\t}\n\n\t\t\tif !m.GetSignatureKeyMethodFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.GetSignatureKeyMethod\")\n\t\t\t}\n\n\t\t\tif !m.GetSignatureKeyTypeFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.GetSignatureKeyType\")\n\t\t\t}\n\n\t\t\tif !m.ReadFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.Read\")\n\t\t\t}\n\n\t\t\tif !m.WriteToFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.WriteTo\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *IndexBucketModifierMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.SetBucketFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.SetBucketFinished() {\n\t\t\t\tm.t.Error(\"Expected call to IndexBucketModifierMock.SetBucket\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on 
time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *CryptographyServiceMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.GetPublicKeyFinished()\n\t\tok = ok && m.SignFinished()\n\t\tok = ok && m.VerifyFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.GetPublicKeyFinished() {\n\t\t\t\tm.t.Error(\"Expected call to CryptographyServiceMock.GetPublicKey\")\n\t\t\t}\n\n\t\t\tif !m.SignFinished() {\n\t\t\t\tm.t.Error(\"Expected call to CryptographyServiceMock.Sign\")\n\t\t\t}\n\n\t\t\tif !m.VerifyFinished() {\n\t\t\t\tm.t.Error(\"Expected call to CryptographyServiceMock.Verify\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *MockSerializer) Wait() interface{} {\n\targs := m.MethodCalled(\"Wait\")\n\n\treturn args.Get(0)\n}", "func (m *UnsyncListMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.AddClaimsFinished()\n\t\tok = ok && m.CalculateHashFinished()\n\t\tok = ok && m.GetActiveNodeFinished()\n\t\tok = ok && m.GetActiveNodesFinished()\n\t\tok = ok && m.IndexToRefFinished()\n\t\tok = ok && m.LengthFinished()\n\t\tok = ok && m.RefToIndexFinished()\n\t\tok = ok && m.RemoveClaimsFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.AddClaimsFinished() {\n\t\t\t\tm.t.Error(\"Expected call to UnsyncListMock.AddClaims\")\n\t\t\t}\n\n\t\t\tif !m.CalculateHashFinished() {\n\t\t\t\tm.t.Error(\"Expected call to UnsyncListMock.CalculateHash\")\n\t\t\t}\n\n\t\t\tif !m.GetActiveNodeFinished() {\n\t\t\t\tm.t.Error(\"Expected call to UnsyncListMock.GetActiveNode\")\n\t\t\t}\n\n\t\t\tif !m.GetActiveNodesFinished() {\n\t\t\t\tm.t.Error(\"Expected call to 
UnsyncListMock.GetActiveNodes\")\n\t\t\t}\n\n\t\t\tif !m.IndexToRefFinished() {\n\t\t\t\tm.t.Error(\"Expected call to UnsyncListMock.IndexToRef\")\n\t\t\t}\n\n\t\t\tif !m.LengthFinished() {\n\t\t\t\tm.t.Error(\"Expected call to UnsyncListMock.Length\")\n\t\t\t}\n\n\t\t\tif !m.RefToIndexFinished() {\n\t\t\t\tm.t.Error(\"Expected call to UnsyncListMock.RefToIndex\")\n\t\t\t}\n\n\t\t\tif !m.RemoveClaimsFinished() {\n\t\t\t\tm.t.Error(\"Expected call to UnsyncListMock.RemoveClaims\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *IndexCollectionAccessorMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (tb *Testbed) Wait(timeout time.Duration) error {\n\tdefer tb.cancel()\n\tnow := time.Now()\n\tselect {\n\tcase <-tb.donec:\n\t\treturn nil\n\tcase to := <-time.After(timeout):\n\t\twaited := to.Sub(now)\n\t\treturn errors.New(\"timeout after \" + waited.String())\n\t}\n}", "func (m *RecentIndexStorageMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (m *MockMoby) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) {\n\tret := m.ctrl.Call(m, \"ContainerWait\", ctx, containerID, condition)\n\tret0, _ := ret[0].(<-chan container.ContainerWaitOKBody)\n\tret1, _ := ret[1].(<-chan error)\n\treturn ret0, ret1\n}", "func (m *ShifterMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.ShiftFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.ShiftFinished() {\n\t\t\t\tm.t.Error(\"Expected call to ShifterMock.Shift\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *TesterMock) 
MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && (m.ErrorFunc == nil || atomic.LoadUint64(&m.ErrorCounter) > 0)\n\t\tok = ok && (m.ErrorfFunc == nil || atomic.LoadUint64(&m.ErrorfCounter) > 0)\n\t\tok = ok && (m.FatalFunc == nil || atomic.LoadUint64(&m.FatalCounter) > 0)\n\t\tok = ok && (m.FatalfFunc == nil || atomic.LoadUint64(&m.FatalfCounter) > 0)\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif m.ErrorFunc != nil && atomic.LoadUint64(&m.ErrorCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to TesterMock.Error\")\n\t\t\t}\n\n\t\t\tif m.ErrorfFunc != nil && atomic.LoadUint64(&m.ErrorfCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to TesterMock.Errorf\")\n\t\t\t}\n\n\t\t\tif m.FatalFunc != nil && atomic.LoadUint64(&m.FatalCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to TesterMock.Fatal\")\n\t\t\t}\n\n\t\t\tif m.FatalfFunc != nil && atomic.LoadUint64(&m.FatalfCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to TesterMock.Fatalf\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *PaymentRepositoryMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *GatewayMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *ListRepositoryMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() 
{\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *MockPostForkBlock) setStatus(arg0 choices.Status) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"setStatus\", arg0)\n}", "func (m *MockMempool) Lock() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Lock\")\n}", "func (p *Provider) wait() {\n\t// Compute the backoff time\n\tbackoff := p.backoffDuration()\n\n\t// Setup a wait timer\n\tvar wait <-chan time.Time\n\tif backoff > 0 {\n\t\tjitter := time.Duration(rand.Uint32()) % backoff\n\t\twait = time.After(backoff + jitter)\n\t}\n\n\t// Wait until timer or shutdown\n\tselect {\n\tcase <-wait:\n\tcase <-p.shutdownCh:\n\t}\n}", "func (m *MockRateLimiter) WaitN(ctx context.Context, numToken int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WaitN\", ctx, numToken)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *SenderMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *StoreMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *MockWindower) Delay(arg0 context.Context, arg1, arg2 uint64, arg3 ids.NodeID) (time.Duration, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Delay\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(time.Duration)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *ProcedureMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() 
{\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *MockWaiter) WaitForAll(arg0 context.Context) []error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WaitForAll\", arg0)\n\tret0, _ := ret[0].([]error)\n\treturn ret0\n}", "func (m *IndexCollectionAccessorMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.ForJetFinished()\n\t\tok = ok && m.ForPulseAndJetFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.ForJetFinished() {\n\t\t\t\tm.t.Error(\"Expected call to IndexCollectionAccessorMock.ForJet\")\n\t\t\t}\n\n\t\t\tif !m.ForPulseAndJetFinished() {\n\t\t\t\tm.t.Error(\"Expected call to IndexCollectionAccessorMock.ForPulseAndJet\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func (m *LogUploaderMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *JetReleaserMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *ClientMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m 
*ClientMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *StorageMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *StorageMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (f *fakeProgressbar) Wait() {}", "func (m *ContainerMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func TestWait_timeout(t *testing.T) {\n\tdefer check(t)\n\tcontent := \"hello world!\"\n\treq := &showcasepb.WaitRequest{\n\t\tEnd: &showcasepb.WaitRequest_Ttl{\n\t\t\tTtl: &durationpb.Duration{Seconds: 1},\n\t\t},\n\t\tResponse: &showcasepb.WaitRequest_Success{\n\t\t\tSuccess: &showcasepb.WaitResponse{Content: content},\n\t\t},\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\tdefer cancel()\n\n\top, err := echo.Wait(ctx, req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tresp, err := op.Wait(ctx)\n\tif err == nil {\n\t\tt.Errorf(\"Wait() = %+v, want error\", resp)\n\t}\n}", "func (m *ExtractorClientMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor 
{\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (_m *BandwidthThrottlerSvc) Wait() {\n\t_m.Called()\n}", "func (m *MockClient) Lock() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Lock\")\n}", "func (m *MockPostForkBlock) Status() choices.Status {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Status\")\n\tret0, _ := ret[0].(choices.Status)\n\treturn ret0\n}", "func (m *Mock) Sleep(d time.Duration) {\n\t<-m.After(d)\n}", "func TestWait(t *testing.T) {\n\tdefer check(t)\n\tcontent := \"hello world!\"\n\treq := &showcasepb.WaitRequest{\n\t\tEnd: &showcasepb.WaitRequest_Ttl{\n\t\t\tTtl: &durationpb.Duration{Nanos: 100},\n\t\t},\n\t\tResponse: &showcasepb.WaitRequest_Success{\n\t\t\tSuccess: &showcasepb.WaitResponse{Content: content},\n\t\t},\n\t}\n\top, err := echo.Wait(context.Background(), req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tresp, err := op.Wait(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.GetContent() != content {\n\t\tt.Errorf(\"Wait() = %q, want %q\", resp.GetContent(), content)\n\t}\n}", "func (m *DownloaderMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *MockPool) ReleaseAndWait() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"ReleaseAndWait\")\n}", "func (m *TagCreatorMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *MockProvider) SyncLoop() 
{\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"SyncLoop\")\n}", "func TestApply(t *testing.T) {\n\tt.Parallel()\n\tdefer logging.HideLogs(t)()\n\n\tnewWait := func() *wait.Wait {\n\t\treturn &wait.Wait{\n\t\t\tShell: &shell.Shell{\n\t\t\t\tCheckStmt: \"test\",\n\t\t\t\tStatus: &shell.CommandResults{},\n\t\t\t},\n\t\t\tRetrier: &wait.Retrier{\n\t\t\t\tMaxRetry: 3,\n\t\t\t\tInterval: 10 * time.Millisecond,\n\t\t\t},\n\t\t}\n\t}\n\n\tt.Run(\"passed\", func(t *testing.T) {\n\t\twait := newWait()\n\t\tm := new(mockExecutor)\n\t\twait.Shell.CmdGenerator = m\n\n\t\tm.On(\"Run\", mock.Anything).\n\t\t\tReturn(&shell.CommandResults{ExitStatus: 0}, nil)\n\n\t\t_, err := wait.Apply()\n\t\trequire.NoError(t, err)\n\n\t\tt.Run(\"retry count\", func(t *testing.T) {\n\t\t\tassert.Equal(t, 1, wait.RetryCount)\n\t\t})\n\n\t\tt.Run(\"status is set\", func(t *testing.T) {\n\t\t\tassert.NotNil(t, wait.Status)\n\t\t})\n\n\t\tt.Run(\"check status is set\", func(t *testing.T) {\n\t\t\tassert.NotNil(t, wait.CheckStatus)\n\t\t})\n\t})\n\n\tt.Run(\"retried\", func(t *testing.T) {\n\t\twait := newWait()\n\t\tm := new(mockExecutor)\n\t\twait.Shell.CmdGenerator = m\n\n\t\tm.On(\"Run\", mock.Anything).Return(&shell.CommandResults{\n\t\t\tResultsContext: shell.ResultsContext{},\n\t\t\tExitStatus: 1,\n\t\t}, nil)\n\n\t\t_, err := wait.Apply()\n\t\trequire.NoError(t, err)\n\n\t\tt.Run(\"retry count\", func(t *testing.T) {\n\t\t\tassert.Equal(t, 3, wait.RetryCount)\n\t\t})\n\t})\n\n\tt.Run(\"returns an error when command executor fails\", func(t *testing.T) {\n\t\twait := newWait()\n\t\tm := new(mockExecutor)\n\t\twait.Shell.CmdGenerator = m\n\n\t\tm.On(\"Run\", mock.Anything).\n\t\t\tReturn(&shell.CommandResults{ExitStatus: 0}, errors.New(\"cmd failed\"))\n\n\t\t_, err := wait.Apply()\n\t\tassert.Error(t, err)\n\t})\n}", "func (m *IndexModifierMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect 
{\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *RecordCollectionAccessorMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (p *Probe) wait() {\n\tp.waitGroup.Wait()\n}", "func (eb exponentialBackoff) wait() {\n\ttime.Sleep(eb.currentDelay)\n\teb.currentDelay = eb.b.Duration()\n}", "func (m *FlagMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func ExpectWaitForTierWatch(ctx context.Context, r reconcile.Reconciler, mockStatus *status.MockStatus) {\n\tmockStatus.On(\"SetDegraded\", operator.ResourceNotReady, \"Waiting for Tier watch to be established\", mock.Anything, mock.Anything).Return()\n\t_, err := r.Reconcile(ctx, reconcile.Request{})\n\tExpect(err).ShouldNot(HaveOccurred())\n\tmockStatus.AssertExpectations(GinkgoT())\n}", "func (t *ElapsedTimeout) Wait(context.Context) error { return nil }", "func (i *InvokeMotorStart) Wait() error {\n\treturn i.Result(nil)\n}", "func (m *DelegationTokenFactoryMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (tfcm *TestFCM) wait(count int) {\n\ttime.Sleep(time.Duration(count) * tfcm.timeout)\n}", "func (t *Indie) Wait() {\n t.waitModules()\n}", "func (m *MockHealthCheck) WaitForInitialStatsUpdates() 
{\n\tm.ctrl.Call(m, \"WaitForInitialStatsUpdates\")\n}", "func (m *PacketParserMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *MockRDSAPI) WaitUntilDBSnapshotAvailable(arg0 *rds.DescribeDBSnapshotsInput) error {\n\tret := m.ctrl.Call(m, \"WaitUntilDBSnapshotAvailable\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockFullNode) StateWaitMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch) (*types0.MsgLookup, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"StateWaitMsgLimited\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(*types0.MsgLookup)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *testSignaler) wait() bool {\n\tselect {\n\tcase s := <-s.nonBlockingStatus:\n\t\treturn s\n\tcase s := <-s.status:\n\t\treturn s\n\t}\n}", "func (m *DigestHolderMock) MinimockWait(timeout mm_time.Duration) {\n\ttimeoutCh := mm_time.After(timeout)\n\tfor {\n\t\tif m.minimockDone() {\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-timeoutCh:\n\t\t\tm.MinimockFinish()\n\t\t\treturn\n\t\tcase <-mm_time.After(10 * mm_time.Millisecond):\n\t\t}\n\t}\n}", "func (m *MockMempool) Unlock() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Unlock\")\n}" ]
[ "0.72333336", "0.7223217", "0.70676416", "0.6972409", "0.686801", "0.6829541", "0.6795864", "0.66882986", "0.6675461", "0.6585401", "0.6544395", "0.64938736", "0.646358", "0.6427835", "0.64261603", "0.6414245", "0.6399369", "0.6366113", "0.63345337", "0.6327929", "0.6319095", "0.6272879", "0.6269109", "0.625926", "0.62482566", "0.6228826", "0.6225187", "0.62180495", "0.6185895", "0.6181706", "0.61768603", "0.61640066", "0.6159875", "0.60834646", "0.60564184", "0.6050649", "0.60339385", "0.60264695", "0.60209453", "0.597354", "0.59711593", "0.5949329", "0.59077626", "0.58692193", "0.58521026", "0.58438873", "0.58397645", "0.5838164", "0.580534", "0.57733935", "0.57671183", "0.5766765", "0.5753586", "0.5741884", "0.5712798", "0.5708656", "0.5704253", "0.56987864", "0.5686897", "0.56848687", "0.5682557", "0.56757087", "0.56524223", "0.56465995", "0.5645952", "0.5645952", "0.5641751", "0.5641751", "0.564035", "0.5610915", "0.560602", "0.5595938", "0.5595669", "0.5581427", "0.5577346", "0.55477893", "0.5546385", "0.554321", "0.554", "0.5537553", "0.55193216", "0.55159366", "0.5514605", "0.55062747", "0.5502031", "0.5500803", "0.5497957", "0.5494902", "0.54940647", "0.5489941", "0.54890627", "0.5483099", "0.54813576", "0.5474498", "0.547121", "0.5464408", "0.5459922", "0.54598916", "0.545492", "0.5452365" ]
0.6001722
39